From 5dca8a70d56034c3c4c4cdb6d42c04f95e83a13d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 27 Jan 2024 15:48:24 +0100 Subject: [PATCH 001/893] ls: include standard `message_type` field in output --- changelog/unreleased/pull-4664 | 8 ++ cmd/restic/cmd_ls.go | 20 ++-- cmd/restic/cmd_ls_test.go | 10 +- cmd/restic/cmd_mount_integration_test.go | 6 -- doc/075_scripting.rst | 111 ++++++++++++----- 5 files changed, 81 insertions(+), 74 deletions(-) create mode 100644 changelog/unreleased/pull-4664 diff --git a/changelog/unreleased/pull-4664 b/changelog/unreleased/pull-4664 new file mode 100644 index 00000000000..74196cd9bf6 --- /dev/null +++ b/changelog/unreleased/pull-4664 @@ -0,0 +1,8 @@ +Enhancement: `ls` uses `message_type` field to distinguish JSON messages + +The `ls` command was the only command that used the `struct_type` field to determine +the message type in the JSON output format. Now, the JSON output of the +`ls` command also includes the `message_type` field. The `struct_type` field is +still included, but is deprecated. + +https://github.com/restic/restic/pull/4664 diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index f412546ae8f..b0246625ee0 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -83,16 +83,18 @@ type jsonLsPrinter struct { func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) { type lsSnapshot struct { *restic.Snapshot - ID *restic.ID `json:"id"` - ShortID string `json:"short_id"` - StructType string `json:"struct_type"` // "snapshot" + ID *restic.ID `json:"id"` + ShortID string `json:"short_id"` + MessageType string `json:"message_type"` // "snapshot" + StructType string `json:"struct_type"` // "snapshot", deprecated } err := p.enc.Encode(lsSnapshot{ - Snapshot: sn, - ID: sn.ID(), - ShortID: sn.ID().Str(), - StructType: "snapshot", + Snapshot: sn, + ID: sn.ID(), + ShortID: sn.ID().Str(), + MessageType: "snapshot", + StructType: "snapshot", }) if err != nil { Warnf("JSON encode failed: %v\n", err) @@ -121,7 +123,8 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { AccessTime time.Time `json:"atime,omitempty"` ChangeTime time.Time `json:"ctime,omitempty"` Inode uint64 `json:"inode,omitempty"` - StructType string `json:"struct_type"` // "node" + MessageType string `json:"message_type"` // "node" + StructType string `json:"struct_type"` // "node", deprecated size uint64 // Target for Size pointer. 
}{ @@ -137,6 +140,7 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { AccessTime: node.AccessTime, ChangeTime: node.ChangeTime, Inode: node.Inode, + MessageType: "node", StructType: "node", } // Always print size for regular files, even when empty, diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index 41c235eabad..828b2920e64 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -87,11 +87,11 @@ var lsTestNodes = []lsTestNode{ func TestLsNodeJSON(t *testing.T) { for i, expect := range []string{ - `{"name":"baz","type":"file","path":"/bar/baz","uid":10000000,"gid":20000000,"size":12345,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`, - `{"name":"empty","type":"file","path":"/foo/empty","uid":1001,"gid":1001,"size":0,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`, - `{"name":"link","type":"symlink","path":"/foo/link","uid":0,"gid":0,"mode":134218239,"permissions":"Lrwxrwxrwx","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`, - `{"name":"directory","type":"dir","path":"/some/directory","uid":0,"gid":0,"mode":2147484141,"permissions":"drwxr-xr-x","mtime":"2020-01-02T03:04:05Z","atime":"2021-02-03T04:05:06.000000007Z","ctime":"2022-03-04T05:06:07.000000008Z","struct_type":"node"}`, - `{"name":"sticky","type":"dir","path":"/some/sticky","uid":0,"gid":0,"mode":2161115629,"permissions":"dugtrwxr-xr-x","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","struct_type":"node"}`, + `{"name":"baz","type":"file","path":"/bar/baz","uid":10000000,"gid":20000000,"size":12345,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","message_type":"node","struct_type":"node"}`, + `{"name":"empty","type":"file","path":"/foo/empty","uid":1001,"gid":1001,"size":0,"permissions":"----------","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","message_type":"node","struct_type":"node"}`, + `{"name":"link","type":"symlink","path":"/foo/link","uid":0,"gid":0,"mode":134218239,"permissions":"Lrwxrwxrwx","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","message_type":"node","struct_type":"node"}`, + `{"name":"directory","type":"dir","path":"/some/directory","uid":0,"gid":0,"mode":2147484141,"permissions":"drwxr-xr-x","mtime":"2020-01-02T03:04:05Z","atime":"2021-02-03T04:05:06.000000007Z","ctime":"2022-03-04T05:06:07.000000008Z","message_type":"node","struct_type":"node"}`, + `{"name":"sticky","type":"dir","path":"/some/sticky","uid":0,"gid":0,"mode":2161115629,"permissions":"dugtrwxr-xr-x","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","message_type":"node","struct_type":"node"}`, } { c := lsTestNodes[i] buf := new(bytes.Buffer) diff --git a/cmd/restic/cmd_mount_integration_test.go b/cmd/restic/cmd_mount_integration_test.go index 1b069d58211..d2025a395aa 100644 --- a/cmd/restic/cmd_mount_integration_test.go +++ b/cmd/restic/cmd_mount_integration_test.go @@ -12,7 +12,6 @@ import ( "testing" "time" - "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest 
"github.com/restic/restic/internal/test" @@ -160,11 +159,6 @@ func TestMount(t *testing.T) { t.Skip("Skipping fuse tests") } - debugEnabled := debug.TestLogToStderr(t) - if debugEnabled { - defer debug.TestDisableLog(t) - } - env, cleanup := withTestEnvironment(t) // must list snapshots more than once env.gopts.backendTestHook = nil diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 7279ee61474..fda4b2d532a 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -75,9 +75,6 @@ Several commands, in particular long running ones or those that generate a large use a format also known as JSON lines. It consists of a stream of new-line separated JSON messages. You can determine the nature of the message using the ``message_type`` field. -As an exception, the ``ls`` command uses the field ``struct_type`` instead. - - backup ------ @@ -420,63 +417,67 @@ As an exception, the ``struct_type`` field is used to determine the message type snapshot ^^^^^^^^ -+----------------+--------------------------------------------------+ -| ``struct_type``| Always "snapshot" | -+----------------+--------------------------------------------------+ -| ``time`` | Timestamp of when the backup was started | -+----------------+--------------------------------------------------+ -| ``parent`` | ID of the parent snapshot | -+----------------+--------------------------------------------------+ -| ``tree`` | ID of the root tree blob | -+----------------+--------------------------------------------------+ -| ``paths`` | List of paths included in the backup | -+----------------+--------------------------------------------------+ -| ``hostname`` | Hostname of the backed up machine | -+----------------+--------------------------------------------------+ -| ``username`` | Username the backup command was run as | -+----------------+--------------------------------------------------+ -| ``uid`` | ID of owner | -+----------------+--------------------------------------------------+ -| ``gid`` | ID of group | -+----------------+--------------------------------------------------+ -| ``excludes`` | List of paths and globs excluded from the backup | -+----------------+--------------------------------------------------+ -| ``tags`` | List of tags for the snapshot in question | -+----------------+--------------------------------------------------+ -| ``id`` | Snapshot ID | -+----------------+--------------------------------------------------+ -| ``short_id`` | Snapshot ID, short form | -+----------------+--------------------------------------------------+ ++------------------+--------------------------------------------------+ +| ``message_type`` | Always "snapshot" | ++------------------+--------------------------------------------------+ +| ``struct_type`` | Always "snapshot" (deprecated) | ++------------------+--------------------------------------------------+ +| ``time`` | Timestamp of when the backup was started | ++------------------+--------------------------------------------------+ +| ``parent`` | ID of the parent snapshot | ++------------------+--------------------------------------------------+ +| ``tree`` | ID of the root tree blob | ++------------------+--------------------------------------------------+ +| ``paths`` | List of paths included in the backup | ++------------------+--------------------------------------------------+ +| ``hostname`` | Hostname of the backed up machine | ++------------------+--------------------------------------------------+ +| ``username`` | Username the backup command was 
run as | ++------------------+--------------------------------------------------+ +| ``uid`` | ID of owner | ++------------------+--------------------------------------------------+ +| ``gid`` | ID of group | ++------------------+--------------------------------------------------+ +| ``excludes`` | List of paths and globs excluded from the backup | ++------------------+--------------------------------------------------+ +| ``tags`` | List of tags for the snapshot in question | ++------------------+--------------------------------------------------+ +| ``id`` | Snapshot ID | ++------------------+--------------------------------------------------+ +| ``short_id`` | Snapshot ID, short form | ++------------------+--------------------------------------------------+ node ^^^^ -+-----------------+--------------------------+ -| ``struct_type`` | Always "node" | -+-----------------+--------------------------+ -| ``name`` | Node name | -+-----------------+--------------------------+ -| ``type`` | Node type | -+-----------------+--------------------------+ -| ``path`` | Node path | -+-----------------+--------------------------+ -| ``uid`` | UID of node | -+-----------------+--------------------------+ -| ``gid`` | GID of node | -+-----------------+--------------------------+ -| ``size`` | Size in bytes | -+-----------------+--------------------------+ -| ``mode`` | Node mode | -+-----------------+--------------------------+ -| ``atime`` | Node access time | -+-----------------+--------------------------+ -| ``mtime`` | Node modification time | -+-----------------+--------------------------+ -| ``ctime`` | Node creation time | -+-----------------+--------------------------+ -| ``inode`` | Inode number of node | -+-----------------+--------------------------+ ++------------------+----------------------------+ +| ``message_type`` | Always "node" | ++------------------+----------------------------+ +| ``struct_type`` | Always "node" (deprecated) | ++------------------+----------------------------+ +| ``name`` | Node name | ++------------------+----------------------------+ +| ``type`` | Node type | ++------------------+----------------------------+ +| ``path`` | Node path | ++------------------+----------------------------+ +| ``uid`` | UID of node | ++------------------+----------------------------+ +| ``gid`` | GID of node | ++------------------+----------------------------+ +| ``size`` | Size in bytes | ++------------------+----------------------------+ +| ``mode`` | Node mode | ++------------------+----------------------------+ +| ``atime`` | Node access time | ++------------------+----------------------------+ +| ``mtime`` | Node modification time | ++------------------+----------------------------+ +| ``ctime`` | Node creation time | ++------------------+----------------------------+ +| ``inode`` | Inode number of node | ++------------------+----------------------------+ restore From 80db02fc3521612c7dc7ce86a9e57f17e20ee9df Mon Sep 17 00:00:00 2001 From: lou Date: Thu, 8 Feb 2024 14:11:57 +0800 Subject: [PATCH 002/893] dump flag --target should be allowed to write existing file Signed-off-by: lou --- cmd/restic/cmd_dump.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 1628d6a9533..9178f2abe52 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -174,7 +174,7 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args [] canWriteArchiveFunc := checkStdoutArchive if opts.Target != "" { - 
file, err := os.OpenFile(opts.Target, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o666) + file, err := os.Create(opts.Target) if err != nil { return fmt.Errorf("cannot dump to file: %w", err) } From 15d6fa1f8374449346eb86cb4919bf8629671c84 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 10 Feb 2024 18:38:36 +0100 Subject: [PATCH 003/893] dump: update docs for --target option --- changelog/unreleased/issue-4678 | 1 + doc/050_restore.rst | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog/unreleased/issue-4678 b/changelog/unreleased/issue-4678 index f98711ea420..9f9a213e105 100644 --- a/changelog/unreleased/issue-4678 +++ b/changelog/unreleased/issue-4678 @@ -5,3 +5,4 @@ Restic `dump` always printed to the standard output. It now permits to select a https://github.com/restic/restic/issues/4678 https://github.com/restic/restic/pull/4682 +https://github.com/restic/restic/pull/4692 diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 3cd05500b88..916b11c8673 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -176,8 +176,7 @@ To include the folder content at the root of the archive, you can use the `` restore.tar It is also possible to ``dump`` the contents of a selected snapshot and folder -structure to a file using the ``--target`` flag. The ``dump`` command will fail -if the already file exists. +structure to a file using the ``--target`` flag. .. code-block:: console $ restic -r /srv/restic-repo dump latest / --target /home/linux.user/output.tar -a tar \ No newline at end of file From 9720935c56b33fa06287ca4153898ded08484295 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 10 Feb 2024 21:56:01 +0100 Subject: [PATCH 004/893] Update Go version for tests to 1.22 --- .github/workflows/tests.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 45681c6c5c1..abcb0727082 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -13,7 +13,7 @@ permissions: contents: read env: - latest_go: "1.21.x" + latest_go: "1.22.x" GO111MODULE: on jobs: @@ -23,34 +23,34 @@ jobs: # list of jobs to run: include: - job_name: Windows - go: 1.21.x + go: 1.22.x os: windows-latest - job_name: macOS - go: 1.21.x + go: 1.22.x os: macOS-latest test_fuse: false - job_name: Linux - go: 1.21.x + go: 1.22.x os: ubuntu-latest test_cloud_backends: true test_fuse: true check_changelog: true - job_name: Linux (race) - go: 1.21.x + go: 1.22.x os: ubuntu-latest test_fuse: true test_opts: "-race" - job_name: Linux - go: 1.20.x + go: 1.21.x os: ubuntu-latest test_fuse: true - job_name: Linux - go: 1.19.x + go: 1.20.x os: ubuntu-latest test_fuse: true From a8cda0119cdec8755f12c27bd07f312338ec15c1 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 10 Feb 2024 22:08:43 +0100 Subject: [PATCH 005/893] Upgrade golangci-lint --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index abcb0727082..cc599491caa 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -255,7 +255,7 @@ jobs: uses: golangci/golangci-lint-action@v3 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. 
- version: v1.55.2 + version: v1.56.1 args: --verbose --timeout 5m # only run golangci-lint for pull requests, otherwise ALL hints get From c0514dd8baa6460ea1d3cf70d58a1a421eedcea8 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 10 Feb 2024 22:58:10 +0100 Subject: [PATCH 006/893] Fix linter errors (except for tests) --- .golangci.yml | 5 +++++ cmd/restic/cmd_backup.go | 2 +- cmd/restic/cmd_cache.go | 2 +- cmd/restic/cmd_check.go | 8 ++++---- cmd/restic/cmd_diff.go | 2 +- cmd/restic/cmd_generate.go | 2 +- cmd/restic/cmd_key_list.go | 2 +- cmd/restic/cmd_list.go | 4 ++-- cmd/restic/cmd_options.go | 2 +- cmd/restic/cmd_prune.go | 8 ++++---- cmd/restic/cmd_recover.go | 4 ++-- cmd/restic/cmd_repair_index.go | 6 +++--- cmd/restic/cmd_repair_snapshots.go | 2 +- cmd/restic/cmd_restore.go | 4 ++-- cmd/restic/cmd_rewrite.go | 2 +- cmd/restic/cmd_stats.go | 2 +- cmd/restic/cmd_unlock.go | 2 +- cmd/restic/cmd_version.go | 2 +- cmd/restic/main.go | 2 +- internal/archiver/archiver.go | 6 +++--- internal/archiver/scanner.go | 8 ++++---- internal/backend/mem/mem_backend.go | 2 +- internal/backend/rclone/backend.go | 2 +- internal/checker/checker.go | 2 +- internal/fs/fs_track.go | 2 +- internal/fuse/snapshots_dirstruct.go | 2 +- internal/index/index_parallel.go | 2 +- internal/repository/key.go | 2 +- internal/repository/repair_pack.go | 2 +- internal/repository/repository.go | 4 ++-- internal/restic/backend_find.go | 2 +- internal/restic/lock.go | 4 ++-- internal/restic/snapshot.go | 2 +- internal/restorer/restorer.go | 6 +++--- internal/ui/backup/progress.go | 2 +- internal/walker/rewriter.go | 4 ++-- 36 files changed, 61 insertions(+), 56 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c08331401cb..7dc6a8e7fc3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -54,3 +54,8 @@ issues: # staticcheck: there's no easy way to replace these packages - "SA1019: \"golang.org/x/crypto/poly1305\" is deprecated" - "SA1019: \"golang.org/x/crypto/openpgp\" is deprecated" + + exclude-rules: + # revive: ignore unused parameters in tests + - path: (_test\.go|testing\.go|backend/.*/tests\.go) + text: "unused-parameter:" \ No newline at end of file diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index be7a2aa3fb9..318d17796ef 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -42,7 +42,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was a fatal error (no snapshot created). Exit status is 3 if some source data could not be read (incomplete snapshot created). `, - PreRun: func(cmd *cobra.Command, args []string) { + PreRun: func(_ *cobra.Command, _ []string) { if backupOptions.Host == "" { hostname, err := os.Hostname() if err != nil { diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go index 4a10d102772..354cec2882c 100644 --- a/cmd/restic/cmd_cache.go +++ b/cmd/restic/cmd_cache.go @@ -28,7 +28,7 @@ EXIT STATUS Exit status is 0 if the command was successful, and non-zero if there was any error. 
`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return runCache(cacheOptions, globalOptions, args) }, } diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index f04a4fe71b1..22f462d7537 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -38,7 +38,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er RunE: func(cmd *cobra.Command, args []string) error { return runCheck(cmd.Context(), checkOptions, globalOptions, args) }, - PreRunE: func(cmd *cobra.Command, args []string) error { + PreRunE: func(_ *cobra.Command, _ []string) error { return checkFlags(checkOptions) }, } @@ -345,11 +345,11 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args if len(salvagePacks) > 0 { Warnf("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands:\n\n") - var strIds []string + var strIDs []string for _, id := range salvagePacks { - strIds = append(strIds, id.String()) + strIDs = append(strIDs, id.String()) } - Warnf("RESTIC_FEATURES=repair-packs-v1 restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIds, " ")) + Warnf("RESTIC_FEATURES=repair-packs-v1 restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " ")) Warnf("Corrupted blobs are either caused by hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") } } diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 06f2be2aeca..3bd29fa675d 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -418,7 +418,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args [] } if gopts.Quiet { - c.printChange = func(change *Change) {} + c.printChange = func(_ *Change) {} } stats := &DiffStatsContainer{ diff --git a/cmd/restic/cmd_generate.go b/cmd/restic/cmd_generate.go index 9d1652e936c..ba710e70830 100644 --- a/cmd/restic/cmd_generate.go +++ b/cmd/restic/cmd_generate.go @@ -21,7 +21,7 @@ EXIT STATUS Exit status is 0 if the command was successful, and non-zero if there was any error. 
`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return runGenerate(genOpts, args) }, } diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index 517b7c84b7b..2b3574281b8 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -69,7 +69,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions var m sync.Mutex var keys []keyInfo - err := restic.ParallelList(ctx, s, restic.KeyFile, s.Connections(), func(ctx context.Context, id restic.ID, size int64) error { + err := restic.ParallelList(ctx, s, restic.KeyFile, s.Connections(), func(ctx context.Context, id restic.ID, _ int64) error { k, err := repository.LoadKey(ctx, s, id) if err != nil { Warnf("LoadKey() failed: %v\n", err) diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 8be99234fc3..becad7f0deb 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -63,7 +63,7 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { case "locks": t = restic.LockFile case "blobs": - return index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error { + return index.ForAllIndexes(ctx, repo, repo, func(_ restic.ID, idx *index.Index, _ bool, err error) error { if err != nil { return err } @@ -76,7 +76,7 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { return errors.Fatal("invalid type") } - return repo.List(ctx, t, func(id restic.ID, size int64) error { + return repo.List(ctx, t, func(id restic.ID, _ int64) error { Printf("%s\n", id) return nil }) diff --git a/cmd/restic/cmd_options.go b/cmd/restic/cmd_options.go index 471319dfb4c..85e06222025 100644 --- a/cmd/restic/cmd_options.go +++ b/cmd/restic/cmd_options.go @@ -21,7 +21,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, Hidden: true, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, _ []string) { fmt.Printf("All Extended Options:\n") var maxLen int for _, opt := range options.List() { diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index efd8f6e3a38..1b9352ea7f6 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -37,7 +37,7 @@ EXIT STATUS Exit status is 0 if the command was successful, and non-zero if there was any error. 
`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { return runPrune(cmd.Context(), pruneOptions, globalOptions) }, } @@ -101,7 +101,7 @@ func verifyPruneOptions(opts *PruneOptions) error { // parse MaxUnused either as unlimited, a percentage, or an absolute number of bytes switch { case maxUnused == "unlimited": - opts.maxUnusedBytes = func(used uint64) uint64 { + opts.maxUnusedBytes = func(_ uint64) uint64 { return math.MaxUint64 } @@ -130,7 +130,7 @@ func verifyPruneOptions(opts *PruneOptions) error { return errors.Fatalf("invalid number of bytes %q for --max-unused: %v", opts.MaxUnused, err) } - opts.maxUnusedBytes = func(used uint64) uint64 { + opts.maxUnusedBytes = func(_ uint64) uint64 { return uint64(size) } } @@ -798,7 +798,7 @@ func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Rep DeleteProgress: func() *progress.Counter { return newProgressMax(!gopts.Quiet, 0, "old indexes deleted") }, - DeleteReport: func(id restic.ID, err error) { + DeleteReport: func(id restic.ID, _ error) { if gopts.verbosity > 2 { Verbosef("removed index %v\n", id.String()) } diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 499abdf80f2..b97a7582b19 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -25,7 +25,7 @@ EXIT STATUS Exit status is 0 if the command was successful, and non-zero if there was any error. `, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { return runRecover(cmd.Context(), globalOptions) }, } @@ -91,7 +91,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { bar.Done() Verbosef("load snapshots\n") - err = restic.ForAllSnapshots(ctx, snapshotLister, repo, nil, func(id restic.ID, sn *restic.Snapshot, err error) error { + err = restic.ForAllSnapshots(ctx, snapshotLister, repo, nil, func(_ restic.ID, sn *restic.Snapshot, _ error) error { trees[*sn.Tree] = true return nil }) diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index fc5506b340c..ea36f02f609 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -24,7 +24,7 @@ EXIT STATUS Exit status is 0 if the command was successful, and non-zero if there was any error. 
`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions) }, } @@ -78,7 +78,7 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti if opts.ReadAllPacks { // get list of old index files but start with empty index - err := repo.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error { + err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error { obsoleteIndexes = append(obsoleteIndexes, id) return nil }) @@ -88,7 +88,7 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti } else { Verbosef("loading indexes...\n") mi := index.NewMasterIndex() - err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error { + err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error { if err != nil { Warnf("removing invalid index %v: %v\n", id, err) obsoleteIndexes = append(obsoleteIndexes, id) diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index 19e457b1fff..cc3d0eb8598 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -125,7 +125,7 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt node.Size = newSize return node }, - RewriteFailedTree: func(nodeID restic.ID, path string, _ error) (restic.ID, error) { + RewriteFailedTree: func(_ restic.ID, path string, _ error) (restic.ID, error) { if path == "/" { Verbosef(" dir %q: not readable\n", path) // remove snapshots with invalid root node diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 1208d30eba4..37d3046721f 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -181,7 +181,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, excludePatterns := filter.ParsePatterns(opts.Exclude) insensitiveExcludePatterns := filter.ParsePatterns(opts.InsensitiveExclude) - selectExcludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + selectExcludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { matched, err := filter.List(excludePatterns, item) if err != nil { msg.E("error for exclude pattern: %v", err) @@ -204,7 +204,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, includePatterns := filter.ParsePatterns(opts.Include) insensitiveIncludePatterns := filter.ParsePatterns(opts.InsensitiveInclude) - selectIncludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + selectIncludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { matched, childMayMatch, err := filter.ListWithChild(includePatterns, item) if err != nil { msg.E("error for include pattern: %v", err) diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 8dad492afb4..62624e75c49 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -147,7 +147,7 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree) } } else { - filter = func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) { + filter = func(_ 
context.Context, sn *restic.Snapshot) (restic.ID, error) { return *sn.Tree, nil } } diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 1bece21d0af..d3078a419a4 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -367,7 +367,7 @@ func statsDebug(ctx context.Context, repo restic.Repository) error { func statsDebugFileType(ctx context.Context, repo restic.Lister, tpe restic.FileType) (*sizeHistogram, error) { hist := newSizeHistogram(2 * repository.MaxPackSize) - err := repo.List(ctx, tpe, func(id restic.ID, size int64) error { + err := repo.List(ctx, tpe, func(_ restic.ID, size int64) error { hist.Add(uint64(size)) return nil }) diff --git a/cmd/restic/cmd_unlock.go b/cmd/restic/cmd_unlock.go index 7b449d949da..6893f3365f8 100644 --- a/cmd/restic/cmd_unlock.go +++ b/cmd/restic/cmd_unlock.go @@ -19,7 +19,7 @@ EXIT STATUS Exit status is 0 if the command was successful, and non-zero if there was any error. `, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { return runUnlock(cmd.Context(), unlockOptions, globalOptions) }, } diff --git a/cmd/restic/cmd_version.go b/cmd/restic/cmd_version.go index 73469750f75..e3f9b301039 100644 --- a/cmd/restic/cmd_version.go +++ b/cmd/restic/cmd_version.go @@ -21,7 +21,7 @@ EXIT STATUS Exit status is 0 if the command was successful, and non-zero if there was any error. `, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, _ []string) { if globalOptions.JSON { type jsonVersion struct { Version string `json:"version"` diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 4595e81613b..b31ce1bb400 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -37,7 +37,7 @@ The full documentation can be found at https://restic.readthedocs.io/ . 
SilenceUsage: true, DisableAutoGenTag: true, - PersistentPreRunE: func(c *cobra.Command, args []string) error { + PersistentPreRunE: func(c *cobra.Command, _ []string) error { // set verbosity, default is one globalOptions.verbosity = 1 if globalOptions.Quiet && globalOptions.Verbose > 0 { diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index f2c481b3208..77ddba7c4c6 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -147,8 +147,8 @@ func (o Options) ApplyDefaults() Options { func New(repo restic.Repository, fs fs.FS, opts Options) *Archiver { arch := &Archiver{ Repo: repo, - SelectByName: func(item string) bool { return true }, - Select: func(item string, fi os.FileInfo) bool { return true }, + SelectByName: func(_ string) bool { return true }, + Select: func(_ string, _ os.FileInfo) bool { return true }, FS: fs, Options: opts.ApplyDefaults(), @@ -762,7 +762,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps arch.runWorkers(wgCtx, wg) debug.Log("starting snapshot") - fn, nodeCount, err := arch.SaveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(n *restic.Node, is ItemStats) { + fn, nodeCount, err := arch.SaveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) { arch.CompleteItem("/", nil, nil, is, time.Since(start)) }) if err != nil { diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go index 6ce2a47000b..cc419b19eb9 100644 --- a/internal/archiver/scanner.go +++ b/internal/archiver/scanner.go @@ -25,10 +25,10 @@ type Scanner struct { func NewScanner(fs fs.FS) *Scanner { return &Scanner{ FS: fs, - SelectByName: func(item string) bool { return true }, - Select: func(item string, fi os.FileInfo) bool { return true }, - Error: func(item string, err error) error { return err }, - Result: func(item string, s ScanStats) {}, + SelectByName: func(_ string) bool { return true }, + Select: func(_ string, _ os.FileInfo) bool { return true }, + Error: func(_ string, err error) error { return err }, + Result: func(_ string, _ ScanStats) {}, } } diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go index 2698a82755a..eea5b060e4c 100644 --- a/internal/backend/mem/mem_backend.go +++ b/internal/backend/mem/mem_backend.go @@ -28,7 +28,7 @@ func NewFactory() location.Factory { return location.NewHTTPBackendFactory[struct{}, *MemoryBackend]( "mem", - func(s string) (*struct{}, error) { + func(_ string) (*struct{}, error) { return &struct{}{}, nil }, location.NoPassword, diff --git a/internal/backend/rclone/backend.go b/internal/backend/rclone/backend.go index 416162364d6..25082598f8c 100644 --- a/internal/backend/rclone/backend.go +++ b/internal/backend/rclone/backend.go @@ -183,7 +183,7 @@ func newBackend(ctx context.Context, cfg Config, lim limiter.Limiter) (*Backend, dialCount := 0 tr := &http2.Transport{ AllowHTTP: true, // this is not really HTTP, just stdin/stdout - DialTLS: func(network, address string, cfg *tls.Config) (net.Conn, error) { + DialTLS: func(network, address string, _ *tls.Config) (net.Conn, error) { debug.Log("new connection requested, %v %v", network, address) if dialCount > 0 { // the connection to the child process is already closed diff --git a/internal/checker/checker.go b/internal/checker/checker.go index df865cb412a..1e14a9e53f3 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -134,7 +134,7 @@ func (c *Checker) LoadIndex(ctx 
context.Context, p *progress.Counter) (hints []e if p != nil { var numIndexFiles uint64 - err := indexList.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error { + err := indexList.List(ctx, restic.IndexFile, func(_ restic.ID, _ int64) error { numIndexFiles++ return nil }) diff --git a/internal/fs/fs_track.go b/internal/fs/fs_track.go index 319fbfaff8a..0c65a856440 100644 --- a/internal/fs/fs_track.go +++ b/internal/fs/fs_track.go @@ -41,7 +41,7 @@ type trackFile struct { func newTrackFile(stack []byte, filename string, file File) *trackFile { f := &trackFile{file} - runtime.SetFinalizer(f, func(f *trackFile) { + runtime.SetFinalizer(f, func(_ *trackFile) { fmt.Fprintf(os.Stderr, "file %s not closed\n\nStacktrack:\n%s\n", filename, stack) panic("file " + filename + " not closed") }) diff --git a/internal/fuse/snapshots_dirstruct.go b/internal/fuse/snapshots_dirstruct.go index d40ae629809..049319c6f74 100644 --- a/internal/fuse/snapshots_dirstruct.go +++ b/internal/fuse/snapshots_dirstruct.go @@ -295,7 +295,7 @@ func (d *SnapshotsDirStructure) updateSnapshots(ctx context.Context) error { } var snapshots restic.Snapshots - err := d.root.cfg.Filter.FindAll(ctx, d.root.repo, d.root.repo, nil, func(id string, sn *restic.Snapshot, err error) error { + err := d.root.cfg.Filter.FindAll(ctx, d.root.repo, d.root.repo, nil, func(_ string, sn *restic.Snapshot, _ error) error { if sn != nil { snapshots = append(snapshots, sn) } diff --git a/internal/index/index_parallel.go b/internal/index/index_parallel.go index 3c16d049bd0..d51d5930fba 100644 --- a/internal/index/index_parallel.go +++ b/internal/index/index_parallel.go @@ -19,7 +19,7 @@ func ForAllIndexes(ctx context.Context, lister restic.Lister, repo restic.Lister workerCount := repo.Connections() + uint(runtime.GOMAXPROCS(0)) var m sync.Mutex - return restic.ParallelList(ctx, lister, restic.IndexFile, workerCount, func(ctx context.Context, id restic.ID, size int64) error { + return restic.ParallelList(ctx, lister, restic.IndexFile, workerCount, func(ctx context.Context, id restic.ID, _ int64) error { var err error var idx *Index oldFormat := false diff --git a/internal/repository/key.go b/internal/repository/key.go index 5f7a932970d..d9f8d8e1781 100644 --- a/internal/repository/key.go +++ b/internal/repository/key.go @@ -136,7 +136,7 @@ func SearchKey(ctx context.Context, s *Repository, password string, maxKeys int, defer cancel() // try at most maxKeys keys in repo - err = s.List(listCtx, restic.KeyFile, func(id restic.ID, size int64) error { + err = s.List(listCtx, restic.KeyFile, func(id restic.ID, _ int64) error { checked++ if maxKeys > 0 && checked > maxKeys { return ErrMaxKeysReached diff --git a/internal/repository/repair_pack.go b/internal/repository/repair_pack.go index 64279e7277a..2e036889953 100644 --- a/internal/repository/repair_pack.go +++ b/internal/repository/repair_pack.go @@ -68,7 +68,7 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, DeleteProgress: func() *progress.Counter { return printer.NewCounter("old indexes deleted") }, - DeleteReport: func(id restic.ID, err error) { + DeleteReport: func(id restic.ID, _ error) { printer.VV("removed index %v", id.String()) }, }) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index a20f71ab1ae..9a4494c4d67 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -651,7 +651,7 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { if p != nil { var 
numIndexFiles uint64 - err := indexList.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error { + err := indexList.List(ctx, restic.IndexFile, func(_ restic.ID, _ int64) error { numIndexFiles++ return nil }) @@ -662,7 +662,7 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { defer p.Done() } - err = index.ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error { + err = index.ForAllIndexes(ctx, indexList, r, func(_ restic.ID, idx *index.Index, _ bool, err error) error { if err != nil { return err } diff --git a/internal/restic/backend_find.go b/internal/restic/backend_find.go index a6eacabd0a9..2f00595c49e 100644 --- a/internal/restic/backend_find.go +++ b/internal/restic/backend_find.go @@ -30,7 +30,7 @@ func Find(ctx context.Context, be Lister, t FileType, prefix string) (ID, error) ctx, cancel := context.WithCancel(ctx) defer cancel() - err := be.List(ctx, t, func(id ID, size int64) error { + err := be.List(ctx, t, func(id ID, _ int64) error { name := id.String() if len(name) >= len(prefix) && prefix == name[:len(prefix)] { if match.IsNull() { diff --git a/internal/restic/lock.go b/internal/restic/lock.go index d98f8ec94f4..182a3442d9b 100644 --- a/internal/restic/lock.go +++ b/internal/restic/lock.go @@ -341,7 +341,7 @@ func (l *Lock) checkExistence(ctx context.Context) (bool, error) { exists := false - err := l.repo.List(ctx, LockFile, func(id ID, size int64) error { + err := l.repo.List(ctx, LockFile, func(id ID, _ int64) error { if id.Equal(*l.lockID) { exists = true } @@ -415,7 +415,7 @@ func RemoveStaleLocks(ctx context.Context, repo Repository) (uint, error) { // RemoveAllLocks removes all locks forcefully. func RemoveAllLocks(ctx context.Context, repo Repository) (uint, error) { var processed uint32 - err := ParallelList(ctx, repo, LockFile, repo.Connections(), func(ctx context.Context, id ID, size int64) error { + err := ParallelList(ctx, repo, LockFile, repo.Connections(), func(ctx context.Context, id ID, _ int64) error { err := repo.Backend().Remove(ctx, backend.Handle{Type: LockFile, Name: id.String()}) if err == nil { atomic.AddUint32(&processed, 1) diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go index 88171a646ad..8cf651d96f0 100644 --- a/internal/restic/snapshot.go +++ b/internal/restic/snapshot.go @@ -83,7 +83,7 @@ func ForAllSnapshots(ctx context.Context, be Lister, loader LoaderUnpacked, excl var m sync.Mutex // For most snapshots decoding is nearly for free, thus just assume were only limited by IO - return ParallelList(ctx, be, SnapshotFile, loader.Connections(), func(ctx context.Context, id ID, size int64) error { + return ParallelList(ctx, be, SnapshotFile, loader.Connections(), func(ctx context.Context, id ID, _ int64) error { if excludeIDs.Has(id) { return nil } diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 2ce1ee98e77..3f4fb32e340 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -27,7 +27,7 @@ type Restorer struct { SelectFilter func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) } -var restorerAbortOnAllErrors = func(location string, err error) error { return err } +var restorerAbortOnAllErrors = func(_ string, err error) error { return err } // NewRestorer creates a restorer preloaded with the content from the snapshot id. 
func NewRestorer(repo restic.Repository, sn *restic.Snapshot, sparse bool, @@ -239,7 +239,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { // first tree pass: create directories and collect all files to restore _, err = res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ - enterDir: func(node *restic.Node, target, location string) error { + enterDir: func(_ *restic.Node, target, location string) error { debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", location) if res.progress != nil { res.progress.AddFile(0) @@ -366,7 +366,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { defer close(work) _, err := res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ - visitNode: func(node *restic.Node, target, location string) error { + visitNode: func(node *restic.Node, target, _ string) error { if node.Type != "file" { return nil } diff --git a/internal/ui/backup/progress.go b/internal/ui/backup/progress.go index 4362a8c83e2..da0d401a3ab 100644 --- a/internal/ui/backup/progress.go +++ b/internal/ui/backup/progress.go @@ -63,7 +63,7 @@ func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress { printer: printer, estimator: *newRateEstimator(time.Now()), } - p.Updater = *progress.NewUpdater(interval, func(runtime time.Duration, final bool) { + p.Updater = *progress.NewUpdater(interval, func(_ time.Duration, final bool) { if final { p.printer.Reset() } else { diff --git a/internal/walker/rewriter.go b/internal/walker/rewriter.go index 64985703231..6d283a625b0 100644 --- a/internal/walker/rewriter.go +++ b/internal/walker/rewriter.go @@ -39,13 +39,13 @@ func NewTreeRewriter(opts RewriteOpts) *TreeRewriter { } // setup default implementations if rw.opts.RewriteNode == nil { - rw.opts.RewriteNode = func(node *restic.Node, path string) *restic.Node { + rw.opts.RewriteNode = func(node *restic.Node, _ string) *restic.Node { return node } } if rw.opts.RewriteFailedTree == nil { // fail with error by default - rw.opts.RewriteFailedTree = func(nodeID restic.ID, path string, err error) (restic.ID, error) { + rw.opts.RewriteFailedTree = func(_ restic.ID, _ string, err error) (restic.ID, error) { return restic.ID{}, err } } From 2ab18a92e6b00714b539439ea15656471bcb6b78 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 10 Feb 2024 23:42:34 +0100 Subject: [PATCH 007/893] CI: keep tests for Go 1.19 --- .github/workflows/tests.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index cc599491caa..091d42e8a8c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -54,6 +54,11 @@ jobs: os: ubuntu-latest test_fuse: true + - job_name: Linux + go: 1.19.x + os: ubuntu-latest + test_fuse: true + name: ${{ matrix.job_name }} Go ${{ matrix.go }} runs-on: ${{ matrix.os }} From ee1ff3c1d0f6d596ef559e49f9e11a1c53640b0b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 11 Feb 2024 22:26:13 +0100 Subject: [PATCH 008/893] backup: improve error message if exclude file cannot be loaded --- cmd/restic/exclude.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go index 09594461096..d9bb63aebfa 100644 --- a/cmd/restic/exclude.go +++ b/cmd/restic/exclude.go @@ -426,7 +426,7 @@ func readExcludePatternsFromFiles(excludeFiles []string) ([]string, error) { return scanner.Err() }() if err != nil { - return nil, err + 
return nil, fmt.Errorf("failed to read excludes from file %q: %w", filename, err) } } return excludes, nil From 5b5d506472876e3a5916d9943a911db2c5faa887 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 10 Feb 2024 23:41:11 +0100 Subject: [PATCH 009/893] backup: report files whose chunks failed to upload --- internal/archiver/blob_saver.go | 8 +++++--- internal/archiver/blob_saver_test.go | 11 ++++++----- internal/archiver/file_saver.go | 4 ++-- internal/archiver/file_saver_test.go | 2 +- internal/archiver/tree_saver.go | 6 +++--- internal/archiver/tree_saver_test.go | 2 +- 6 files changed, 18 insertions(+), 15 deletions(-) diff --git a/internal/archiver/blob_saver.go b/internal/archiver/blob_saver.go index ae4879ff430..d4347a16918 100644 --- a/internal/archiver/blob_saver.go +++ b/internal/archiver/blob_saver.go @@ -2,6 +2,7 @@ package archiver import ( "context" + "fmt" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" @@ -43,9 +44,9 @@ func (s *BlobSaver) TriggerShutdown() { // Save stores a blob in the repo. It checks the index and the known blobs // before saving anything. It takes ownership of the buffer passed in. -func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)) { +func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf *Buffer, filename string, cb func(res SaveBlobResponse)) { select { - case s.ch <- saveBlobJob{BlobType: t, buf: buf, cb: cb}: + case s.ch <- saveBlobJob{BlobType: t, buf: buf, fn: filename, cb: cb}: case <-ctx.Done(): debug.Log("not sending job, context is cancelled") } @@ -54,6 +55,7 @@ func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf *Buffer, cb type saveBlobJob struct { restic.BlobType buf *Buffer + fn string cb func(res SaveBlobResponse) } @@ -95,7 +97,7 @@ func (s *BlobSaver) worker(ctx context.Context, jobs <-chan saveBlobJob) error { res, err := s.saveBlob(ctx, job.BlobType, job.buf.Data) if err != nil { debug.Log("saveBlob returned error, exiting: %v", err) - return err + return fmt.Errorf("failed to save blob from file %q: %w", job.fn, err) } job.cb(res) job.buf.Release() diff --git a/internal/archiver/blob_saver_test.go b/internal/archiver/blob_saver_test.go index 1996c35b843..180f95b3d7a 100644 --- a/internal/archiver/blob_saver_test.go +++ b/internal/archiver/blob_saver_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "runtime" + "strings" "sync" "sync/atomic" "testing" @@ -11,6 +12,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" ) @@ -57,7 +59,7 @@ func TestBlobSaver(t *testing.T) { lock.Lock() results = append(results, SaveBlobResponse{}) lock.Unlock() - b.Save(ctx, restic.DataBlob, buf, func(res SaveBlobResponse) { + b.Save(ctx, restic.DataBlob, buf, "file", func(res SaveBlobResponse) { lock.Lock() results[idx] = res lock.Unlock() @@ -106,7 +108,7 @@ func TestBlobSaverError(t *testing.T) { for i := 0; i < test.blobs; i++ { buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))} - b.Save(ctx, restic.DataBlob, buf, func(res SaveBlobResponse) {}) + b.Save(ctx, restic.DataBlob, buf, "errfile", func(res SaveBlobResponse) {}) } b.TriggerShutdown() @@ -116,9 +118,8 @@ func TestBlobSaverError(t *testing.T) { t.Errorf("expected error not found") } - if err != errTest { - t.Fatalf("unexpected error found: %v", err) - } + rtest.Assert(t, errors.Is(err, 
errTest), "unexpected error %v", err) + rtest.Assert(t, strings.Contains(err.Error(), "errfile"), "expected error to contain 'errfile' got: %v", err) }) } } diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index 724f5e620ce..7f11bff8a6b 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -16,7 +16,7 @@ import ( ) // SaveBlobFn saves a blob to a repo. -type SaveBlobFn func(context.Context, restic.BlobType, *Buffer, func(res SaveBlobResponse)) +type SaveBlobFn func(context.Context, restic.BlobType, *Buffer, string, func(res SaveBlobResponse)) // FileSaver concurrently saves incoming files to the repo. type FileSaver struct { @@ -205,7 +205,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat node.Content = append(node.Content, restic.ID{}) lock.Unlock() - s.saveBlob(ctx, restic.DataBlob, buf, func(sbr SaveBlobResponse) { + s.saveBlob(ctx, restic.DataBlob, buf, target, func(sbr SaveBlobResponse) { lock.Lock() if !sbr.known { fnr.stats.DataBlobs++ diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index b088eeeedb1..ced9d796e05 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -33,7 +33,7 @@ func createTestFiles(t testing.TB, num int) (files []string) { func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Context, *errgroup.Group) { wg, ctx := errgroup.WithContext(ctx) - saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *Buffer, cb func(SaveBlobResponse)) { + saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *Buffer, _ string, cb func(SaveBlobResponse)) { cb(SaveBlobResponse{ id: restic.Hash(buf.Data), length: len(buf.Data), diff --git a/internal/archiver/tree_saver.go b/internal/archiver/tree_saver.go index a7dae387360..eae524a7805 100644 --- a/internal/archiver/tree_saver.go +++ b/internal/archiver/tree_saver.go @@ -11,7 +11,7 @@ import ( // TreeSaver concurrently saves incoming trees to the repo. type TreeSaver struct { - saveBlob func(ctx context.Context, t restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)) + saveBlob SaveBlobFn errFn ErrorFunc ch chan<- saveTreeJob @@ -19,7 +19,7 @@ type TreeSaver struct { // NewTreeSaver returns a new tree saver. A worker pool with treeWorkers is // started, it is stopped when ctx is cancelled. 
-func NewTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, saveBlob func(ctx context.Context, t restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)), errFn ErrorFunc) *TreeSaver { +func NewTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, saveBlob SaveBlobFn, errFn ErrorFunc) *TreeSaver { ch := make(chan saveTreeJob) s := &TreeSaver{ @@ -126,7 +126,7 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I b := &Buffer{Data: buf} ch := make(chan SaveBlobResponse, 1) - s.saveBlob(ctx, restic.TreeBlob, b, func(res SaveBlobResponse) { + s.saveBlob(ctx, restic.TreeBlob, b, job.target, func(res SaveBlobResponse) { ch <- res }) diff --git a/internal/archiver/tree_saver_test.go b/internal/archiver/tree_saver_test.go index 5de4375d695..47a3f3842c9 100644 --- a/internal/archiver/tree_saver_test.go +++ b/internal/archiver/tree_saver_test.go @@ -12,7 +12,7 @@ import ( "golang.org/x/sync/errgroup" ) -func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)) { +func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *Buffer, _ string, cb func(res SaveBlobResponse)) { cb(SaveBlobResponse{ id: restic.NewRandomID(), known: false, From 22a3cea1b30db58deaaabd430a286f0141850514 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 27 Jan 2024 18:59:32 +0100 Subject: [PATCH 010/893] checker: wrap all pack errors in ErrPackData --- internal/checker/checker.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 1e14a9e53f3..df126f539ce 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -521,7 +521,7 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r debug.Log("checking pack %v", id.String()) if len(blobs) == 0 { - return errors.Errorf("pack %v is empty or not indexed", id) + return &ErrPackData{PackID: id, errs: []error{errors.New("pack is empty or not indexed")}} } // sanity check blobs in index @@ -542,7 +542,7 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r var errs []error if nonContinuousPack { debug.Log("Index for pack contains gaps / overlaps, blobs: %v", blobs) - errs = append(errs, errors.New("Index for pack contains gaps / overlapping blobs")) + errs = append(errs, errors.New("index for pack contains gaps / overlapping blobs")) } // calculate hash on-the-fly while reading the pack and capture pack header @@ -591,21 +591,21 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r if err != nil { // failed to load the pack file, return as further checks cannot succeed anyways debug.Log(" error streaming pack: %v", err) - return errors.Errorf("pack %v failed to download: %v", id, err) + return &ErrPackData{PackID: id, errs: append(errs, errors.Errorf("download error: %w", err))} } if !hash.Equal(id) { - debug.Log("Pack ID does not match, want %v, got %v", id, hash) - return errors.Errorf("Pack ID does not match, want %v, got %v", id, hash) + debug.Log("pack ID does not match, want %v, got %v", id, hash) + return &ErrPackData{PackID: id, errs: append(errs, errors.Errorf("unexpected pack id %v", hash))} } blobs, hdrSize, err := pack.List(r.Key(), bytes.NewReader(hdrBuf), int64(len(hdrBuf))) if err != nil { - return err + return &ErrPackData{PackID: id, errs: append(errs, err)} } if uint32(idxHdrSize) != hdrSize { debug.Log("Pack header size does not match, want %v, 
got %v", idxHdrSize, hdrSize) - errs = append(errs, errors.Errorf("Pack header size does not match, want %v, got %v", idxHdrSize, hdrSize)) + errs = append(errs, errors.Errorf("pack header size does not match, want %v, got %v", idxHdrSize, hdrSize)) } idx := r.Index() @@ -619,7 +619,7 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r } } if !idxHas { - errs = append(errs, errors.Errorf("Blob %v is not contained in index or position is incorrect", blob.ID)) + errs = append(errs, errors.Errorf("blob %v is not contained in index or position is incorrect", blob.ID)) continue } } From 772e3416d1204bd43b539e9ffcfeb5c5602f02db Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 27 Jan 2024 18:59:54 +0100 Subject: [PATCH 011/893] repair pack: drop feature flag --- cmd/restic/cmd_check.go | 2 +- cmd/restic/cmd_repair_packs.go | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 22f462d7537..21c9cc899fd 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -349,7 +349,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args for _, id := range salvagePacks { strIDs = append(strIDs, id.String()) } - Warnf("RESTIC_FEATURES=repair-packs-v1 restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " ")) + Warnf("restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " ")) Warnf("Corrupted blobs are either caused by hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") } } diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 04b06c33b94..521b5859fd4 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -40,13 +40,6 @@ func init() { } func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - // FIXME discuss and add proper feature flag mechanism - flag, _ := os.LookupEnv("RESTIC_FEATURES") - if flag != "repair-packs-v1" { - return errors.Fatal("This command is experimental and may change/be removed without notice between restic versions. 
" + - "Set the environment variable 'RESTIC_FEATURES=repair-packs-v1' to enable it.") - } - ids := restic.NewIDSet() for _, arg := range args { id, err := restic.ParseID(arg) From 544fe38786eeab7aa4077a7c692d2f5e7d994ae8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 27 Jan 2024 19:00:23 +0100 Subject: [PATCH 012/893] check: suggest repair pack for all damaged packs --- cmd/restic/cmd_check.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 21c9cc899fd..8302c72bcfd 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -336,9 +336,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args errorsFound = true Warnf("%v\n", err) if err, ok := err.(*checker.ErrPackData); ok { - if strings.Contains(err.Error(), "wrong data returned, hash is") { - salvagePacks = append(salvagePacks, err.PackID) - } + salvagePacks = append(salvagePacks, err.PackID) } } p.Done() From 6397615fbbca6425e71f4927e6b5c2188f363ed5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 27 Jan 2024 19:04:45 +0100 Subject: [PATCH 013/893] check: document that check will show repair pack instructions --- doc/077_troubleshooting.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/077_troubleshooting.rst b/doc/077_troubleshooting.rst index 6a9a6ee15c0..512b41b0d83 100644 --- a/doc/077_troubleshooting.rst +++ b/doc/077_troubleshooting.rst @@ -76,6 +76,8 @@ Similarly, if a repository is repeatedly damaged, please open an `issue on Githu somewhere. Please include the check output and additional information that might help locate the problem. +If ``check`` detects damaged pack files, it will show instructions on how to repair +them. Please follow the steps from the next section first. 2. 
Backup the repository ************************ From 4073299a7cba6874cfe30467cea86405027d8c42 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 4 Feb 2024 17:22:09 +0100 Subject: [PATCH 014/893] check: fix missing error if blob is invalid --- internal/checker/checker.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index df126f539ce..0fdd3d942b8 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -563,8 +563,8 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r } debug.Log(" check blob %v: %v", val.Handle.ID, val.Handle) if val.Err != nil { - debug.Log(" error verifying blob %v: %v", val.Handle.ID, err) - errs = append(errs, errors.Errorf("blob %v: %v", val.Handle.ID, err)) + debug.Log(" error verifying blob %v: %v", val.Handle.ID, val.Err) + errs = append(errs, errors.Errorf("blob %v: %v", val.Handle.ID, val.Err)) } } From ed4a4f8748320a4aab2591c6d478fa442637f7de Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 10 Feb 2024 20:27:17 +0100 Subject: [PATCH 015/893] check: exclude inaccessible files from the repair pack suggestion --- internal/checker/checker.go | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 0fdd3d942b8..28f55ce3ad2 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -516,6 +516,14 @@ func (c *Checker) GetPacks() map[restic.ID]int64 { return c.packs } +type partialReadError struct { + err error +} + +func (e *partialReadError) Error() string { + return e.err.Error() +} + // checkPack reads a pack and checks the integrity of all blobs. func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { debug.Log("checking pack %v", id.String()) @@ -559,7 +567,7 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r if err == repository.ErrPackEOF { break } else if err != nil { - return err + return &partialReadError{err} } debug.Log(" check blob %v: %v", val.Handle.ID, val.Handle) if val.Err != nil { @@ -574,7 +582,7 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r if minHdrStart > curPos { _, err := bufRd.Discard(minHdrStart - curPos) if err != nil { - return err + return &partialReadError{err} } } @@ -582,16 +590,24 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r var err error hdrBuf, err = io.ReadAll(bufRd) if err != nil { - return err + return &partialReadError{err} } hash = restic.IDFromHash(hrd.Sum(nil)) return nil }) if err != nil { + var e *partialReadError + isPartialReadError := errors.As(err, &e) // failed to load the pack file, return as further checks cannot succeed anyways - debug.Log(" error streaming pack: %v", err) - return &ErrPackData{PackID: id, errs: append(errs, errors.Errorf("download error: %w", err))} + debug.Log(" error streaming pack (partial %v): %v", isPartialReadError, err) + if isPartialReadError { + return &ErrPackData{PackID: id, errs: append(errs, errors.Errorf("partial download error: %w", err))} + } + + // The check command suggests to repair files for which a `ErrPackData` is returned. However, this file + // completely failed to download such that there's no point in repairing anything. 
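// Aside (illustration, not part of this patch): the wrap-and-classify pattern
// used above, reduced to a self-contained sketch. Errors raised after the pack
// was at least partially read are wrapped in a marker type so the caller can
// use errors.As to decide whether suggesting `repair packs` is useful. The
// messages and the standalone program below are illustrative only.
package main

import (
	"errors"
	"fmt"
)

type partialReadError struct{ err error }

func (e *partialReadError) Error() string { return e.err.Error() }

func classify(err error) string {
	var pErr *partialReadError
	if errors.As(err, &pErr) {
		return "partial read: pack is damaged but worth repairing"
	}
	return "download failed entirely: nothing left to repair"
}

func main() {
	fmt.Println(classify(&partialReadError{errors.New("unexpected EOF")}))
	fmt.Println(classify(errors.New("connection refused")))
}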
+ return errors.Errorf("download error: %w", err) } if !hash.Equal(id) { debug.Log("pack ID does not match, want %v, got %v", id, hash) From 527a3ff2b218902d9d4e3c6eab7e67b8b2edea74 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 10 Feb 2024 20:27:47 +0100 Subject: [PATCH 016/893] check: link to troubleshooting guide --- cmd/restic/cmd_check.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 8302c72bcfd..990702b61d9 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -342,7 +342,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args p.Done() if len(salvagePacks) > 0 { - Warnf("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands:\n\n") + Warnf("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands. Please read the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html first.\n\n") var strIDs []string for _, id := range salvagePacks { strIDs = append(strIDs, id.String()) From 9e3703ded525b2d6755fa385b8f82b6f785d1392 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 12 Feb 2024 20:39:31 +0100 Subject: [PATCH 017/893] remove changelogs that are already included in restic 0.16.4 --- changelog/unreleased/issue-4529 | 18 ------------------ changelog/unreleased/issue-4677 | 19 ------------------- 2 files changed, 37 deletions(-) delete mode 100644 changelog/unreleased/issue-4529 delete mode 100644 changelog/unreleased/issue-4677 diff --git a/changelog/unreleased/issue-4529 b/changelog/unreleased/issue-4529 deleted file mode 100644 index fed726d2d1d..00000000000 --- a/changelog/unreleased/issue-4529 +++ /dev/null @@ -1,18 +0,0 @@ -Enhancement: Add extra verification of data integrity before upload - -Hardware issues, or a bug in restic or its dependencies, could previously cause -corruption in the files restic created and stored in the repository. Detecting -such corruption previously required explicitly running the `check --read-data` -or `check --read-data-subset` commands. - -To further ensure data integrity, even in the case of hardware issues or -software bugs, restic now performs additional verification of the files about -to be uploaded to the repository. - -These extra checks will increase CPU usage during backups. They can therefore, -if absolutely necessary, be disabled using the `--no-extra-verify` global -option. Please note that this should be combined with more active checking -using the previously mentioned check commands. - -https://github.com/restic/restic/issues/4529 -https://github.com/restic/restic/pull/4681 diff --git a/changelog/unreleased/issue-4677 b/changelog/unreleased/issue-4677 deleted file mode 100644 index 8fa6cf65be4..00000000000 --- a/changelog/unreleased/issue-4677 +++ /dev/null @@ -1,19 +0,0 @@ -Bugfix: Downgrade zstd library to fix rare data corruption at max. compression - -In restic 0.16.3, backups where the compression level was set to `max` (using -`--compression max`) could in rare and very specific circumstances result in -data corruption due to a bug in the library used for compressing data. Restic -0.16.1 and 0.16.2 were not affected. - -Restic now uses the previous version of the library used to compress data, the -same version used by restic 0.16.2. 
Please note that the `auto` compression -level (which restic uses by default) was never affected, and even if you used -`max` compression, chances of being affected by this issue are small. - -To check a repository for any corruption, run `restic check --read-data`. This -will download and verify the whole repository and can be used at any time to -completely verify the integrity of a repository. If the `check` command detects -anomalies, follow the suggested steps. - -https://github.com/restic/restic/issues/4677 -https://github.com/restic/restic/pull/4679 From 69304cd74f9853c36fb6d34a07ae1a42aa552ff4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 10 Feb 2024 20:29:11 +0100 Subject: [PATCH 018/893] check: clarify repair pack usage --- doc/077_troubleshooting.rst | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/doc/077_troubleshooting.rst b/doc/077_troubleshooting.rst index 512b41b0d83..f80df29b8c6 100644 --- a/doc/077_troubleshooting.rst +++ b/doc/077_troubleshooting.rst @@ -77,7 +77,9 @@ somewhere. Please include the check output and additional information that might help locate the problem. If ``check`` detects damaged pack files, it will show instructions on how to repair -them. Please follow the steps from the next section first. +them using the ``repair pack`` command. Use that command instead of the "Repair the +index" section in this guide. + 2. Backup the repository ************************ @@ -106,6 +108,11 @@ whether your issue is already known and solved. Please take a look at the 3. Repair the index ******************* +.. note:: + + If the `check` command tells you to run `restic repair pack`, then use that + command instead. It will repair the damaged pack files and also update the index. + Restic relies on its index to contain correct information about what data is stored in the repository. Thus, the first step to repair a repository is to repair the index: From 0a36d193d84a13dea9911fb7ad9e071f24db40ec Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 12 Feb 2024 20:38:20 +0100 Subject: [PATCH 019/893] add changelog for enhanced repair packs --- changelog/unreleased/pull-4644 | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 changelog/unreleased/pull-4644 diff --git a/changelog/unreleased/pull-4644 b/changelog/unreleased/pull-4644 new file mode 100644 index 00000000000..8000bce7eda --- /dev/null +++ b/changelog/unreleased/pull-4644 @@ -0,0 +1,10 @@ +Enhancement: Improve `repair packs` command + +The `repair packs` command has been improved to also be able to process +truncated pack files. The `check --read-data` command will provide instructions +on using the command if necessary to repair a repository. See the guide at +https://restic.readthedocs.io/en/stable/077_troubleshooting.html for further +instructions. 
+ +https://github.com/restic/restic/pull/4644 +https://github.com/restic/restic/pull/4655 From 18b0bbbf420131f8d6194fbbe89217a100934ff4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 17 Feb 2024 19:37:32 +0100 Subject: [PATCH 020/893] repository: use fmt.Errorf in StreamPacks --- internal/repository/repository.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 9a4494c4d67..87eb0136738 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -1079,7 +1079,7 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) { skipBytes := int(entry.Offset - b.currentOffset) if skipBytes < 0 { - return PackBlobValue{}, errors.Errorf("overlapping blobs in pack %v", b.packID) + return PackBlobValue{}, fmt.Errorf("overlapping blobs in pack %v", b.packID) } _, err := b.rd.Discard(skipBytes) @@ -1099,18 +1099,18 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) { n, err := io.ReadFull(b.rd, b.buf) if err != nil { debug.Log(" read error %v", err) - return PackBlobValue{}, errors.Wrap(err, "ReadFull") + return PackBlobValue{}, fmt.Errorf("readFull: %w", err) } if n != len(b.buf) { - return PackBlobValue{}, errors.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v", + return PackBlobValue{}, fmt.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v", h, b.packID.Str(), len(b.buf), n) } b.currentOffset = entry.Offset + entry.Length if int(entry.Length) <= b.key.NonceSize() { debug.Log("%v", b.blobs) - return PackBlobValue{}, errors.Errorf("invalid blob length %v", entry) + return PackBlobValue{}, fmt.Errorf("invalid blob length %v", entry) } // decryption errors are likely permanent, give the caller a chance to skip them @@ -1130,7 +1130,7 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) { if !id.Equal(entry.ID) { debug.Log("read blob %v/%v from %v: wrong data returned, hash is %v", h.Type, h.ID, b.packID.Str(), id) - err = errors.Errorf("read blob %v from %v: wrong data returned, hash is %v", + err = fmt.Errorf("read blob %v from %v: wrong data returned, hash is %v", h, b.packID.Str(), id) } } From 4c3218ef9f35e6a8336a096e45d5df7b57244bc4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 17 Feb 2024 19:38:01 +0100 Subject: [PATCH 021/893] repository: include packID in StreamPack for decrypt/decompress errors --- internal/repository/repository.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 87eb0136738..8e34c712559 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -1116,13 +1116,16 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) { // decryption errors are likely permanent, give the caller a chance to skip them nonce, ciphertext := b.buf[:b.key.NonceSize()], b.buf[b.key.NonceSize():] plaintext, err := b.key.Open(ciphertext[:0], nonce, ciphertext, nil) + if err != nil { + err = fmt.Errorf("decrypting blob %v from %v failed: %w", h, b.packID.Str(), err) + } if err == nil && entry.IsCompressed() { // DecodeAll will allocate a slice if it is not large enough since it // knows the decompressed size (because we're using EncodeAll) b.decode, err = b.dec.DecodeAll(plaintext, b.decode[:0]) plaintext = b.decode if err != nil { - err = errors.Errorf("decompressing blob %v failed: %v", h, err) + err = fmt.Errorf("decompressing blob %v from %v failed: %w", h, 
b.packID.Str(), err) } } if err == nil { From 30e979d2527817eead2cd9786023aa3e247f9186 Mon Sep 17 00:00:00 2001 From: Brian Harring Date: Mon, 19 Feb 2024 11:09:25 +0100 Subject: [PATCH 022/893] Catch SIGTERM, run cleanup The previous code only ran cleanup (lock release for example) on SIGINT. For anyone running restic in a container, the signal is going to be SIGTERM which means containerized execution would leave locks behind. While this could be addressed via interposing dumb-init to translate the signal, a `kill` invocation is going to default to SIGTERM, so the same problem exists for non container users. Signed-off-by: Brian Harring --- cmd/restic/cleanup.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go index 75933fe966a..d1c8ecc1a2b 100644 --- a/cmd/restic/cleanup.go +++ b/cmd/restic/cleanup.go @@ -19,7 +19,7 @@ var cleanupHandlers struct { func init() { cleanupHandlers.ch = make(chan os.Signal, 1) go CleanupHandler(cleanupHandlers.ch) - signal.Notify(cleanupHandlers.ch, syscall.SIGINT) + signal.Notify(cleanupHandlers.ch, syscall.SIGINT, syscall.SIGTERM) } // AddCleanupHandler adds the function f to the list of cleanup handlers so @@ -70,7 +70,7 @@ func CleanupHandler(c <-chan os.Signal) { code := 0 - if s == syscall.SIGINT { + if s == syscall.SIGINT || s == syscall.SIGTERM { code = 130 } else { code = 1 From b41107dcaf214743b0bc6edc445a6d1882a3283c Mon Sep 17 00:00:00 2001 From: Brian Harring Date: Mon, 19 Feb 2024 11:31:48 +0100 Subject: [PATCH 023/893] Add changelog for SIGTERM bugfix. Signed-off-by: Brian Harring --- changelog/unreleased/pull-4703 | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 changelog/unreleased/pull-4703 diff --git a/changelog/unreleased/pull-4703 b/changelog/unreleased/pull-4703 new file mode 100644 index 00000000000..4df3385a019 --- /dev/null +++ b/changelog/unreleased/pull-4703 @@ -0,0 +1,9 @@ +Bugfix: Shutdown cleanly when SIGTERM is received + +Prior, if restic received SIGTERM it'd just immediately terminate skipping +cleanup- resulting in potential issues like stale locks being left behind. + +This primarily effected containerized restic invocations- they use SIGTERM- +but this could be triggered via a simple `killall restic` in addition. + +https://github.com/restic/restic/pull/4703 From 0a65a0f94fa6d22a32e227024f34c23126b754e3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 22 Feb 2024 22:00:42 +0100 Subject: [PATCH 024/893] update comment --- cmd/restic/cleanup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go index d1c8ecc1a2b..5a6cf79e11d 100644 --- a/cmd/restic/cleanup.go +++ b/cmd/restic/cleanup.go @@ -56,7 +56,7 @@ func RunCleanupHandlers(code int) int { return code } -// CleanupHandler handles the SIGINT signals. +// CleanupHandler handles the SIGINT and SIGTERM signals. 
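// Aside (illustration, not restic's actual code): the signal-handling pattern
// this patch extends, as a runnable sketch. Registering SIGTERM in addition to
// SIGINT means cleanup (for example lock removal) also runs under a plain
// `kill` or a container shutdown; exit code 130 follows the shell convention
// for interrupted processes, as in the patch above.
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)

	s := <-ch
	fmt.Fprintf(os.Stderr, "signal %v received, cleaning up\n", s)
	// release locks and other resources here, then exit
	os.Exit(130)
}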
func CleanupHandler(c <-chan os.Signal) { for s := range c { debug.Log("signal %v received, cleaning up", s) From 94de87d4b7d6b12ffbe3726954ed4b02d4360a3a Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Thu, 22 Feb 2024 16:57:00 -0700 Subject: [PATCH 025/893] Add CombineErrors helper function --- internal/errors/errors.go | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/internal/errors/errors.go b/internal/errors/errors.go index 0327ea0da40..3c669f86135 100644 --- a/internal/errors/errors.go +++ b/internal/errors/errors.go @@ -2,6 +2,7 @@ package errors import ( stderrors "errors" + "fmt" "github.com/pkg/errors" ) @@ -22,12 +23,42 @@ var Wrap = errors.Wrap // nil, Wrapf returns nil. var Wrapf = errors.Wrapf +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. var WithStack = errors.WithStack // Go 1.13-style error handling. +// As finds the first error in err's tree that matches target, and if one is found, +// sets target to that error value and returns true. Otherwise, it returns false. func As(err error, tgt interface{}) bool { return stderrors.As(err, tgt) } +// Is reports whether any error in err's tree matches target. func Is(x, y error) bool { return stderrors.Is(x, y) } +// Unwrap returns the result of calling the Unwrap method on err, if err's type contains +// an Unwrap method returning error. Otherwise, Unwrap returns nil. +// +// Unwrap only calls a method of the form "Unwrap() error". In particular Unwrap does not +// unwrap errors returned by [Join]. func Unwrap(err error) error { return stderrors.Unwrap(err) } + +// CombineErrors combines multiple errors into a single error. +func CombineErrors(errors ...error) error { + var combinedErrorMsg string + + for _, err := range errors { + if err != nil { + if combinedErrorMsg != "" { + combinedErrorMsg += "; " // Separate error messages with a delimiter + } + combinedErrorMsg += err.Error() + } + } + + if combinedErrorMsg == "" { + return nil // No errors, return nil + } + + return fmt.Errorf("multiple errors occurred: [%s]", combinedErrorMsg) +} From 62a8a599f1f4f36c55e17106a8e40bd956430058 Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Thu, 22 Feb 2024 16:58:12 -0700 Subject: [PATCH 026/893] Add optional messages for Equals helper --- internal/test/helpers.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/internal/test/helpers.go b/internal/test/helpers.go index 65e3e36ec6c..242da607944 100644 --- a/internal/test/helpers.go +++ b/internal/test/helpers.go @@ -3,6 +3,7 @@ package test import ( "compress/bzip2" "compress/gzip" + "fmt" "io" "os" "os/exec" @@ -47,10 +48,22 @@ func OKs(tb testing.TB, errs []error) { } // Equals fails the test if exp is not equal to act. -func Equals(tb testing.TB, exp, act interface{}) { +// msg is optional message to be printed, first param being format string and rest being arguments. +func Equals(tb testing.TB, exp, act interface{}, msgs ...string) { tb.Helper() if !reflect.DeepEqual(exp, act) { - tb.Fatalf("\033[31m\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", exp, act) + var msgString string + length := len(msgs) + if length == 1 { + msgString = msgs[0] + } else if length > 1 { + args := make([]interface{}, length-1) + for i, msg := range msgs[1:] { + args[i] = msg + } + msgString = fmt.Sprintf(msgs[0], args...) 
+ } + tb.Fatalf("\033[31m\n\n\t"+msgString+"\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", exp, act) } } From 0962917974952531347f82dffc0d6e82b1edf5f5 Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Thu, 22 Feb 2024 17:31:20 -0700 Subject: [PATCH 027/893] Support windows metadata using generic attribs Add new generic_attributes attribute in Node. Use the generic attributes to add support for creation time and file attributes like hidden, readonly, encrypted in windows. Handle permission errors for readonly files in windows. Handle backup and restore of encrypted attributes using windows system calls. --- cmd/restic/cmd_find.go | 1 + internal/fs/file.go | 14 ++ internal/fs/file_windows.go | 26 ++++ internal/restic/node.go | 247 +++++++++++++++++++++++++++++-- internal/restic/node_aix.go | 17 ++- internal/restic/node_netbsd.go | 24 ++- internal/restic/node_openbsd.go | 24 ++- internal/restic/node_test.go | 35 +++-- internal/restic/node_windows.go | 224 +++++++++++++++++++++++++++- internal/restic/node_xattr.go | 11 ++ internal/restorer/fileswriter.go | 28 ++-- 11 files changed, 591 insertions(+), 60 deletions(-) diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 04e6ae3ddb7..7ea7c425a7d 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -126,6 +126,7 @@ func (s *statefulOutput) PrintPatternJSON(path string, node *restic.Node) { // Make the following attributes disappear Name byte `json:"name,omitempty"` ExtendedAttributes byte `json:"extended_attributes,omitempty"` + GenericAttributes byte `json:"generic_attributes,omitempty"` Device byte `json:"device,omitempty"` Content byte `json:"content,omitempty"` Subtree byte `json:"subtree,omitempty"` diff --git a/internal/fs/file.go b/internal/fs/file.go index f35901c06ef..4a236ea090a 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -124,3 +124,17 @@ func RemoveIfExists(filename string) error { func Chtimes(name string, atime time.Time, mtime time.Time) error { return os.Chtimes(fixpath(name), atime, mtime) } + +// IsAccessDenied checks if the error is due to permission error. +func IsAccessDenied(err error) bool { + return os.IsPermission(err) +} + +// ResetPermissions resets the permissions of the file at the specified path +func ResetPermissions(path string) error { + // Set the default file permissions + if err := os.Chmod(path, 0600); err != nil { + return err + } + return nil +} diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go index d19a744e1be..2f09698040c 100644 --- a/internal/fs/file_windows.go +++ b/internal/fs/file_windows.go @@ -77,3 +77,29 @@ func TempFile(dir, prefix string) (f *os.File, err error) { func Chmod(name string, mode os.FileMode) error { return os.Chmod(fixpath(name), mode) } + +// ClearSystem removes the system attribute from the file. +func ClearSystem(path string) error { + return ClearAttribute(path, windows.FILE_ATTRIBUTE_SYSTEM) +} + +// ClearAttribute removes the specified attribute from the file. 
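// Aside (usage sketch, not part of the diff): the retry pattern that the new
// IsAccessDenied and ResetPermissions helpers enable. A read-only file on
// Windows cannot be opened for writing, so the permissions are temporarily
// reset and the open retried; the restore's metadata pass re-applies the
// read-only attribute afterwards. Shown with plain os calls, which the
// helpers above wrap; the surrounding program is illustrative only.
package main

import "os"

func openForWrite(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_TRUNC|os.O_WRONLY, 0600)
	if err != nil && os.IsPermission(err) { // fs.IsAccessDenied in restic
		if err = os.Chmod(path, 0600); err != nil { // fs.ResetPermissions in restic
			return nil, err
		}
		f, err = os.OpenFile(path, os.O_TRUNC|os.O_WRONLY, 0600)
	}
	return f, err
}

func main() {
	if f, err := openForWrite("example.txt"); err == nil {
		_ = f.Close()
	}
}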
+func ClearAttribute(path string, attribute uint32) error { + ptr, err := windows.UTF16PtrFromString(path) + if err != nil { + return err + } + fileAttributes, err := windows.GetFileAttributes(ptr) + if err != nil { + return err + } + if fileAttributes&attribute != 0 { + // Clear the attribute + fileAttributes &= ^uint32(attribute) + err = windows.SetFileAttributes(ptr, fileAttributes) + if err != nil { + return err + } + } + return nil +} diff --git a/internal/restic/node.go b/internal/restic/node.go index 1d5bb51af43..44ca52b0c05 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -6,7 +6,9 @@ import ( "fmt" "os" "os/user" + "reflect" "strconv" + "strings" "sync" "syscall" "time" @@ -20,12 +22,53 @@ import ( "github.com/restic/restic/internal/fs" ) -// ExtendedAttribute is a tuple storing the xattr name and value. +// ExtendedAttribute is a tuple storing the xattr name and value for various filesystems. type ExtendedAttribute struct { Name string `json:"name"` Value []byte `json:"value"` } +// GenericAttributeType can be used for OS specific functionalities by defining specific types +// in node.go to be used by the specific node_xx files. +// OS specific attribute types should follow the convention Attributes. +// GenericAttributeTypes should follow the convention . +// The attributes in OS specific attribute types must be pointers as we want to distinguish nil values +// and not create GenericAttributes for them. +type GenericAttributeType string + +// OSType is the type created to represent each specific OS +type OSType string + +const ( + // When new GenericAttributeType are defined, they must be added in the init function as well. + + // Below are windows specific attributes. + + // TypeCreationTime is the GenericAttributeType used for storing creation time for windows files within the generic attributes map. + TypeCreationTime GenericAttributeType = "windows.creation_time" + // TypeFileAttributes is the GenericAttributeType used for storing file attributes for windows files within the generic attributes map. + TypeFileAttributes GenericAttributeType = "windows.file_attributes" + + // Generic Attributes for other OS types should be defined here. +) + +// init is called when the package is initialized. Any new GenericAttributeTypes being created must be added here as well. +func init() { + storeGenericAttributeType(TypeCreationTime, TypeFileAttributes) +} + +// genericAttributesForOS maintains a map of known genericAttributesForOS to the OSType +var genericAttributesForOS = map[GenericAttributeType]OSType{} + +// storeGenericAttributeType adds and entry in genericAttributesForOS map +func storeGenericAttributeType(attributeTypes ...GenericAttributeType) { + for _, attributeType := range attributeTypes { + // Get the OS attribute type from the GenericAttributeType + osAttributeName := strings.Split(string(attributeType), ".")[0] + genericAttributesForOS[attributeType] = OSType(osAttributeName) + } +} + // Node is a file, directory or other item in a backup. type Node struct { Name string `json:"name"` @@ -47,11 +90,12 @@ type Node struct { // This allows storing arbitrary byte-sequences, which are possible as symlink targets on unix systems, // as LinkTarget without breaking backwards-compatibility. // Must only be set of the linktarget cannot be encoded as valid utf8. 
- LinkTargetRaw []byte `json:"linktarget_raw,omitempty"` - ExtendedAttributes []ExtendedAttribute `json:"extended_attributes,omitempty"` - Device uint64 `json:"device,omitempty"` // in case of Type == "dev", stat.st_rdev - Content IDs `json:"content"` - Subtree *ID `json:"subtree,omitempty"` + LinkTargetRaw []byte `json:"linktarget_raw,omitempty"` + ExtendedAttributes []ExtendedAttribute `json:"extended_attributes,omitempty"` + GenericAttributes map[GenericAttributeType]json.RawMessage `json:"generic_attributes,omitempty"` + Device uint64 `json:"device,omitempty"` // in case of Type == "dev", stat.st_rdev + Content IDs `json:"content"` + Subtree *ID `json:"subtree,omitempty"` Error string `json:"error,omitempty"` @@ -203,14 +247,6 @@ func (node Node) restoreMetadata(path string) error { } } - if node.Type != "symlink" { - if err := fs.Chmod(path, node.Mode); err != nil { - if firsterr != nil { - firsterr = errors.WithStack(err) - } - } - } - if err := node.RestoreTimestamps(path); err != nil { debug.Log("error restoring timestamps for dir %v: %v", path, err) if firsterr != nil { @@ -225,6 +261,24 @@ func (node Node) restoreMetadata(path string) error { } } + if err := node.restoreGenericAttributes(path); err != nil { + debug.Log("error restoring generic attributes for %v: %v", path, err) + if firsterr != nil { + firsterr = err + } + } + + // Moving RestoreTimestamps and restoreExtendedAttributes calls above as for readonly files in windows + // calling Chmod below will no longer allow any modifications to be made on the file and the + // calls above would fail. + if node.Type != "symlink" { + if err := fs.Chmod(path, node.Mode); err != nil { + if firsterr != nil { + firsterr = errors.WithStack(err) + } + } + } + return firsterr } @@ -438,6 +492,9 @@ func (node Node) Equals(other Node) bool { if !node.sameExtendedAttributes(other) { return false } + if !node.sameGenericAttributes(other) { + return false + } if node.Subtree != nil { if other.Subtree == nil { return false @@ -480,8 +537,13 @@ func (node Node) sameContent(other Node) bool { } func (node Node) sameExtendedAttributes(other Node) bool { - if len(node.ExtendedAttributes) != len(other.ExtendedAttributes) { + ln := len(node.ExtendedAttributes) + lo := len(other.ExtendedAttributes) + if ln != lo { return false + } else if ln == 0 { + // This means lo is also of length 0 + return true } // build a set of all attributes that node has @@ -525,6 +587,33 @@ func (node Node) sameExtendedAttributes(other Node) bool { return true } +func (node Node) sameGenericAttributes(other Node) bool { + return deepEqual(node.GenericAttributes, other.GenericAttributes) +} + +func deepEqual(map1, map2 map[GenericAttributeType]json.RawMessage) bool { + // Check if the maps have the same number of keys + if len(map1) != len(map2) { + return false + } + + // Iterate over each key-value pair in map1 + for key, value1 := range map1 { + // Check if the key exists in map2 + value2, ok := map2[key] + if !ok { + return false + } + + // Check if the JSON.RawMessage values are equal byte by byte + if !bytes.Equal(value1, value2) { + return false + } + } + + return true +} + func (node *Node) fillUser(stat *statT) { uid, gid := stat.uid(), stat.gid() node.UID, node.GID = uid, gid @@ -627,7 +716,17 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { return errors.Errorf("unsupported file type %q", node.Type) } - return node.fillExtendedAttributes(path) + allowExtended, err := node.fillGenericAttributes(path, fi, stat) + if allowExtended { + // Skip 
processing ExtendedAttributes if allowExtended is false. + errEx := node.fillExtendedAttributes(path) + if err == nil { + err = errEx + } else { + debug.Log("Error filling extended attributes for %v at %v : %v", node.Name, path, errEx) + } + } + return err } func (node *Node) fillExtendedAttributes(path string) error { @@ -665,3 +764,119 @@ func (node *Node) fillTimes(stat *statT) { node.ChangeTime = time.Unix(ctim.Unix()) node.AccessTime = time.Unix(atim.Unix()) } + +// HandleUnknownGenericAttributesFound is used for handling and distinguing between scenarios related to future versions and cross-OS repositories +func HandleUnknownGenericAttributesFound(unknownAttribs []GenericAttributeType) { + for _, unknownAttrib := range unknownAttribs { + handleUnknownGenericAttributeFound(unknownAttrib) + } +} + +// handleUnknownGenericAttributeFound is used for handling and distinguing between scenarios related to future versions and cross-OS repositories +func handleUnknownGenericAttributeFound(genericAttributeType GenericAttributeType) { + if checkGenericAttributeNameNotHandledAndPut(genericAttributeType) { + // Print the unique error only once for a given execution + os, exists := genericAttributesForOS[genericAttributeType] + + if exists { + // If genericAttributesForOS contains an entry but we still got here, it means the specific node_xx.go for the current OS did not handle it and the repository may have been originally created on a different OS. + // The fact that node.go knows about the attribute, means it is not a new attribute. This may be a common situation if a repo is used across OSs. + debug.Log("Ignoring a generic attribute found in the repository: %s which may not be compatible with your OS. Compatible OS: %s", genericAttributeType, os) + } else { + // If genericAttributesForOS in node.go does not know about this attribute, then the repository may have been created by a newer version which has a newer GenericAttributeType. + debug.Log("Found an unrecognized generic attribute in the repository: %s. You may need to upgrade to latest version of restic.", genericAttributeType) + } + } +} + +// handleAllUnknownGenericAttributesFound performs validations for all generic attributes in the node. +// This is not used on windows currently because windows has handling for generic attributes. +// nolint:unused +func (node Node) handleAllUnknownGenericAttributesFound() error { + for name := range node.GenericAttributes { + handleUnknownGenericAttributeFound(name) + } + return nil +} + +var unknownGenericAttributesHandlingHistory sync.Map + +// checkGenericAttributeNameNotHandledAndPut checks if the GenericAttributeType name entry +// already exists and puts it in the map if not. +func checkGenericAttributeNameNotHandledAndPut(value GenericAttributeType) bool { + // If Key doesn't exist, put the value and return true because it is not already handled + _, exists := unknownGenericAttributesHandlingHistory.LoadOrStore(value, "") + // Key exists, then it is already handled so return false + return !exists +} + +// The functions below are common helper functions which can be used for generic attributes support +// across different OS. 
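// Aside (toy illustration, not restic's types): the struct-to-map half of the
// reflection round-trip that the helpers below implement. Pointer fields
// tagged `generic:"..."` are marshalled into a map keyed by "<os>.<tag>";
// nil pointers produce no entry, so absent attributes never show up in the
// repository. All names in this sketch are made up for the example.
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

type toyAttrs struct {
	CreationTime *int64  `generic:"creation_time"`
	FileFlags    *uint32 `generic:"file_flags"`
}

func toGeneric(a toyAttrs, prefix string) (map[string]json.RawMessage, error) {
	out := make(map[string]json.RawMessage)
	v := reflect.ValueOf(a)
	t := reflect.TypeOf(a)
	for i := 0; i < t.NumField(); i++ {
		if v.Field(i).IsNil() {
			continue // nil pointer: attribute not set, store nothing
		}
		raw, err := json.Marshal(v.Field(i).Interface())
		if err != nil {
			return nil, err
		}
		out[prefix+"."+t.Field(i).Tag.Get("generic")] = raw
	}
	return out, nil
}

func main() {
	ct := int64(133515148000000000)
	attrs, _ := toGeneric(toyAttrs{CreationTime: &ct}, "windows")
	b, _ := json.Marshal(attrs)
	fmt.Println(string(b)) // {"windows.creation_time":133515148000000000}
}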
+ +// genericAttributesToOSAttrs gets the os specific attribute from the generic attribute using reflection +// nolint:unused +func genericAttributesToOSAttrs(attrs map[GenericAttributeType]json.RawMessage, attributeType reflect.Type, attributeValuePtr *reflect.Value, keyPrefix string) (unknownAttribs []GenericAttributeType, err error) { + attributeValue := *attributeValuePtr + + for key, rawMsg := range attrs { + found := false + for i := 0; i < attributeType.NumField(); i++ { + if getFQKeyByIndex(attributeType, i, keyPrefix) == key { + found = true + fieldValue := attributeValue.Field(i) + // For directly supported types, use json.Unmarshal directly + if err := json.Unmarshal(rawMsg, fieldValue.Addr().Interface()); err != nil { + return unknownAttribs, errors.Wrap(err, "Unmarshal") + } + break + } + } + if !found { + unknownAttribs = append(unknownAttribs, key) + } + } + return unknownAttribs, nil +} + +// getFQKey gets the fully qualified key for the field +// nolint:unused +func getFQKey(field reflect.StructField, keyPrefix string) GenericAttributeType { + return GenericAttributeType(fmt.Sprintf("%s.%s", keyPrefix, field.Tag.Get("generic"))) +} + +// getFQKeyByIndex gets the fully qualified key for the field index +// nolint:unused +func getFQKeyByIndex(attributeType reflect.Type, index int, keyPrefix string) GenericAttributeType { + return getFQKey(attributeType.Field(index), keyPrefix) +} + +// osAttrsToGenericAttributes gets the generic attribute from the os specific attribute using reflection +// nolint:unused +func osAttrsToGenericAttributes(attributeType reflect.Type, attributeValuePtr *reflect.Value, keyPrefix string) (attrs map[GenericAttributeType]json.RawMessage, err error) { + attributeValue := *attributeValuePtr + attrs = make(map[GenericAttributeType]json.RawMessage) + + // Iterate over the fields of the struct + for i := 0; i < attributeType.NumField(); i++ { + field := attributeType.Field(i) + + // Get the field value using reflection + fieldValue := attributeValue.FieldByName(field.Name) + + // Check if the field is nil + if fieldValue.IsNil() { + // If it's nil, skip this field + continue + } + + // Marshal the field value into a json.RawMessage + var fieldBytes []byte + if fieldBytes, err = json.Marshal(fieldValue.Interface()); err != nil { + return attrs, errors.Wrap(err, "Marshal") + } + + // Insert the field into the map + attrs[getFQKey(field, keyPrefix)] = json.RawMessage(fieldBytes) + } + return attrs, nil +} diff --git a/internal/restic/node_aix.go b/internal/restic/node_aix.go index 572e33a6508..4d8c248de68 100644 --- a/internal/restic/node_aix.go +++ b/internal/restic/node_aix.go @@ -3,9 +3,12 @@ package restic -import "syscall" +import ( + "os" + "syscall" +) -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { +func (node Node) restoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { return nil } @@ -34,3 +37,13 @@ func Listxattr(path string) ([]string, error) { func Setxattr(path, name string, data []byte) error { return nil } + +// restoreGenericAttributes is no-op on AIX. +func (node *Node) restoreGenericAttributes(_ string) error { + return node.handleAllUnknownGenericAttributesFound() +} + +// fillGenericAttributes is a no-op on AIX. 
+func (node *Node) fillGenericAttributes(_ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { + return true, nil +} diff --git a/internal/restic/node_netbsd.go b/internal/restic/node_netbsd.go index 0eade2f37f9..be4afa3ae9b 100644 --- a/internal/restic/node_netbsd.go +++ b/internal/restic/node_netbsd.go @@ -1,8 +1,11 @@ package restic -import "syscall" +import ( + "os" + "syscall" +) -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { +func (node Node) restoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { return nil } @@ -10,18 +13,27 @@ func (s statT) atim() syscall.Timespec { return s.Atimespec } func (s statT) mtim() syscall.Timespec { return s.Mtimespec } func (s statT) ctim() syscall.Timespec { return s.Ctimespec } -// Getxattr retrieves extended attribute data associated with path. +// Getxattr is a no-op on netbsd. func Getxattr(path, name string) ([]byte, error) { return nil, nil } -// Listxattr retrieves a list of names of extended attributes associated with the -// given path in the file system. +// Listxattr is a no-op on netbsd. func Listxattr(path string) ([]string, error) { return nil, nil } -// Setxattr associates name and data together as an attribute of path. +// Setxattr is a no-op on netbsd. func Setxattr(path, name string, data []byte) error { return nil } + +// restoreGenericAttributes is no-op on netbsd. +func (node *Node) restoreGenericAttributes(_ string) error { + return node.handleAllUnknownGenericAttributesFound() +} + +// fillGenericAttributes is a no-op on netbsd. +func (node *Node) fillGenericAttributes(_ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { + return true, nil +} diff --git a/internal/restic/node_openbsd.go b/internal/restic/node_openbsd.go index a4ccc72113b..bfff8f8aa22 100644 --- a/internal/restic/node_openbsd.go +++ b/internal/restic/node_openbsd.go @@ -1,8 +1,11 @@ package restic -import "syscall" +import ( + "os" + "syscall" +) -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { +func (node Node) restoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { return nil } @@ -10,18 +13,27 @@ func (s statT) atim() syscall.Timespec { return s.Atim } func (s statT) mtim() syscall.Timespec { return s.Mtim } func (s statT) ctim() syscall.Timespec { return s.Ctim } -// Getxattr retrieves extended attribute data associated with path. +// Getxattr is a no-op on openbsd. func Getxattr(path, name string) ([]byte, error) { return nil, nil } -// Listxattr retrieves a list of names of extended attributes associated with the -// given path in the file system. +// Listxattr is a no-op on openbsd. func Listxattr(path string) ([]string, error) { return nil, nil } -// Setxattr associates name and data together as an attribute of path. +// Setxattr is a no-op on openbsd. func Setxattr(path, name string, data []byte) error { return nil } + +// restoreGenericAttributes is no-op on openbsd. +func (node *Node) restoreGenericAttributes(_ string) error { + return node.handleAllUnknownGenericAttributesFound() +} + +// fillGenericAttributes is a no-op on openbsd. 
+func (node *Node) fillGenericAttributes(_ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { + return true, nil +} diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index aae010421b8..c2c7306b73e 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -1,4 +1,4 @@ -package restic_test +package restic import ( "context" @@ -11,7 +11,6 @@ import ( "testing" "time" - "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" ) @@ -32,7 +31,7 @@ func BenchmarkNodeFillUser(t *testing.B) { t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := restic.NodeFromFileInfo(path, fi) + _, err := NodeFromFileInfo(path, fi) rtest.OK(t, err) } @@ -56,7 +55,7 @@ func BenchmarkNodeFromFileInfo(t *testing.B) { t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := restic.NodeFromFileInfo(path, fi) + _, err := NodeFromFileInfo(path, fi) if err != nil { t.Fatal(err) } @@ -75,11 +74,11 @@ func parseTime(s string) time.Time { return t.Local() } -var nodeTests = []restic.Node{ +var nodeTests = []Node{ { Name: "testFile", Type: "file", - Content: restic.IDs{}, + Content: IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0604, @@ -90,7 +89,7 @@ var nodeTests = []restic.Node{ { Name: "testSuidFile", Type: "file", - Content: restic.IDs{}, + Content: IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0755 | os.ModeSetuid, @@ -101,7 +100,7 @@ var nodeTests = []restic.Node{ { Name: "testSuidFile2", Type: "file", - Content: restic.IDs{}, + Content: IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0755 | os.ModeSetgid, @@ -112,7 +111,7 @@ var nodeTests = []restic.Node{ { Name: "testSticky", Type: "file", - Content: restic.IDs{}, + Content: IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0755 | os.ModeSticky, @@ -148,7 +147,7 @@ var nodeTests = []restic.Node{ { Name: "testFile", Type: "file", - Content: restic.IDs{}, + Content: IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0604, @@ -170,14 +169,14 @@ var nodeTests = []restic.Node{ { Name: "testXattrFile", Type: "file", - Content: restic.IDs{}, + Content: IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0604, ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), ChangeTime: parseTime("2005-05-14 21:07:05.333"), - ExtendedAttributes: []restic.ExtendedAttribute{ + ExtendedAttributes: []ExtendedAttribute{ {"user.foo", []byte("bar")}, }, }, @@ -191,7 +190,7 @@ var nodeTests = []restic.Node{ ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), ChangeTime: parseTime("2005-05-14 21:07:05.333"), - ExtendedAttributes: []restic.ExtendedAttribute{ + ExtendedAttributes: []ExtendedAttribute{ {"user.foo", []byte("bar")}, }, }, @@ -228,7 +227,7 @@ func TestNodeRestoreAt(t *testing.T) { fi, err := os.Lstat(nodePath) rtest.OK(t, err) - n2, err := restic.NodeFromFileInfo(nodePath, fi) + n2, err := NodeFromFileInfo(nodePath, fi) rtest.OK(t, err) rtest.Assert(t, test.Name == n2.Name, @@ -330,7 +329,7 @@ func TestFixTime(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - res := restic.FixTime(test.src) + res := FixTime(test.src) if !res.Equal(test.want) { t.Fatalf("wrong result for %v, want:\n %v\ngot:\n %v", test.src, test.want, res) } @@ -343,12 +342,12 @@ func TestSymlinkSerialization(t *testing.T) { "válîd \t Üñi¢òde \n śẗŕinǵ", string([]byte{0, 1, 2, 
0xfa, 0xfb, 0xfc}), } { - n := restic.Node{ + n := Node{ LinkTarget: link, } ser, err := json.Marshal(n) test.OK(t, err) - var n2 restic.Node + var n2 Node err = json.Unmarshal(ser, &n2) test.OK(t, err) fmt.Println(string(ser)) @@ -365,7 +364,7 @@ func TestSymlinkSerializationFormat(t *testing.T) { {`{"linktarget":"test"}`, "test"}, {`{"linktarget":"\u0000\u0001\u0002\ufffd\ufffd\ufffd","linktarget_raw":"AAEC+vv8"}`, string([]byte{0, 1, 2, 0xfa, 0xfb, 0xfc})}, } { - var n2 restic.Node + var n2 Node err := json.Unmarshal([]byte(d.ser), &n2) test.OK(t, err) test.Equals(t, d.linkTarget, n2.LinkTarget) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index fc6439b4045..a2b8c75e555 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -1,21 +1,47 @@ package restic import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" "syscall" + "unsafe" + "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "golang.org/x/sys/windows" +) + +// WindowsAttributes are the genericAttributes for Windows OS +type WindowsAttributes struct { + // CreationTime is used for storing creation time for windows files. + CreationTime *syscall.Filetime `generic:"creation_time"` + // FileAttributes is used for storing file attributes for windows files. + FileAttributes *uint32 `generic:"file_attributes"` +} + +var ( + modAdvapi32 = syscall.NewLazyDLL("advapi32.dll") + procEncryptFile = modAdvapi32.NewProc("EncryptFileW") + procDecryptFile = modAdvapi32.NewProc("DecryptFileW") ) // mknod is not supported on Windows. -func mknod(path string, mode uint32, dev uint64) (err error) { +func mknod(_ string, mode uint32, dev uint64) (err error) { return errors.New("device nodes cannot be created on windows") } // Windows doesn't need lchown -func lchown(path string, uid int, gid int) (err error) { +func lchown(_ string, uid int, gid int) (err error) { return nil } +// restoreSymlinkTimestamps restores timestamps for symlinks func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { // tweaked version of UtimesNano from go/src/syscall/syscall_windows.go pathp, e := syscall.UTF16PtrFromString(path) @@ -28,7 +54,14 @@ func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespe if e != nil { return e } - defer syscall.Close(h) + + defer func() { + err := syscall.Close(h) + if err != nil { + debug.Log("Error closing file handle for %s: %v\n", path, err) + } + }() + a := syscall.NsecToFiletime(syscall.TimespecToNsec(utimes[0])) w := syscall.NsecToFiletime(syscall.TimespecToNsec(utimes[1])) return syscall.SetFileTime(h, nil, &a, &w) @@ -83,3 +116,188 @@ func (s statT) ctim() syscall.Timespec { // Windows does not have the concept of a "change time" in the sense Unix uses it, so we're using the LastWriteTime here. 
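// Aside (illustration only, not part of this patch): the FILETIME format
// referenced here and used for the creation time further below counts
// 100-nanosecond intervals since 1601-01-01 UTC, stored as a low and a high
// 32-bit half. A portable sketch of the conversion to Unix nanoseconds,
// assuming those two halves as input:
//
//	func filetimeToUnixNano(high, low uint32) int64 {
//		const epochDiff = 116444736000000000 // 100ns ticks between 1601 and 1970
//		ticks := int64(high)<<32 | int64(low)
//		return (ticks - epochDiff) * 100
//	}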
return syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds()) } + +// restoreGenericAttributes restores generic attributes for Windows +func (node Node) restoreGenericAttributes(path string) (err error) { + if len(node.GenericAttributes) == 0 { + return nil + } + var errs []error + windowsAttributes, unknownAttribs, err := genericAttributesToWindowsAttrs(node.GenericAttributes) + if err != nil { + return fmt.Errorf("error parsing generic attribute for: %s : %v", path, err) + } + if windowsAttributes.CreationTime != nil { + if err := restoreCreationTime(path, windowsAttributes.CreationTime); err != nil { + errs = append(errs, fmt.Errorf("error restoring creation time for: %s : %v", path, err)) + } + } + if windowsAttributes.FileAttributes != nil { + if err := restoreFileAttributes(path, windowsAttributes.FileAttributes); err != nil { + errs = append(errs, fmt.Errorf("error restoring file attributes for: %s : %v", path, err)) + } + } + + HandleUnknownGenericAttributesFound(unknownAttribs) + return errors.CombineErrors(errs...) +} + +// genericAttributesToWindowsAttrs converts the generic attributes map to a WindowsAttributes and also returns a string of unkown attributes that it could not convert. +func genericAttributesToWindowsAttrs(attrs map[GenericAttributeType]json.RawMessage) (windowsAttributes WindowsAttributes, unknownAttribs []GenericAttributeType, err error) { + waValue := reflect.ValueOf(&windowsAttributes).Elem() + unknownAttribs, err = genericAttributesToOSAttrs(attrs, reflect.TypeOf(windowsAttributes), &waValue, "windows") + return windowsAttributes, unknownAttribs, err +} + +// restoreCreationTime gets the creation time from the data and sets it to the file/folder at +// the specified path. +func restoreCreationTime(path string, creationTime *syscall.Filetime) (err error) { + pathPointer, err := syscall.UTF16PtrFromString(path) + if err != nil { + return err + } + handle, err := syscall.CreateFile(pathPointer, + syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, + syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return err + } + defer func() { + if err := syscall.Close(handle); err != nil { + debug.Log("Error closing file handle for %s: %v\n", path, err) + } + }() + return syscall.SetFileTime(handle, creationTime, nil, nil) +} + +// restoreFileAttributes gets the File Attributes from the data and sets them to the file/folder +// at the specified path. +func restoreFileAttributes(path string, fileAttributes *uint32) (err error) { + pathPointer, err := syscall.UTF16PtrFromString(path) + if err != nil { + return err + } + err = fixEncryptionAttribute(path, fileAttributes, pathPointer) + if err != nil { + debug.Log("Could not change encryption attribute for path: %s: %v", path, err) + } + return syscall.SetFileAttributes(pathPointer, *fileAttributes) +} + +// fixEncryptionAttribute checks if a file needs to be marked encrypted and is not already encrypted, it sets +// the FILE_ATTRIBUTE_ENCRYPTED. Conversely, if the file needs to be marked unencrypted and it is already +// marked encrypted, it removes the FILE_ATTRIBUTE_ENCRYPTED. +func fixEncryptionAttribute(path string, attrs *uint32, pathPointer *uint16) (err error) { + if *attrs&windows.FILE_ATTRIBUTE_ENCRYPTED != 0 { + // File should be encrypted. + err = encryptFile(pathPointer) + if err != nil { + if fs.IsAccessDenied(err) { + // If existing file already has readonly or system flag, encrypt file call fails. + // We have already cleared readonly flag, clearing system flag if needed. 
+ // The readonly and system flags will be set again at the end of this func if they are needed. + err = fs.ClearSystem(path) + if err != nil { + return fmt.Errorf("failed to encrypt file: failed to clear system flag: %s : %v", path, err) + } + err = encryptFile(pathPointer) + if err != nil { + return fmt.Errorf("failed to encrypt file: %s : %v", path, err) + } + } else { + return fmt.Errorf("failed to encrypt file: %s : %v", path, err) + } + } + } else { + existingAttrs, err := windows.GetFileAttributes(pathPointer) + if err != nil { + return fmt.Errorf("failed to get file attributes for existing file: %s : %v", path, err) + } + if existingAttrs&windows.FILE_ATTRIBUTE_ENCRYPTED != 0 { + // File should not be encrypted, but its already encrypted. Decrypt it. + err = decryptFile(pathPointer) + if err != nil { + if fs.IsAccessDenied(err) { + // If existing file already has readonly or system flag, decrypt file call fails. + // We have already cleared readonly flag, clearing system flag if needed. + // The readonly and system flags will be set again after this func if they are needed. + err = fs.ClearSystem(path) + if err != nil { + return fmt.Errorf("failed to decrypt file: failed to clear system flag: %s : %v", path, err) + } + err = decryptFile(pathPointer) + if err != nil { + return fmt.Errorf("failed to decrypt file: %s : %v", path, err) + } + } else { + return fmt.Errorf("failed to decrypt file: %s : %v", path, err) + } + } + } + } + return err +} + +// encryptFile set the encrypted flag on the file. +func encryptFile(pathPointer *uint16) error { + // Call EncryptFile function + ret, _, err := procEncryptFile.Call(uintptr(unsafe.Pointer(pathPointer))) + if ret == 0 { + return err + } + return nil +} + +// decryptFile removes the encrypted flag from the file. +func decryptFile(pathPointer *uint16) error { + // Call DecryptFile function + ret, _, err := procDecryptFile.Call(uintptr(unsafe.Pointer(pathPointer))) + if ret == 0 { + return err + } + return nil +} + +// fillGenericAttributes fills in the generic attributes for windows like File Attributes, +// Created time etc. +func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT) (allowExtended bool, err error) { + if strings.Contains(filepath.Base(path), ":") { + //Do not process for Alternate Data Streams in Windows + // Also do not allow processing of extended attributes for ADS. + return false, nil + } + if !strings.HasSuffix(filepath.Clean(path), `\`) { + // Do not process file attributes and created time for windows directories like + // C:, D: + // Filepath.Clean(path) ends with '\' for Windows root drives only. + + // Add Windows attributes + node.GenericAttributes, err = WindowsAttrsToGenericAttributes(WindowsAttributes{ + CreationTime: getCreationTime(fi, path), + FileAttributes: &stat.FileAttributes, + }) + } + return true, err +} + +// windowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection +func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs map[GenericAttributeType]json.RawMessage, err error) { + // Get the value of the WindowsAttributes + windowsAttributesValue := reflect.ValueOf(windowsAttributes) + return osAttrsToGenericAttributes(reflect.TypeOf(windowsAttributes), &windowsAttributesValue, runtime.GOOS) +} + +// getCreationTime gets the value for the WindowsAttribute CreationTime in a windows specific time format. 
+// The value is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601 (UTC) +// split into two 32-bit parts: the low-order DWORD and the high-order DWORD for efficiency and interoperability. +// The low-order DWORD represents the number of 100-nanosecond intervals elapsed since January 1, 1601, modulo +// 2^32. The high-order DWORD represents the number of times the low-order DWORD has overflowed. +func getCreationTime(fi os.FileInfo, path string) (creationTimeAttribute *syscall.Filetime) { + attrib, success := fi.Sys().(*syscall.Win32FileAttributeData) + if success && attrib != nil { + return &attrib.CreationTime + } else { + debug.Log("Could not get create time for path: %s", path) + return nil + } +} diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go index ea9eafe94b2..826b8b74a21 100644 --- a/internal/restic/node_xattr.go +++ b/internal/restic/node_xattr.go @@ -4,6 +4,7 @@ package restic import ( + "os" "syscall" "github.com/restic/restic/internal/errors" @@ -47,3 +48,13 @@ func handleXattrErr(err error) error { return errors.WithStack(e) } } + +// restoreGenericAttributes is no-op. +func (node *Node) restoreGenericAttributes(_ string) error { + return node.handleAllUnknownGenericAttributesFound() +} + +// fillGenericAttributes is a no-op. +func (node *Node) fillGenericAttributes(_ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { + return true, nil +} diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go index 589aa502aa8..cbe89c30cc4 100644 --- a/internal/restorer/fileswriter.go +++ b/internal/restorer/fileswriter.go @@ -50,16 +50,26 @@ func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, create bucket.files[path].users++ return wr, nil } - - var flags int + var f *os.File + var err error if createSize >= 0 { - flags = os.O_CREATE | os.O_TRUNC | os.O_WRONLY - } else { - flags = os.O_WRONLY - } - - f, err := os.OpenFile(path, flags, 0600) - if err != nil { + if f, err = os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600); err != nil { + if fs.IsAccessDenied(err) { + // If file is readonly, clear the readonly flag by resetting the + // permissions of the file and try again + // as the metadata will be set again in the second pass and the + // readonly flag will be applied again if needed. + if err = fs.ResetPermissions(path); err != nil { + return nil, err + } + if f, err = os.OpenFile(path, os.O_TRUNC|os.O_WRONLY, 0600); err != nil { + return nil, err + } + } else { + return nil, err + } + } + } else if f, err = os.OpenFile(path, os.O_WRONLY, 0600); err != nil { return nil, err } From eeb1aa5388693c8e6b693cb3941045588080ee69 Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Thu, 22 Feb 2024 17:52:26 -0700 Subject: [PATCH 028/893] Add ability to report warnings to terminal Report warnings to terminal when unrecognized generic attributes are found in the repository. 
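The mechanism is a plain callback: the command layer owns the terminal, and lower
layers hand warning text up through a `func(message string)` instead of printing it
themselves. A minimal runnable sketch with simplified, illustrative types (not the
actual restorer API):

package main

import (
	"fmt"
	"os"
)

type restorer struct {
	// Warn is supplied by the command layer, which owns the terminal.
	Warn func(message string)
}

func (r *restorer) restoreNode(name string) {
	// non-fatal problems are reported upwards instead of being printed here
	if r.Warn != nil {
		r.Warn(fmt.Sprintf("unrecognized generic attribute on %q, ignoring it", name))
	}
}

func main() {
	r := &restorer{Warn: func(m string) { fmt.Fprintf(os.Stderr, "Warning: %s\n", m) }}
	r.restoreNode("file.txt")
}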
--- cmd/restic/cmd_restore.go | 3 +++ internal/restic/node.go | 20 ++++++++++---------- internal/restic/node_aix.go | 4 ++-- internal/restic/node_netbsd.go | 4 ++-- internal/restic/node_openbsd.go | 4 ++-- internal/restic/node_test.go | 2 +- internal/restic/node_windows.go | 4 ++-- internal/restic/node_xattr.go | 4 ++-- internal/restorer/restorer.go | 3 ++- 9 files changed, 26 insertions(+), 22 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 37d3046721f..58f257541fe 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -178,6 +178,9 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, totalErrors++ return nil } + res.Warn = func(message string) { + msg.E("Warning: %s\n", message) + } excludePatterns := filter.ParsePatterns(opts.Exclude) insensitiveExcludePatterns := filter.ParsePatterns(opts.InsensitiveExclude) diff --git a/internal/restic/node.go b/internal/restic/node.go index 44ca52b0c05..cbe9ef36394 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -224,8 +224,8 @@ func (node *Node) CreateAt(ctx context.Context, path string, repo BlobLoader) er } // RestoreMetadata restores node metadata -func (node Node) RestoreMetadata(path string) error { - err := node.restoreMetadata(path) +func (node Node) RestoreMetadata(path string, warn func(msg string)) error { + err := node.restoreMetadata(path, warn) if err != nil { debug.Log("restoreMetadata(%s) error %v", path, err) } @@ -233,7 +233,7 @@ func (node Node) RestoreMetadata(path string) error { return err } -func (node Node) restoreMetadata(path string) error { +func (node Node) restoreMetadata(path string, warn func(msg string)) error { var firsterr error if err := lchown(path, int(node.UID), int(node.GID)); err != nil { @@ -261,7 +261,7 @@ func (node Node) restoreMetadata(path string) error { } } - if err := node.restoreGenericAttributes(path); err != nil { + if err := node.restoreGenericAttributes(path, warn); err != nil { debug.Log("error restoring generic attributes for %v: %v", path, err) if firsterr != nil { firsterr = err @@ -766,14 +766,14 @@ func (node *Node) fillTimes(stat *statT) { } // HandleUnknownGenericAttributesFound is used for handling and distinguing between scenarios related to future versions and cross-OS repositories -func HandleUnknownGenericAttributesFound(unknownAttribs []GenericAttributeType) { +func HandleUnknownGenericAttributesFound(unknownAttribs []GenericAttributeType, warn func(msg string)) { for _, unknownAttrib := range unknownAttribs { - handleUnknownGenericAttributeFound(unknownAttrib) + handleUnknownGenericAttributeFound(unknownAttrib, warn) } } // handleUnknownGenericAttributeFound is used for handling and distinguing between scenarios related to future versions and cross-OS repositories -func handleUnknownGenericAttributeFound(genericAttributeType GenericAttributeType) { +func handleUnknownGenericAttributeFound(genericAttributeType GenericAttributeType, warn func(msg string)) { if checkGenericAttributeNameNotHandledAndPut(genericAttributeType) { // Print the unique error only once for a given execution os, exists := genericAttributesForOS[genericAttributeType] @@ -784,7 +784,7 @@ func handleUnknownGenericAttributeFound(genericAttributeType GenericAttributeTyp debug.Log("Ignoring a generic attribute found in the repository: %s which may not be compatible with your OS. 
Compatible OS: %s", genericAttributeType, os) } else { // If genericAttributesForOS in node.go does not know about this attribute, then the repository may have been created by a newer version which has a newer GenericAttributeType. - debug.Log("Found an unrecognized generic attribute in the repository: %s. You may need to upgrade to latest version of restic.", genericAttributeType) + warn(fmt.Sprintf("Found an unrecognized generic attribute in the repository: %s. You may need to upgrade to latest version of restic.", genericAttributeType)) } } } @@ -792,9 +792,9 @@ func handleUnknownGenericAttributeFound(genericAttributeType GenericAttributeTyp // handleAllUnknownGenericAttributesFound performs validations for all generic attributes in the node. // This is not used on windows currently because windows has handling for generic attributes. // nolint:unused -func (node Node) handleAllUnknownGenericAttributesFound() error { +func (node Node) handleAllUnknownGenericAttributesFound(warn func(msg string)) error { for name := range node.GenericAttributes { - handleUnknownGenericAttributeFound(name) + handleUnknownGenericAttributeFound(name, warn) } return nil } diff --git a/internal/restic/node_aix.go b/internal/restic/node_aix.go index 4d8c248de68..def46bd6055 100644 --- a/internal/restic/node_aix.go +++ b/internal/restic/node_aix.go @@ -39,8 +39,8 @@ func Setxattr(path, name string, data []byte) error { } // restoreGenericAttributes is no-op on AIX. -func (node *Node) restoreGenericAttributes(_ string) error { - return node.handleAllUnknownGenericAttributesFound() +func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { + return node.handleAllUnknownGenericAttributesFound(warn) } // fillGenericAttributes is a no-op on AIX. diff --git a/internal/restic/node_netbsd.go b/internal/restic/node_netbsd.go index be4afa3ae9b..1a47299be93 100644 --- a/internal/restic/node_netbsd.go +++ b/internal/restic/node_netbsd.go @@ -29,8 +29,8 @@ func Setxattr(path, name string, data []byte) error { } // restoreGenericAttributes is no-op on netbsd. -func (node *Node) restoreGenericAttributes(_ string) error { - return node.handleAllUnknownGenericAttributesFound() +func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { + return node.handleAllUnknownGenericAttributesFound(warn) } // fillGenericAttributes is a no-op on netbsd. diff --git a/internal/restic/node_openbsd.go b/internal/restic/node_openbsd.go index bfff8f8aa22..e60eb9dc831 100644 --- a/internal/restic/node_openbsd.go +++ b/internal/restic/node_openbsd.go @@ -29,8 +29,8 @@ func Setxattr(path, name string, data []byte) error { } // restoreGenericAttributes is no-op on openbsd. -func (node *Node) restoreGenericAttributes(_ string) error { - return node.handleAllUnknownGenericAttributesFound() +func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { + return node.handleAllUnknownGenericAttributesFound(warn) } // fillGenericAttributes is a no-op on openbsd. 
diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index c2c7306b73e..d9fa02ac8eb 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -218,7 +218,7 @@ func TestNodeRestoreAt(t *testing.T) { nodePath = filepath.Join(tempdir, test.Name) } rtest.OK(t, test.CreateAt(context.TODO(), nodePath, nil)) - rtest.OK(t, test.RestoreMetadata(nodePath)) + rtest.OK(t, test.RestoreMetadata(nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) if test.Type == "dir" { rtest.OK(t, test.RestoreTimestamps(nodePath)) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index a2b8c75e555..5875c3ccd89 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -118,7 +118,7 @@ func (s statT) ctim() syscall.Timespec { } // restoreGenericAttributes restores generic attributes for Windows -func (node Node) restoreGenericAttributes(path string) (err error) { +func (node Node) restoreGenericAttributes(path string, warn func(msg string)) (err error) { if len(node.GenericAttributes) == 0 { return nil } @@ -138,7 +138,7 @@ func (node Node) restoreGenericAttributes(path string) (err error) { } } - HandleUnknownGenericAttributesFound(unknownAttribs) + HandleUnknownGenericAttributesFound(unknownAttribs, warn) return errors.CombineErrors(errs...) } diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go index 826b8b74a21..0b2d5d5523b 100644 --- a/internal/restic/node_xattr.go +++ b/internal/restic/node_xattr.go @@ -50,8 +50,8 @@ func handleXattrErr(err error) error { } // restoreGenericAttributes is no-op. -func (node *Node) restoreGenericAttributes(_ string) error { - return node.handleAllUnknownGenericAttributesFound() +func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { + return node.handleAllUnknownGenericAttributesFound(warn) } // fillGenericAttributes is a no-op. 
diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 3f4fb32e340..0aeb636d0b9 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -24,6 +24,7 @@ type Restorer struct { progress *restoreui.Progress Error func(location string, err error) error + Warn func(message string) SelectFilter func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) } @@ -178,7 +179,7 @@ func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, targe func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location string) error { debug.Log("restoreNodeMetadata %v %v %v", node.Name, target, location) - err := node.RestoreMetadata(target) + err := node.RestoreMetadata(target, res.Warn) if err != nil { debug.Log("node.RestoreMetadata(%s) error %v", target, err) } From d4be734c73565f822a4965c835bf302330631464 Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Thu, 22 Feb 2024 17:54:43 -0700 Subject: [PATCH 029/893] Handle readonly empty files in windows --- internal/restorer/restorer.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 0aeb636d0b9..9f41f5cf2c8 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -205,11 +205,19 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location func (res *Restorer) restoreEmptyFileAt(node *restic.Node, target, location string) error { wr, err := os.OpenFile(target, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) - if err != nil { - return err + if fs.IsAccessDenied(err) { + // If file is readonly, clear the readonly flag by resetting the + // permissions of the file and try again + // as the metadata will be set again in the second pass and the + // readonly flag will be applied again if needed. 
+ if err = fs.ResetPermissions(target); err != nil { + return err + } + if wr, err = os.OpenFile(target, os.O_TRUNC|os.O_WRONLY, 0600); err != nil { + return err + } } - err = wr.Close() - if err != nil { + if err = wr.Close(); err != nil { return err } From 4bbd25a37fb4554b01af7b567ac7b4f16147a588 Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Thu, 22 Feb 2024 17:55:50 -0700 Subject: [PATCH 030/893] Add tests for generic attribute changes --- internal/restic/node_windows_test.go | 210 +++++++++ internal/restorer/restorer_test.go | 86 ++-- internal/restorer/restorer_unix_test.go | 4 +- internal/restorer/restorer_windows_test.go | 506 +++++++++++++++++++++ 4 files changed, 770 insertions(+), 36 deletions(-) create mode 100644 internal/restic/node_windows_test.go diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go new file mode 100644 index 00000000000..501d5a98a26 --- /dev/null +++ b/internal/restic/node_windows_test.go @@ -0,0 +1,210 @@ +//go:build windows +// +build windows + +package restic + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "syscall" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/test" + "golang.org/x/sys/windows" +) + +func TestRestoreCreationTime(t *testing.T) { + t.Parallel() + path := t.TempDir() + fi, err := os.Lstat(path) + test.OK(t, errors.Wrapf(err, "Could not Lstat for path: %s", path)) + creationTimeAttribute := getCreationTime(fi, path) + test.OK(t, errors.Wrapf(err, "Could not get creation time for path: %s", path)) + //Using the temp dir creation time as the test creation time for the test file and folder + runGenericAttributesTest(t, path, TypeCreationTime, WindowsAttributes{CreationTime: creationTimeAttribute}, false) +} + +func TestRestoreFileAttributes(t *testing.T) { + t.Parallel() + genericAttributeName := TypeFileAttributes + tempDir := t.TempDir() + normal := uint32(syscall.FILE_ATTRIBUTE_NORMAL) + hidden := uint32(syscall.FILE_ATTRIBUTE_HIDDEN) + system := uint32(syscall.FILE_ATTRIBUTE_SYSTEM) + archive := uint32(syscall.FILE_ATTRIBUTE_ARCHIVE) + encrypted := uint32(windows.FILE_ATTRIBUTE_ENCRYPTED) + fileAttributes := []WindowsAttributes{ + //normal + {FileAttributes: &normal}, + //hidden + {FileAttributes: &hidden}, + //system + {FileAttributes: &system}, + //archive + {FileAttributes: &archive}, + //encrypted + {FileAttributes: &encrypted}, + } + for i, fileAttr := range fileAttributes { + genericAttrs, err := WindowsAttrsToGenericAttributes(fileAttr) + test.OK(t, err) + expectedNodes := []Node{ + { + Name: fmt.Sprintf("testfile%d", i), + Type: "file", + Mode: 0655, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: genericAttrs, + }, + } + runGenericAttributesTestForNodes(t, expectedNodes, tempDir, genericAttributeName, fileAttr, false) + } + normal = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY) + hidden = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | syscall.FILE_ATTRIBUTE_HIDDEN) + system = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | windows.FILE_ATTRIBUTE_SYSTEM) + archive = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | windows.FILE_ATTRIBUTE_ARCHIVE) + encrypted = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | windows.FILE_ATTRIBUTE_ENCRYPTED) + folderAttributes := []WindowsAttributes{ + //normal + {FileAttributes: &normal}, + //hidden + {FileAttributes: &hidden}, + //system + 
{FileAttributes: &system}, + //archive + {FileAttributes: &archive}, + //encrypted + {FileAttributes: &encrypted}, + } + for i, folderAttr := range folderAttributes { + genericAttrs, err := WindowsAttrsToGenericAttributes(folderAttr) + test.OK(t, err) + expectedNodes := []Node{ + { + Name: fmt.Sprintf("testdirectory%d", i), + Type: "dir", + Mode: 0755, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: genericAttrs, + }, + } + runGenericAttributesTestForNodes(t, expectedNodes, tempDir, genericAttributeName, folderAttr, false) + } +} + +func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName GenericAttributeType, genericAttributeExpected WindowsAttributes, warningExpected bool) { + genericAttributes, err := WindowsAttrsToGenericAttributes(genericAttributeExpected) + test.OK(t, err) + expectedNodes := []Node{ + { + Name: "testfile", + Type: "file", + Mode: 0644, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: genericAttributes, + }, + { + Name: "testdirectory", + Type: "dir", + Mode: 0755, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: genericAttributes, + }, + } + runGenericAttributesTestForNodes(t, expectedNodes, tempDir, genericAttributeName, genericAttributeExpected, warningExpected) +} +func runGenericAttributesTestForNodes(t *testing.T, expectedNodes []Node, tempDir string, genericAttr GenericAttributeType, genericAttributeExpected WindowsAttributes, warningExpected bool) { + + for _, testNode := range expectedNodes { + testPath, node := restoreAndGetNode(t, tempDir, testNode, warningExpected) + rawMessage := node.GenericAttributes[genericAttr] + genericAttrsExpected, err := WindowsAttrsToGenericAttributes(genericAttributeExpected) + test.OK(t, err) + rawMessageExpected := genericAttrsExpected[genericAttr] + test.Equals(t, rawMessageExpected, rawMessage, "Generic attribute: %s got from NodeFromFileInfo not equal for path: %s", string(genericAttr), testPath) + } +} + +func restoreAndGetNode(t *testing.T, tempDir string, testNode Node, warningExpected bool) (string, *Node) { + testPath := filepath.Join(tempDir, "001", testNode.Name) + err := os.MkdirAll(filepath.Dir(testPath), testNode.Mode) + test.OK(t, errors.Wrapf(err, "Failed to create parent directories for: %s", testPath)) + + if testNode.Type == "file" { + + testFile, err := os.Create(testPath) + test.OK(t, errors.Wrapf(err, "Failed to create test file: %s", testPath)) + testFile.Close() + } else if testNode.Type == "dir" { + + err := os.Mkdir(testPath, testNode.Mode) + test.OK(t, errors.Wrapf(err, "Failed to create test directory: %s", testPath)) + } + + err = testNode.RestoreMetadata(testPath, func(msg string) { + if warningExpected { + test.Assert(t, warningExpected, "Warning triggered as expected: %s", msg) + } else { + // If warning is not expected, this code should not get triggered. 
+ test.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", testPath, msg)) + } + }) + test.OK(t, errors.Wrapf(err, "Failed to restore metadata for: %s", testPath)) + + fi, err := os.Lstat(testPath) + test.OK(t, errors.Wrapf(err, "Could not Lstat for path: %s", testPath)) + + nodeFromFileInfo, err := NodeFromFileInfo(testPath, fi) + test.OK(t, errors.Wrapf(err, "Could not get NodeFromFileInfo for path: %s", testPath)) + + return testPath, nodeFromFileInfo +} + +const TypeSomeNewAttribute GenericAttributeType = "MockAttributes.SomeNewAttribute" + +func TestNewGenericAttributeType(t *testing.T) { + t.Parallel() + + newGenericAttribute := map[GenericAttributeType]json.RawMessage{} + newGenericAttribute[TypeSomeNewAttribute] = []byte("any value") + + tempDir := t.TempDir() + expectedNodes := []Node{ + { + Name: "testfile", + Type: "file", + Mode: 0644, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: newGenericAttribute, + }, + { + Name: "testdirectory", + Type: "dir", + Mode: 0755, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + GenericAttributes: newGenericAttribute, + }, + } + for _, testNode := range expectedNodes { + testPath, node := restoreAndGetNode(t, tempDir, testNode, true) + _, ua, err := genericAttributesToWindowsAttrs(node.GenericAttributes) + test.OK(t, err) + // Since this GenericAttribute is unknown to this version of the software, it will not get set on the file. + test.Assert(t, len(ua) == 0, "Unkown attributes: %s found for path: %s", ua, testPath) + } +} diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index c33214bc3cd..5742d7663d0 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -3,6 +3,7 @@ package restorer import ( "bytes" "context" + "encoding/json" "io" "math" "os" @@ -27,17 +28,27 @@ type Snapshot struct { } type File struct { - Data string - Links uint64 - Inode uint64 - Mode os.FileMode - ModTime time.Time + Data string + Links uint64 + Inode uint64 + Mode os.FileMode + ModTime time.Time + attributes *FileAttributes } type Dir struct { - Nodes map[string]Node - Mode os.FileMode - ModTime time.Time + Nodes map[string]Node + Mode os.FileMode + ModTime time.Time + attributes *FileAttributes +} + +type FileAttributes struct { + ReadOnly bool + Hidden bool + System bool + Archive bool + Encrypted bool } func saveFile(t testing.TB, repo restic.BlobSaver, node File) restic.ID { @@ -52,7 +63,7 @@ func saveFile(t testing.TB, repo restic.BlobSaver, node File) restic.ID { return id } -func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode uint64) restic.ID { +func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode uint64, getGenericAttributes func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage)) restic.ID { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -78,20 +89,21 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u mode = 0644 } err := tree.Insert(&restic.Node{ - Type: "file", - Mode: mode, - ModTime: node.ModTime, - Name: name, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Content: fc, - Size: uint64(len(n.(File).Data)), - Inode: fi, - Links: lc, + Type: "file", + Mode: mode, + ModTime: 
node.ModTime, + Name: name, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Content: fc, + Size: uint64(len(n.(File).Data)), + Inode: fi, + Links: lc, + GenericAttributes: getGenericAttributes(node.attributes, false), }) rtest.OK(t, err) case Dir: - id := saveDir(t, repo, node.Nodes, inode) + id := saveDir(t, repo, node.Nodes, inode, getGenericAttributes) mode := node.Mode if mode == 0 { @@ -99,13 +111,14 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u } err := tree.Insert(&restic.Node{ - Type: "dir", - Mode: mode, - ModTime: node.ModTime, - Name: name, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Subtree: &id, + Type: "dir", + Mode: mode, + ModTime: node.ModTime, + Name: name, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Subtree: &id, + GenericAttributes: getGenericAttributes(node.attributes, false), }) rtest.OK(t, err) default: @@ -121,13 +134,13 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u return id } -func saveSnapshot(t testing.TB, repo restic.Repository, snapshot Snapshot) (*restic.Snapshot, restic.ID) { +func saveSnapshot(t testing.TB, repo restic.Repository, snapshot Snapshot, getGenericAttributes func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage)) (*restic.Snapshot, restic.ID) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() wg, wgCtx := errgroup.WithContext(ctx) repo.StartPackUploader(wgCtx, wg) - treeID := saveDir(t, repo, snapshot.Nodes, 1000) + treeID := saveDir(t, repo, snapshot.Nodes, 1000, getGenericAttributes) err := repo.Flush(ctx) if err != nil { t.Fatal(err) @@ -147,6 +160,11 @@ func saveSnapshot(t testing.TB, repo restic.Repository, snapshot Snapshot) (*res return sn, id } +var noopGetGenericAttributes = func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage) { + // No-op + return nil +} + func TestRestorer(t *testing.T) { var tests = []struct { Snapshot @@ -322,7 +340,7 @@ func TestRestorer(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { repo := repository.TestRepository(t) - sn, id := saveSnapshot(t, repo, test.Snapshot) + sn, id := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes) t.Logf("snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, false, nil) @@ -439,7 +457,7 @@ func TestRestorerRelative(t *testing.T) { t.Run("", func(t *testing.T) { repo := repository.TestRepository(t) - sn, id := saveSnapshot(t, repo, test.Snapshot) + sn, id := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes) t.Logf("snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, false, nil) @@ -669,7 +687,7 @@ func TestRestorerTraverseTree(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { repo := repository.TestRepository(t) - sn, _ := saveSnapshot(t, repo, test.Snapshot) + sn, _ := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes) res := NewRestorer(repo, sn, false, nil) @@ -745,7 +763,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) { }, }, }, - }) + }, noopGetGenericAttributes) res := NewRestorer(repo, sn, false, nil) @@ -800,7 +818,7 @@ func TestVerifyCancel(t *testing.T) { } repo := repository.TestRepository(t) - sn, _ := saveSnapshot(t, repo, snapshot) + sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) res := NewRestorer(repo, sn, false, nil) diff --git a/internal/restorer/restorer_unix_test.go 
b/internal/restorer/restorer_unix_test.go index 2c30a6b6446..0cbfefa921f 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -29,7 +29,7 @@ func TestRestorerRestoreEmptyHardlinkedFileds(t *testing.T) { }, }, }, - }) + }, noopGetGenericAttributes) res := NewRestorer(repo, sn, false, nil) @@ -95,7 +95,7 @@ func TestRestorerProgressBar(t *testing.T) { }, "file2": File{Links: 1, Inode: 2, Data: "example"}, }, - }) + }, noopGetGenericAttributes) mock := &printerMock{} progress := restoreui.NewProgress(mock, 0) diff --git a/internal/restorer/restorer_windows_test.go b/internal/restorer/restorer_windows_test.go index 3ec4b1f11e0..684d51ace21 100644 --- a/internal/restorer/restorer_windows_test.go +++ b/internal/restorer/restorer_windows_test.go @@ -4,11 +4,20 @@ package restorer import ( + "context" + "encoding/json" "math" + "os" + "path" "syscall" "testing" + "time" "unsafe" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" "golang.org/x/sys/windows" ) @@ -33,3 +42,500 @@ func getBlockCount(t *testing.T, filename string) int64 { return int64(math.Ceil(float64(result) / 512)) } + +type DataStreamInfo struct { + name string + data string +} + +type NodeInfo struct { + DataStreamInfo + parentDir string + attributes FileAttributes + Exists bool + IsDirectory bool +} + +func TestFileAttributeCombination(t *testing.T) { + testFileAttributeCombination(t, false) +} + +func TestEmptyFileAttributeCombination(t *testing.T) { + testFileAttributeCombination(t, true) +} + +func testFileAttributeCombination(t *testing.T, isEmpty bool) { + t.Parallel() + //Generate combination of 5 attributes. + attributeCombinations := generateCombinations(5, []bool{}) + + fileName := "TestFile.txt" + // Iterate through each attribute combination + for _, attr1 := range attributeCombinations { + + //Set up the required file information + fileInfo := NodeInfo{ + DataStreamInfo: getDataStreamInfo(isEmpty, fileName), + parentDir: "dir", + attributes: getFileAttributes(attr1), + Exists: false, + } + + //Get the current test name + testName := getCombinationTestName(fileInfo, fileName, fileInfo.attributes) + + //Run test + t.Run(testName, func(t *testing.T) { + mainFilePath := runAttributeTests(t, fileInfo, fileInfo.attributes) + + verifyFileRestores(isEmpty, mainFilePath, t, fileInfo) + }) + } +} + +func generateCombinations(n int, prefix []bool) [][]bool { + if n == 0 { + // Return a slice containing the current permutation + return [][]bool{append([]bool{}, prefix...)} + } + + // Generate combinations with True + prefixTrue := append(prefix, true) + permsTrue := generateCombinations(n-1, prefixTrue) + + // Generate combinations with False + prefixFalse := append(prefix, false) + permsFalse := generateCombinations(n-1, prefixFalse) + + // Combine combinations with True and False + return append(permsTrue, permsFalse...) 
+} + +func getDataStreamInfo(isEmpty bool, fileName string) DataStreamInfo { + var dataStreamInfo DataStreamInfo + if isEmpty { + dataStreamInfo = DataStreamInfo{ + name: fileName, + } + } else { + dataStreamInfo = DataStreamInfo{ + name: fileName, + data: "Main file data stream.", + } + } + return dataStreamInfo +} + +func getFileAttributes(values []bool) FileAttributes { + return FileAttributes{ + ReadOnly: values[0], + Hidden: values[1], + System: values[2], + Archive: values[3], + Encrypted: values[4], + } +} + +func getCombinationTestName(fi NodeInfo, fileName string, overwriteAttr FileAttributes) string { + if fi.attributes.ReadOnly { + fileName += "-ReadOnly" + } + if fi.attributes.Hidden { + fileName += "-Hidden" + } + if fi.attributes.System { + fileName += "-System" + } + if fi.attributes.Archive { + fileName += "-Archive" + } + if fi.attributes.Encrypted { + fileName += "-Encrypted" + } + if fi.Exists { + fileName += "-Overwrite" + if overwriteAttr.ReadOnly { + fileName += "-R" + } + if overwriteAttr.Hidden { + fileName += "-H" + } + if overwriteAttr.System { + fileName += "-S" + } + if overwriteAttr.Archive { + fileName += "-A" + } + if overwriteAttr.Encrypted { + fileName += "-E" + } + } + return fileName +} + +func runAttributeTests(t *testing.T, fileInfo NodeInfo, existingFileAttr FileAttributes) string { + testDir := t.TempDir() + res, _ := setupWithFileAttributes(t, fileInfo, testDir, existingFileAttr) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := res.RestoreTo(ctx, testDir) + rtest.OK(t, err) + + mainFilePath := path.Join(testDir, fileInfo.parentDir, fileInfo.name) + //Verify restore + verifyFileAttributes(t, mainFilePath, fileInfo.attributes) + return mainFilePath +} + +func setupWithFileAttributes(t *testing.T, nodeInfo NodeInfo, testDir string, existingFileAttr FileAttributes) (*Restorer, []int) { + t.Helper() + if nodeInfo.Exists { + if !nodeInfo.IsDirectory { + err := os.MkdirAll(path.Join(testDir, nodeInfo.parentDir), os.ModeDir) + rtest.OK(t, err) + filepath := path.Join(testDir, nodeInfo.parentDir, nodeInfo.name) + if existingFileAttr.Encrypted { + err := createEncryptedFileWriteData(filepath, nodeInfo) + rtest.OK(t, err) + } else { + // Write the data to the file + file, err := os.OpenFile(path.Clean(filepath), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) + rtest.OK(t, err) + _, err = file.Write([]byte(nodeInfo.data)) + rtest.OK(t, err) + + err = file.Close() + rtest.OK(t, err) + } + } else { + err := os.MkdirAll(path.Join(testDir, nodeInfo.parentDir, nodeInfo.name), os.ModeDir) + rtest.OK(t, err) + } + + pathPointer, err := syscall.UTF16PtrFromString(path.Join(testDir, nodeInfo.parentDir, nodeInfo.name)) + rtest.OK(t, err) + syscall.SetFileAttributes(pathPointer, getAttributeValue(&existingFileAttr)) + } + + index := 0 + + order := []int{} + streams := []DataStreamInfo{} + if !nodeInfo.IsDirectory { + order = append(order, index) + index++ + streams = append(streams, nodeInfo.DataStreamInfo) + } + return setup(t, getNodes(nodeInfo.parentDir, nodeInfo.name, order, streams, nodeInfo.IsDirectory, &nodeInfo.attributes)), order +} + +func createEncryptedFileWriteData(filepath string, fileInfo NodeInfo) (err error) { + var ptr *uint16 + if ptr, err = windows.UTF16PtrFromString(filepath); err != nil { + return err + } + var handle windows.Handle + //Create the file with encrypted flag + if handle, err = windows.CreateFile(ptr, uint32(windows.GENERIC_READ|windows.GENERIC_WRITE), uint32(windows.FILE_SHARE_READ), nil, 
uint32(windows.CREATE_ALWAYS), windows.FILE_ATTRIBUTE_ENCRYPTED, 0); err != nil { + return err + } + //Write data to file + if _, err = windows.Write(handle, []byte(fileInfo.data)); err != nil { + return err + } + //Close handle + return windows.CloseHandle(handle) +} + +func setup(t *testing.T, nodesMap map[string]Node) *Restorer { + repo := repository.TestRepository(t) + getFileAttributes := func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage) { + if attr == nil { + return + } + + fileattr := getAttributeValue(attr) + + if isDir { + //If the node is a directory add FILE_ATTRIBUTE_DIRECTORY to attributes + fileattr |= windows.FILE_ATTRIBUTE_DIRECTORY + } + attrs, err := restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{FileAttributes: &fileattr}) + test.OK(t, err) + return attrs + } + sn, _ := saveSnapshot(t, repo, Snapshot{ + Nodes: nodesMap, + }, getFileAttributes) + res := NewRestorer(repo, sn, false, nil) + return res +} + +func getAttributeValue(attr *FileAttributes) uint32 { + var fileattr uint32 + if attr.ReadOnly { + fileattr |= windows.FILE_ATTRIBUTE_READONLY + } + if attr.Hidden { + fileattr |= windows.FILE_ATTRIBUTE_HIDDEN + } + if attr.Encrypted { + fileattr |= windows.FILE_ATTRIBUTE_ENCRYPTED + } + if attr.Archive { + fileattr |= windows.FILE_ATTRIBUTE_ARCHIVE + } + if attr.System { + fileattr |= windows.FILE_ATTRIBUTE_SYSTEM + } + return fileattr +} + +func getNodes(dir string, mainNodeName string, order []int, streams []DataStreamInfo, isDirectory bool, attributes *FileAttributes) map[string]Node { + var mode os.FileMode + if isDirectory { + mode = os.FileMode(2147484159) + } else { + if attributes != nil && attributes.ReadOnly { + mode = os.FileMode(0o444) + } else { + mode = os.FileMode(0o666) + } + } + + getFileNodes := func() map[string]Node { + nodes := map[string]Node{} + if isDirectory { + //Add a directory node at the same level as the other streams + nodes[mainNodeName] = Dir{ + ModTime: time.Now(), + attributes: attributes, + Mode: mode, + } + } + + if len(streams) > 0 { + for _, index := range order { + stream := streams[index] + + var attr *FileAttributes = nil + if mainNodeName == stream.name { + attr = attributes + } else if attributes != nil && attributes.Encrypted { + //Set encrypted attribute + attr = &FileAttributes{Encrypted: true} + } + + nodes[stream.name] = File{ + ModTime: time.Now(), + Data: stream.data, + Mode: mode, + attributes: attr, + } + } + } + return nodes + } + + return map[string]Node{ + dir: Dir{ + Mode: normalizeFileMode(0750 | mode), + ModTime: time.Now(), + Nodes: getFileNodes(), + }, + } +} + +func verifyFileAttributes(t *testing.T, mainFilePath string, attr FileAttributes) { + ptr, err := windows.UTF16PtrFromString(mainFilePath) + rtest.OK(t, err) + //Get file attributes using syscall + fileAttributes, err := syscall.GetFileAttributes(ptr) + rtest.OK(t, err) + //Test positive and negative scenarios + if attr.ReadOnly { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY != 0, "Expected read only attibute.") + } else { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY == 0, "Unexpected read only attibute.") + } + if attr.Hidden { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN != 0, "Expected hidden attibute.") + } else { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN == 0, "Unexpected hidden attibute.") + } + if attr.System { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM != 0, "Expected system 
attibute.") + } else { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM == 0, "Unexpected system attibute.") + } + if attr.Archive { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE != 0, "Expected archive attibute.") + } else { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE == 0, "Unexpected archive attibute.") + } + if attr.Encrypted { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED != 0, "Expected encrypted attibute.") + } else { + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED == 0, "Unexpected encrypted attibute.") + } +} + +func verifyFileRestores(isEmpty bool, mainFilePath string, t *testing.T, fileInfo NodeInfo) { + if isEmpty { + _, err1 := os.Stat(mainFilePath) + rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The file "+fileInfo.name+" does not exist") + } else { + + verifyMainFileRestore(t, mainFilePath, fileInfo) + } +} + +func verifyMainFileRestore(t *testing.T, mainFilePath string, fileInfo NodeInfo) { + fi, err1 := os.Stat(mainFilePath) + rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The file "+fileInfo.name+" does not exist") + + size := fi.Size() + rtest.Assert(t, size > 0, "The file "+fileInfo.name+" exists but is empty") + + content, err := os.ReadFile(mainFilePath) + rtest.OK(t, err) + rtest.Assert(t, string(content) == fileInfo.data, "The file "+fileInfo.name+" exists but the content is not overwritten") +} + +func TestDirAttributeCombination(t *testing.T) { + t.Parallel() + attributeCombinations := generateCombinations(4, []bool{}) + + dirName := "TestDir" + // Iterate through each attribute combination + for _, attr1 := range attributeCombinations { + + //Set up the required directory information + dirInfo := NodeInfo{ + DataStreamInfo: DataStreamInfo{ + name: dirName, + }, + parentDir: "dir", + attributes: getDirFileAttributes(attr1), + Exists: false, + IsDirectory: true, + } + + //Get the current test name + testName := getCombinationTestName(dirInfo, dirName, dirInfo.attributes) + + //Run test + t.Run(testName, func(t *testing.T) { + mainDirPath := runAttributeTests(t, dirInfo, dirInfo.attributes) + + //Check directory exists + _, err1 := os.Stat(mainDirPath) + rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The directory "+dirInfo.name+" does not exist") + }) + } +} + +func getDirFileAttributes(values []bool) FileAttributes { + return FileAttributes{ + // readonly not valid for directories + Hidden: values[0], + System: values[1], + Archive: values[2], + Encrypted: values[3], + } +} + +func TestFileAttributeCombinationsOverwrite(t *testing.T) { + testFileAttributeCombinationsOverwrite(t, false) +} + +func TestEmptyFileAttributeCombinationsOverwrite(t *testing.T) { + testFileAttributeCombinationsOverwrite(t, true) +} + +func testFileAttributeCombinationsOverwrite(t *testing.T, isEmpty bool) { + t.Parallel() + //Get attribute combinations + attributeCombinations := generateCombinations(5, []bool{}) + //Get overwrite file attribute combinations + overwriteCombinations := generateCombinations(5, []bool{}) + + fileName := "TestOverwriteFile" + + //Iterate through each attribute combination + for _, attr1 := range attributeCombinations { + + fileInfo := NodeInfo{ + DataStreamInfo: getDataStreamInfo(isEmpty, fileName), + parentDir: "dir", + attributes: getFileAttributes(attr1), + Exists: true, + } + + overwriteFileAttributes := []FileAttributes{} + + for _, overwrite := range overwriteCombinations { + overwriteFileAttributes = append(overwriteFileAttributes, 
getFileAttributes(overwrite)) + } + + //Iterate through each overwrite attribute combination + for _, overwriteFileAttr := range overwriteFileAttributes { + //Get the test name + testName := getCombinationTestName(fileInfo, fileName, overwriteFileAttr) + + //Run test + t.Run(testName, func(t *testing.T) { + mainFilePath := runAttributeTests(t, fileInfo, overwriteFileAttr) + + verifyFileRestores(isEmpty, mainFilePath, t, fileInfo) + }) + } + } +} + +func TestDirAttributeCombinationsOverwrite(t *testing.T) { + t.Parallel() + //Get attribute combinations + attributeCombinations := generateCombinations(4, []bool{}) + //Get overwrite dir attribute combinations + overwriteCombinations := generateCombinations(4, []bool{}) + + dirName := "TestOverwriteDir" + + //Iterate through each attribute combination + for _, attr1 := range attributeCombinations { + + dirInfo := NodeInfo{ + DataStreamInfo: DataStreamInfo{ + name: dirName, + }, + parentDir: "dir", + attributes: getDirFileAttributes(attr1), + Exists: true, + IsDirectory: true, + } + + overwriteDirFileAttributes := []FileAttributes{} + + for _, overwrite := range overwriteCombinations { + overwriteDirFileAttributes = append(overwriteDirFileAttributes, getDirFileAttributes(overwrite)) + } + + //Iterate through each overwrite attribute combinations + for _, overwriteDirAttr := range overwriteDirFileAttributes { + //Get the test name + testName := getCombinationTestName(dirInfo, dirName, overwriteDirAttr) + + //Run test + t.Run(testName, func(t *testing.T) { + mainDirPath := runAttributeTests(t, dirInfo, dirInfo.attributes) + + //Check directory exists + _, err1 := os.Stat(mainDirPath) + rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The directory "+dirInfo.name+" does not exist") + }) + } + } +} From e8211cb64a80fc4f949b5088c167d51dc4ef45a1 Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Thu, 22 Feb 2024 17:59:56 -0700 Subject: [PATCH 031/893] Add changelog and update docs for windows attr --- changelog/unreleased/pull-4611 | 7 +++++++ doc/040_backup.rst | 1 - 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/pull-4611 diff --git a/changelog/unreleased/pull-4611 b/changelog/unreleased/pull-4611 new file mode 100644 index 00000000000..940de9c26fc --- /dev/null +++ b/changelog/unreleased/pull-4611 @@ -0,0 +1,7 @@ +Enhancement: Back up windows created time and file attributes like hidden flag + +Restic did not back up windows-specific meta-data like created time and file attributes like hidden flag. +Restic now backs up file created time and file attributes like hidden, readonly and encrypted flag when backing up files and folders on windows. 
+ +https://github.com/restic/restic/pull/4611 + diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 550957eeb75..d0bd4b2e2c6 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -487,7 +487,6 @@ particular note are: * File creation date on Unix platforms * Inode flags on Unix platforms * File ownership and ACLs on Windows -* The "hidden" flag on Windows Reading data from a command *************************** From 8b1a85711fc936fef1c908eaa8b10533f880dff6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Feb 2024 20:22:14 +0100 Subject: [PATCH 032/893] archiver: unexport save/saveDir/saveTree methods --- internal/archiver/archiver.go | 22 +++++++++++----------- internal/archiver/archiver_test.go | 12 ++++++------ 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 77ddba7c4c6..9d7d6891352 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -222,9 +222,9 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { return err } -// SaveDir stores a directory in the repo and returns the node. snPath is the +// saveDir stores a directory in the repo and returns the node. snPath is the // path within the current snapshot. -func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) { +func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) { debug.Log("%v %v", snPath, dir) treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi) @@ -250,7 +250,7 @@ func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi pathname := arch.FS.Join(dir, name) oldNode := previous.Find(name) snItem := join(snPath, name) - fn, excluded, err := arch.Save(ctx, snItem, pathname, oldNode) + fn, excluded, err := arch.save(ctx, snItem, pathname, oldNode) // return error early if possible if err != nil { @@ -334,14 +334,14 @@ func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool { return true } -// Save saves a target (file or directory) to the repo. If the item is +// save saves a target (file or directory) to the repo. If the item is // excluded, this function returns a nil node and error, with excluded set to // true. // // Errors and completion needs to be handled by the caller. // // snPath is the path within the current snapshot. -func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) { +func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) { start := time.Now() debug.Log("%v target %q, previous %v", snPath, target, previous) @@ -462,7 +462,7 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous return FutureNode{}, false, err } - fn, err = arch.SaveDir(ctx, snPath, target, fi, oldSubtree, + fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree, func(node *restic.Node, stats ItemStats) { arch.CompleteItem(snItem, previous, node, stats, time.Since(start)) }) @@ -545,9 +545,9 @@ func (arch *Archiver) statDir(dir string) (os.FileInfo, error) { return fi, nil } -// SaveTree stores a Tree in the repo, returned is the tree. snPath is the path +// saveTree stores a Tree in the repo, returned is the tree. 
snPath is the path // within the current snapshot. -func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) { +func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) { var node *restic.Node if snPath != "/" { @@ -585,7 +585,7 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, // this is a leaf node if subatree.Leaf() { - fn, excluded, err := arch.Save(ctx, join(snPath, name), subatree.Path, previous.Find(name)) + fn, excluded, err := arch.save(ctx, join(snPath, name), subatree.Path, previous.Find(name)) if err != nil { err = arch.error(subatree.Path, err) @@ -619,7 +619,7 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, } // not a leaf node, archive subtree - fn, _, err := arch.SaveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) { + fn, _, err := arch.saveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) { arch.CompleteItem(snItem, oldNode, n, is, time.Since(start)) }) if err != nil { @@ -762,7 +762,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps arch.runWorkers(wgCtx, wg) debug.Log("starting snapshot") - fn, nodeCount, err := arch.SaveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) { + fn, nodeCount, err := arch.saveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) { arch.CompleteItem("/", nil, nil, is, time.Since(start)) }) if err != nil { diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 46ef4425184..158768323ca 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -227,7 +227,7 @@ func TestArchiverSave(t *testing.T) { } arch.runWorkers(ctx, wg) - node, excluded, err := arch.Save(ctx, "/", filepath.Join(tempdir, "file"), nil) + node, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "file"), nil) if err != nil { t.Fatal(err) } @@ -304,7 +304,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { } arch.runWorkers(ctx, wg) - node, excluded, err := arch.Save(ctx, "/", filename, nil) + node, excluded, err := arch.save(ctx, "/", filename, nil) t.Logf("Save returned %v %v", node, err) if err != nil { t.Fatal(err) @@ -845,7 +845,7 @@ func TestArchiverSaveDir(t *testing.T) { t.Fatal(err) } - ft, err := arch.SaveDir(ctx, "/", test.target, fi, nil, nil) + ft, err := arch.saveDir(ctx, "/", test.target, fi, nil, nil) if err != nil { t.Fatal(err) } @@ -918,7 +918,7 @@ func TestArchiverSaveDirIncremental(t *testing.T) { t.Fatal(err) } - ft, err := arch.SaveDir(ctx, "/", tempdir, fi, nil, nil) + ft, err := arch.saveDir(ctx, "/", tempdir, fi, nil, nil) if err != nil { t.Fatal(err) } @@ -1107,7 +1107,7 @@ func TestArchiverSaveTree(t *testing.T) { t.Fatal(err) } - fn, _, err := arch.SaveTree(ctx, "/", atree, nil, nil) + fn, _, err := arch.saveTree(ctx, "/", atree, nil, nil) if err != nil { t.Fatal(err) } @@ -2236,7 +2236,7 @@ func TestRacyFileSwap(t *testing.T) { arch.runWorkers(ctx, wg) // fs.Track will panic if the file was not closed - _, excluded, err := arch.Save(ctx, "/", tempfile, nil) + _, excluded, err := arch.save(ctx, "/", tempfile, nil) if err == nil { t.Errorf("Save() should have failed") } From 
a59f654fa68dfe5b1a186b1fb81265ba8268c623 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 22 Feb 2024 22:14:48 +0100 Subject: [PATCH 033/893] archiver: refactor summary collection from ui into the archiver --- cmd/restic/cmd_backup.go | 4 +- internal/archiver/archiver.go | 79 ++++++++++++++++++++++++----- internal/archiver/archiver_test.go | 21 +++++--- internal/archiver/testing.go | 2 +- internal/archiver/testing_test.go | 2 +- internal/dump/common_test.go | 2 +- internal/restorer/restorer_test.go | 2 +- internal/ui/backup/json.go | 2 +- internal/ui/backup/progress.go | 49 ++---------------- internal/ui/backup/progress_test.go | 5 +- internal/ui/backup/text.go | 2 +- 11 files changed, 92 insertions(+), 78 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 318d17796ef..4c03b7e8c2c 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -649,7 +649,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter if !gopts.JSON { progressPrinter.V("start backup on %v", targets) } - _, id, err := arch.Snapshot(ctx, targets, snapshotOpts) + _, id, summary, err := arch.Snapshot(ctx, targets, snapshotOpts) // cleanly shutdown all running goroutines cancel() @@ -663,7 +663,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } // Report finished execution - progressReporter.Finish(id, opts.DryRun) + progressReporter.Finish(id, summary, opts.DryRun) if !gopts.JSON && !opts.DryRun { progressPrinter.P("snapshot %s saved\n", id.Str()) } diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 9d7d6891352..b8d1d45bbd4 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -8,6 +8,7 @@ import ( "runtime" "sort" "strings" + "sync" "time" "github.com/restic/restic/internal/debug" @@ -40,6 +41,16 @@ type ItemStats struct { TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead) } +type Summary struct { + Files, Dirs struct { + New uint + Changed uint + Unchanged uint + } + ProcessedBytes uint64 + ItemStats +} + // Add adds other to the current ItemStats. func (s *ItemStats) Add(other ItemStats) { s.DataBlobs += other.DataBlobs @@ -61,6 +72,8 @@ type Archiver struct { blobSaver *BlobSaver fileSaver *FileSaver treeSaver *TreeSaver + mu sync.Mutex + summary *Summary // Error is called for all errors that occur during backup. Error ErrorFunc @@ -182,6 +195,44 @@ func (arch *Archiver) error(item string, err error) error { return errf } +func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s ItemStats, d time.Duration) { + arch.CompleteItem(item, previous, current, s, d) + + arch.mu.Lock() + defer arch.mu.Unlock() + + arch.summary.ItemStats.Add(s) + + if current != nil { + arch.summary.ProcessedBytes += current.Size + } else { + // last item or an error occurred + return + } + + switch current.Type { + case "dir": + switch { + case previous == nil: + arch.summary.Dirs.New++ + case previous.Equals(*current): + arch.summary.Dirs.Unchanged++ + default: + arch.summary.Dirs.Changed++ + } + + case "file": + switch { + case previous == nil: + arch.summary.Files.New++ + case previous.Equals(*current): + arch.summary.Files.Unchanged++ + default: + arch.summary.Files.Changed++ + } + } +} + // nodeFromFileInfo returns the restic node from an os.FileInfo. 
func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) (*restic.Node, error) { node, err := restic.NodeFromFileInfo(filename, fi) @@ -380,7 +431,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) { if arch.allBlobsPresent(previous) { debug.Log("%v hasn't changed, using old list of blobs", target) - arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start)) + arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start)) arch.CompleteBlob(previous.Size) node, err := arch.nodeFromFileInfo(snPath, target, fi) if err != nil { @@ -445,9 +496,9 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous fn = arch.fileSaver.Save(ctx, snPath, target, file, fi, func() { arch.StartFile(snPath) }, func() { - arch.CompleteItem(snPath, nil, nil, ItemStats{}, 0) + arch.trackItem(snPath, nil, nil, ItemStats{}, 0) }, func(node *restic.Node, stats ItemStats) { - arch.CompleteItem(snPath, previous, node, stats, time.Since(start)) + arch.trackItem(snPath, previous, node, stats, time.Since(start)) }) case fi.IsDir(): @@ -464,7 +515,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree, func(node *restic.Node, stats ItemStats) { - arch.CompleteItem(snItem, previous, node, stats, time.Since(start)) + arch.trackItem(snItem, previous, node, stats, time.Since(start)) }) if err != nil { debug.Log("SaveDir for %v returned error: %v", snPath, err) @@ -620,7 +671,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, // not a leaf node, archive subtree fn, _, err := arch.saveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) { - arch.CompleteItem(snItem, oldNode, n, is, time.Since(start)) + arch.trackItem(snItem, oldNode, n, is, time.Since(start)) }) if err != nil { return FutureNode{}, 0, err @@ -738,15 +789,17 @@ func (arch *Archiver) stopWorkers() { } // Snapshot saves several targets and returns a snapshot. 
-func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, error) { +func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, *Summary, error) { + arch.summary = &Summary{} + cleanTargets, err := resolveRelativeTargets(arch.FS, targets) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } atree, err := NewTree(arch.FS, cleanTargets) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } var rootTreeID restic.ID @@ -763,7 +816,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps debug.Log("starting snapshot") fn, nodeCount, err := arch.saveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) { - arch.CompleteItem("/", nil, nil, is, time.Since(start)) + arch.trackItem("/", nil, nil, is, time.Since(start)) }) if err != nil { return err @@ -799,12 +852,12 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps }) err = wgUp.Wait() if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } sn.ProgramVersion = opts.ProgramVersion @@ -816,8 +869,8 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps id, err := restic.SaveSnapshot(ctx, arch.Repo, sn) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } - return sn, id, nil + return sn, id, arch.summary, nil } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 158768323ca..0ae7ca05f06 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -226,6 +226,7 @@ func TestArchiverSave(t *testing.T) { return err } arch.runWorkers(ctx, wg) + arch.summary = &Summary{} node, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "file"), nil) if err != nil { @@ -303,6 +304,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { return err } arch.runWorkers(ctx, wg) + arch.summary = &Summary{} node, excluded, err := arch.save(ctx, "/", filename, nil) t.Logf("Save returned %v %v", node, err) @@ -831,6 +833,7 @@ func TestArchiverSaveDir(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} chdir := tempdir if test.chdir != "" { @@ -912,6 +915,7 @@ func TestArchiverSaveDirIncremental(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} fi, err := fs.Lstat(tempdir) if err != nil { @@ -1094,6 +1098,7 @@ func TestArchiverSaveTree(t *testing.T) { repo.StartPackUploader(ctx, wg) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} back := restictest.Chdir(t, tempdir) defer back() @@ -1395,7 +1400,7 @@ func TestArchiverSnapshot(t *testing.T) { } t.Logf("targets: %v", targets) - sn, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + sn, snapshotID, _, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) if err != nil { t.Fatal(err) } @@ -1543,7 +1548,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { defer back() targets := []string{"."} - _, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := 
arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) if test.err != "" { if err == nil { t.Fatalf("expected error not found, got %v, wanted %q", err, test.err) @@ -1648,7 +1653,7 @@ func TestArchiverParent(t *testing.T) { back := restictest.Chdir(t, tempdir) defer back() - firstSnapshot, firstSnapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + firstSnapshot, firstSnapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if err != nil { t.Fatal(err) } @@ -1678,7 +1683,7 @@ func TestArchiverParent(t *testing.T) { Time: time.Now(), ParentSnapshot: firstSnapshot, } - _, secondSnapshotID, err := arch.Snapshot(ctx, []string{"."}, opts) + _, secondSnapshotID, _, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } @@ -1814,7 +1819,7 @@ func TestArchiverErrorReporting(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.Error = test.errFn - _, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if test.mustError { if err != nil { t.Logf("found expected error (%v), skipping further checks", err) @@ -1887,7 +1892,7 @@ func TestArchiverContextCanceled(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) - _, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if err != nil { t.Logf("found expected error (%v)", err) @@ -2026,7 +2031,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { SaveBlobConcurrency: 1, }) - _, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, _, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if !errors.Is(err, test.err) { t.Errorf("expected error (%v) not found, got %v", test.err, err) } @@ -2054,7 +2059,7 @@ func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent *restic.Sna Time: time.Now(), ParentSnapshot: parent, } - snapshot, _, err := arch.Snapshot(ctx, []string{filename}, sopts) + snapshot, _, _, err := arch.Snapshot(ctx, []string{filename}, sopts) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index 111c1e68cdc..1feaa82fce2 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -31,7 +31,7 @@ func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *res } opts.ParentSnapshot = sn } - sn, _, err := arch.Snapshot(context.TODO(), []string{path}, opts) + sn, _, _, err := arch.Snapshot(context.TODO(), []string{path}, opts) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/testing_test.go b/internal/archiver/testing_test.go index ada7261f15d..e48b41ec796 100644 --- a/internal/archiver/testing_test.go +++ b/internal/archiver/testing_test.go @@ -473,7 +473,7 @@ func TestTestEnsureSnapshot(t *testing.T) { Hostname: "localhost", Tags: []string{"test"}, } - _, id, err := arch.Snapshot(ctx, []string{"."}, opts) + _, id, _, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } diff --git a/internal/dump/common_test.go b/internal/dump/common_test.go index 3ee9112af34..afd19df6372 100644 --- a/internal/dump/common_test.go +++ b/internal/dump/common_test.go @@ -78,7 +78,7 @@ func WriteTest(t *testing.T, format string, cd CheckDump) { back := rtest.Chdir(t, tmpdir) 
defer back() - sn, _, err := arch.Snapshot(ctx, []string{"."}, archiver.SnapshotOptions{}) + sn, _, _, err := arch.Snapshot(ctx, []string{"."}, archiver.SnapshotOptions{}) rtest.OK(t, err) tree, err := restic.LoadTree(ctx, repo, *sn.Tree) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index c33214bc3cd..29f8920c547 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -840,7 +840,7 @@ func TestRestorerSparseFiles(t *testing.T) { rtest.OK(t, err) arch := archiver.New(repo, target, archiver.Options{}) - sn, _, err := arch.Snapshot(context.Background(), []string{"/zeros"}, + sn, _, _, err := arch.Snapshot(context.Background(), []string{"/zeros"}, archiver.SnapshotOptions{}) rtest.OK(t, err) diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index 10f0e91fad8..3393a3d48c2 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -163,7 +163,7 @@ func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) { } // Finish prints the finishing messages. -func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool) { +func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { b.print(summaryOutput{ MessageType: "summary", FilesNew: summary.Files.New, diff --git a/internal/ui/backup/progress.go b/internal/ui/backup/progress.go index da0d401a3ab..1d494bf142a 100644 --- a/internal/ui/backup/progress.go +++ b/internal/ui/backup/progress.go @@ -17,7 +17,7 @@ type ProgressPrinter interface { ScannerError(item string, err error) error CompleteItem(messageType string, item string, s archiver.ItemStats, d time.Duration) ReportTotal(start time.Time, s archiver.ScanStats) - Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool) + Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) Reset() P(msg string, args ...interface{}) @@ -28,16 +28,6 @@ type Counter struct { Files, Dirs, Bytes uint64 } -type Summary struct { - Files, Dirs struct { - New uint - Changed uint - Unchanged uint - } - ProcessedBytes uint64 - archiver.ItemStats -} - // Progress reports progress for the `backup` command. type Progress struct { progress.Updater @@ -52,7 +42,6 @@ type Progress struct { processed, total Counter errors uint - summary Summary printer ProgressPrinter } @@ -126,16 +115,6 @@ func (p *Progress) CompleteBlob(bytes uint64) { // CompleteItem is the status callback function for the archiver when a // file/dir has been saved successfully. 
func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) { - p.mu.Lock() - p.summary.ItemStats.Add(s) - - // for the last item "/", current is nil - if current != nil { - p.summary.ProcessedBytes += current.Size - } - - p.mu.Unlock() - if current == nil { // error occurred, tell the status display to remove the line p.mu.Lock() @@ -153,21 +132,10 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a switch { case previous == nil: p.printer.CompleteItem("dir new", item, s, d) - p.mu.Lock() - p.summary.Dirs.New++ - p.mu.Unlock() - case previous.Equals(*current): p.printer.CompleteItem("dir unchanged", item, s, d) - p.mu.Lock() - p.summary.Dirs.Unchanged++ - p.mu.Unlock() - default: p.printer.CompleteItem("dir modified", item, s, d) - p.mu.Lock() - p.summary.Dirs.Changed++ - p.mu.Unlock() } case "file": @@ -179,21 +147,10 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a switch { case previous == nil: p.printer.CompleteItem("file new", item, s, d) - p.mu.Lock() - p.summary.Files.New++ - p.mu.Unlock() - case previous.Equals(*current): p.printer.CompleteItem("file unchanged", item, s, d) - p.mu.Lock() - p.summary.Files.Unchanged++ - p.mu.Unlock() - default: p.printer.CompleteItem("file modified", item, s, d) - p.mu.Lock() - p.summary.Files.Changed++ - p.mu.Unlock() } } } @@ -213,8 +170,8 @@ func (p *Progress) ReportTotal(item string, s archiver.ScanStats) { } // Finish prints the finishing messages. -func (p *Progress) Finish(snapshotID restic.ID, dryrun bool) { +func (p *Progress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryrun bool) { // wait for the status update goroutine to shut down p.Updater.Done() - p.printer.Finish(snapshotID, p.start, &p.summary, dryrun) + p.printer.Finish(snapshotID, p.start, summary, dryrun) } diff --git a/internal/ui/backup/progress_test.go b/internal/ui/backup/progress_test.go index 79a56c91ee7..6b242a0f333 100644 --- a/internal/ui/backup/progress_test.go +++ b/internal/ui/backup/progress_test.go @@ -33,11 +33,10 @@ func (p *mockPrinter) CompleteItem(messageType string, _ string, _ archiver.Item } func (p *mockPrinter) ReportTotal(_ time.Time, _ archiver.ScanStats) {} -func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, _ bool) { +func (p *mockPrinter) Finish(id restic.ID, _ time.Time, _ *archiver.Summary, _ bool) { p.Lock() defer p.Unlock() - _ = *summary // Should not be nil. p.id = id } @@ -64,7 +63,7 @@ func TestProgress(t *testing.T) { time.Sleep(10 * time.Millisecond) id := restic.NewRandomID() - prog.Finish(id, false) + prog.Finish(id, nil, false) if !prnt.dirUnchanged { t.Error(`"dir unchanged" event not seen`) diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go index 215982cd484..00d025e51f8 100644 --- a/internal/ui/backup/text.go +++ b/internal/ui/backup/text.go @@ -126,7 +126,7 @@ func (b *TextProgress) Reset() { } // Finish prints the finishing messages. 
-func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *Summary, dryRun bool) { +func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { b.P("\n") b.P("Files: %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged) b.P("Dirs: %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged) From 86897314d54f68164c9fee330deb9772b0f4f6fa Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 22 Feb 2024 22:17:54 +0100 Subject: [PATCH 034/893] backup: expose data_added_in_repo in JSON output The value describes how much data was added after compression. Previously, it was only available in the text output. --- doc/075_scripting.rst | 4 +++- internal/ui/backup/json.go | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index fda4b2d532a..1c0f7a34ffc 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -163,7 +163,9 @@ Summary is the last output line in a successful backup. +---------------------------+---------------------------------------------------------+ | ``tree_blobs`` | Number of tree blobs | +---------------------------+---------------------------------------------------------+ -| ``data_added`` | Amount of data added, in bytes | +| ``data_added`` | Amount of (uncompressed) data added, in bytes | ++---------------------------+---------------------------------------------------------+ +| ``data_added_in_repo`` | Amount of data added (after compression), in bytes | +---------------------------+---------------------------------------------------------+ | ``total_files_processed`` | Total number of files processed | +---------------------------+---------------------------------------------------------+ diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index 3393a3d48c2..b3a8b44a792 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -175,6 +175,7 @@ func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *ar DataBlobs: summary.ItemStats.DataBlobs, TreeBlobs: summary.ItemStats.TreeBlobs, DataAdded: summary.ItemStats.DataSize + summary.ItemStats.TreeSize, + DataAddedInRepo: summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo, TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged, TotalBytesProcessed: summary.ProcessedBytes, TotalDuration: time.Since(start).Seconds(), @@ -230,6 +231,7 @@ type summaryOutput struct { DataBlobs int `json:"data_blobs"` TreeBlobs int `json:"tree_blobs"` DataAdded uint64 `json:"data_added"` + DataAddedInRepo uint64 `json:"data_added_in_repo"` TotalFilesProcessed uint `json:"total_files_processed"` TotalBytesProcessed uint64 `json:"total_bytes_processed"` TotalDuration float64 `json:"total_duration"` // in seconds From 38f91d3b5e045ada840aa4a1c91deb942e9ccdbe Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 22 Feb 2024 22:25:14 +0100 Subject: [PATCH 035/893] backup: store statistics in snapshot --- cmd/restic/cmd_backup.go | 2 ++ internal/archiver/archiver.go | 18 ++++++++++++++++++ internal/restic/snapshot.go | 22 +++++++++++++++++++++- 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 4c03b7e8c2c..acc4bddb11e 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -451,6 +451,7 @@ func runBackup(ctx context.Context, opts 
BackupOptions, gopts GlobalOptions, ter } timeStamp := time.Now() + backupStart := timeStamp if opts.TimeStamp != "" { timeStamp, err = time.ParseInLocation(TimeFormat, opts.TimeStamp, time.Local) if err != nil { @@ -640,6 +641,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter snapshotOpts := archiver.SnapshotOptions{ Excludes: opts.Excludes, Tags: opts.Tags.Flatten(), + BackupStart: backupStart, Time: timeStamp, Hostname: opts.Host, ParentSnapshot: parentSnapshot, diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index b8d1d45bbd4..2be7dddd551 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -739,6 +739,7 @@ type SnapshotOptions struct { Tags restic.TagList Hostname string Excludes []string + BackupStart time.Time Time time.Time ParentSnapshot *restic.Snapshot ProgramVersion string @@ -866,6 +867,23 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps sn.Parent = opts.ParentSnapshot.ID() } sn.Tree = &rootTreeID + sn.Summary = &restic.SnapshotSummary{ + BackupStart: opts.BackupStart, + BackupEnd: time.Now(), + + FilesNew: arch.summary.Files.New, + FilesChanged: arch.summary.Files.Changed, + FilesUnmodified: arch.summary.Files.Unchanged, + DirsNew: arch.summary.Dirs.New, + DirsChanged: arch.summary.Dirs.Changed, + DirsUnmodified: arch.summary.Dirs.Unchanged, + DataBlobs: arch.summary.ItemStats.DataBlobs, + TreeBlobs: arch.summary.ItemStats.TreeBlobs, + DataAdded: arch.summary.ItemStats.DataSize + arch.summary.ItemStats.TreeSize, + DataAddedInRepo: arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo, + TotalFilesProcessed: arch.summary.Files.New + arch.summary.Files.Changed + arch.summary.Files.Unchanged, + TotalBytesProcessed: arch.summary.ProcessedBytes, + } id, err := restic.SaveSnapshot(ctx, arch.Repo, sn) if err != nil { diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go index 8cf651d96f0..5ee3088798f 100644 --- a/internal/restic/snapshot.go +++ b/internal/restic/snapshot.go @@ -25,11 +25,31 @@ type Snapshot struct { Tags []string `json:"tags,omitempty"` Original *ID `json:"original,omitempty"` - ProgramVersion string `json:"program_version,omitempty"` + ProgramVersion string `json:"program_version,omitempty"` + Summary *SnapshotSummary `json:"summary,omitempty"` id *ID // plaintext ID, used during restore } +type SnapshotSummary struct { + BackupStart time.Time `json:"backup_start"` + BackupEnd time.Time `json:"backup_end"` + + // statistics from the backup json output + FilesNew uint `json:"files_new"` + FilesChanged uint `json:"files_changed"` + FilesUnmodified uint `json:"files_unmodified"` + DirsNew uint `json:"dirs_new"` + DirsChanged uint `json:"dirs_changed"` + DirsUnmodified uint `json:"dirs_unmodified"` + DataBlobs int `json:"data_blobs"` + TreeBlobs int `json:"tree_blobs"` + DataAdded uint64 `json:"data_added"` + DataAddedInRepo uint64 `json:"data_added_in_repo"` + TotalFilesProcessed uint `json:"total_files_processed"` + TotalBytesProcessed uint64 `json:"total_bytes_processed"` +} + // NewSnapshot returns an initialized snapshot struct for the current user and // time. 
func NewSnapshot(paths []string, tags []string, hostname string, time time.Time) (*Snapshot, error) { From b6520038fd882a075cf86310e98125d7ab01e5c8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 22 Feb 2024 22:43:54 +0100 Subject: [PATCH 036/893] snapshots: Print snapshot size stored in snapshots --- cmd/restic/cmd_snapshots.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index e94f2ed9ba3..d6199d47a27 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/table" "github.com/spf13/cobra" ) @@ -163,6 +164,11 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke keepReasons[*id] = reasons[i] } } + // check if any snapshot contains a summary + hasSize := false + for _, sn := range list { + hasSize = hasSize || (sn.Summary != nil) + } // always sort the snapshots so that the newer ones are listed last sort.SliceStable(list, func(i, j int) bool { @@ -198,6 +204,9 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke tab.AddColumn("Reasons", `{{ join .Reasons "\n" }}`) } tab.AddColumn("Paths", `{{ join .Paths "\n" }}`) + if hasSize { + tab.AddColumn("Size", `{{ .Size }}`) + } } type snapshot struct { @@ -207,6 +216,7 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke Tags []string Reasons []string Paths []string + Size string } var multiline bool @@ -228,6 +238,10 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke multiline = true } + if sn.Summary != nil { + data.Size = ui.FormatBytes(sn.Summary.TotalBytesProcessed) + } + tab.AddRow(data) } From 681395955e9ae836b333420bc6697a6edbdd8922 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Feb 2024 21:46:39 +0100 Subject: [PATCH 037/893] archiver: test backup summary calculation --- internal/archiver/archiver.go | 12 +-- internal/archiver/archiver_test.go | 139 +++++++++++++++++++++-------- 2 files changed, 108 insertions(+), 43 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 2be7dddd551..39b05d0e606 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -41,12 +41,14 @@ type ItemStats struct { TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead) } +type ChangeStats struct { + New uint + Changed uint + Unchanged uint +} + type Summary struct { - Files, Dirs struct { - New uint - Changed uint - Unchanged uint - } + Files, Dirs ChangeStats ProcessedBytes uint64 ItemStats } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 0ae7ca05f06..4430f852870 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -986,9 +986,9 @@ func TestArchiverSaveDirIncremental(t *testing.T) { // bothZeroOrNeither fails the test if only one of exp, act is zero. 
func bothZeroOrNeither(tb testing.TB, exp, act uint64) { + tb.Helper() if (exp == 0 && act != 0) || (exp != 0 && act == 0) { - _, file, line, _ := runtime.Caller(1) - tb.Fatalf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + restictest.Equals(tb, exp, act) } } @@ -1008,7 +1008,7 @@ func TestArchiverSaveTree(t *testing.T) { prepare func(t testing.TB) targets []string want TestDir - stat ItemStats + stat Summary }{ { src: TestDir{ @@ -1018,7 +1018,12 @@ func TestArchiverSaveTree(t *testing.T) { want: TestDir{ "targetfile": TestFile{Content: string("foobar")}, }, - stat: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + }, }, { src: TestDir{ @@ -1030,7 +1035,12 @@ func TestArchiverSaveTree(t *testing.T) { "targetfile": TestFile{Content: string("foobar")}, "filesymlink": TestSymlink{Target: "targetfile"}, }, - stat: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + }, }, { src: TestDir{ @@ -1050,7 +1060,12 @@ func TestArchiverSaveTree(t *testing.T) { "symlink": TestSymlink{Target: "subdir"}, }, }, - stat: ItemStats{0, 0, 0, 1, 0x154, 0x16a}, + stat: Summary{ + ItemStats: ItemStats{0, 0, 0, 1, 0x154, 0x16a}, + ProcessedBytes: 0, + Files: ChangeStats{0, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + }, }, { src: TestDir{ @@ -1074,7 +1089,12 @@ func TestArchiverSaveTree(t *testing.T) { }, }, }, - stat: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{3, 0, 0}, + }, }, } @@ -1086,14 +1106,6 @@ func TestArchiverSaveTree(t *testing.T) { arch := New(repo, testFS, Options{}) - var stat ItemStats - lock := &sync.Mutex{} - arch.CompleteItem = func(item string, previous, current *restic.Node, s ItemStats, d time.Duration) { - lock.Lock() - defer lock.Unlock() - stat.Add(s) - } - wg, ctx := errgroup.WithContext(context.TODO()) repo.StartPackUploader(ctx, wg) @@ -1139,11 +1151,15 @@ func TestArchiverSaveTree(t *testing.T) { want = test.src } TestEnsureTree(context.TODO(), t, "/", repo, treeID, want) + stat := arch.summary bothZeroOrNeither(t, uint64(test.stat.DataBlobs), uint64(stat.DataBlobs)) bothZeroOrNeither(t, uint64(test.stat.TreeBlobs), uint64(stat.TreeBlobs)) bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize) bothZeroOrNeither(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo) bothZeroOrNeither(t, test.stat.TreeSizeInRepo, stat.TreeSizeInRepo) + restictest.Equals(t, test.stat.ProcessedBytes, stat.ProcessedBytes) + restictest.Equals(t, test.stat.Files, stat.Files) + restictest.Equals(t, test.stat.Dirs, stat.Dirs) }) } } @@ -1623,15 +1639,64 @@ func (f MockFile) Read(p []byte) (int, error) { func TestArchiverParent(t *testing.T) { var tests = []struct { - src TestDir - read map[string]int // tracks number of times a file must have been read + src TestDir + modify func(path string) + statInitial Summary + statSecond Summary }{ { src: TestDir{ "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, }, - read: map[string]int{ - "targetfile": 1, + statInitial: Summary{ + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + ProcessedBytes: 2102152, + }, + statSecond: Summary{ + Files: ChangeStats{0, 0, 1}, + Dirs: ChangeStats{0, 0, 
0}, + ProcessedBytes: 2102152, + }, + }, + { + src: TestDir{ + "targetDir": TestDir{ + "targetfile": TestFile{Content: string(restictest.Random(888, 1234))}, + "targetfile2": TestFile{Content: string(restictest.Random(888, 1235))}, + }, + }, + statInitial: Summary{ + Files: ChangeStats{2, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + ProcessedBytes: 2469, + }, + statSecond: Summary{ + Files: ChangeStats{0, 0, 2}, + Dirs: ChangeStats{0, 0, 1}, + ProcessedBytes: 2469, + }, + }, + { + src: TestDir{ + "targetDir": TestDir{ + "targetfile": TestFile{Content: string(restictest.Random(888, 1234))}, + }, + "targetfile2": TestFile{Content: string(restictest.Random(888, 1235))}, + }, + modify: func(path string) { + remove(t, filepath.Join(path, "targetDir", "targetfile")) + save(t, filepath.Join(path, "targetfile2"), []byte("foobar")) + }, + statInitial: Summary{ + Files: ChangeStats{2, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + ProcessedBytes: 2469, + }, + statSecond: Summary{ + Files: ChangeStats{0, 1, 0}, + Dirs: ChangeStats{0, 1, 0}, + ProcessedBytes: 6, }, }, } @@ -1653,7 +1718,7 @@ func TestArchiverParent(t *testing.T) { back := restictest.Chdir(t, tempdir) defer back() - firstSnapshot, firstSnapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + firstSnapshot, firstSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if err != nil { t.Fatal(err) } @@ -1678,33 +1743,31 @@ func TestArchiverParent(t *testing.T) { } return nil }) + restictest.Equals(t, test.statInitial.Files, summary.Files) + restictest.Equals(t, test.statInitial.Dirs, summary.Dirs) + restictest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes) + + if test.modify != nil { + test.modify(tempdir) + } opts := SnapshotOptions{ Time: time.Now(), ParentSnapshot: firstSnapshot, } - _, secondSnapshotID, _, err := arch.Snapshot(ctx, []string{"."}, opts) + testFS.bytesRead = map[string]int{} + _, secondSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } - // check that all files still been read exactly once - TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error { - file, ok := item.(TestFile) - if !ok { - return nil - } - - n, ok := testFS.bytesRead[filename] - if !ok { - t.Fatalf("file %v was not read at all", filename) - } - - if n != len(file.Content) { - t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content)) - } - return nil - }) + if test.modify == nil { + // check that no files were read this time + restictest.Equals(t, map[string]int{}, testFS.bytesRead) + } + restictest.Equals(t, test.statSecond.Files, summary.Files) + restictest.Equals(t, test.statSecond.Dirs, summary.Dirs) + restictest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes) t.Logf("second backup saved as %v", secondSnapshotID.Str()) t.Logf("testfs: %v", testFS) From a8f5684f68a8a8a9b301aae413fed9e444e0c753 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Feb 2024 22:05:15 +0100 Subject: [PATCH 038/893] archiver: test statistics in snapshot --- internal/archiver/archiver_test.go | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 4430f852870..ac0ffa6d08f 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1637,6 +1637,21 @@ func (f MockFile) Read(p []byte) (int, error) { return n, err } +func 
checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) { + restictest.Equals(t, stat.Files.New, sn.Summary.FilesNew) + restictest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged) + restictest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified) + restictest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew) + restictest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged) + restictest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified) + restictest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed) + restictest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed) + bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs)) + bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs)) + bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded)) + bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedInRepo)) +} + func TestArchiverParent(t *testing.T) { var tests = []struct { src TestDir @@ -1652,6 +1667,7 @@ func TestArchiverParent(t *testing.T) { Files: ChangeStats{1, 0, 0}, Dirs: ChangeStats{0, 0, 0}, ProcessedBytes: 2102152, + ItemStats: ItemStats{3, 0x201593, 0x201632, 1, 0, 0}, }, statSecond: Summary{ Files: ChangeStats{0, 0, 1}, @@ -1670,6 +1686,7 @@ func TestArchiverParent(t *testing.T) { Files: ChangeStats{2, 0, 0}, Dirs: ChangeStats{1, 0, 0}, ProcessedBytes: 2469, + ItemStats: ItemStats{2, 0xe1c, 0xcd9, 2, 0, 0}, }, statSecond: Summary{ Files: ChangeStats{0, 0, 2}, @@ -1692,11 +1709,13 @@ func TestArchiverParent(t *testing.T) { Files: ChangeStats{2, 0, 0}, Dirs: ChangeStats{1, 0, 0}, ProcessedBytes: 2469, + ItemStats: ItemStats{2, 0xe13, 0xcf8, 2, 0, 0}, }, statSecond: Summary{ Files: ChangeStats{0, 1, 0}, Dirs: ChangeStats{0, 1, 0}, ProcessedBytes: 6, + ItemStats: ItemStats{1, 0x305, 0x233, 2, 0, 0}, }, }, } @@ -1746,6 +1765,7 @@ func TestArchiverParent(t *testing.T) { restictest.Equals(t, test.statInitial.Files, summary.Files) restictest.Equals(t, test.statInitial.Dirs, summary.Dirs) restictest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes) + checkSnapshotStats(t, firstSnapshot, test.statInitial) if test.modify != nil { test.modify(tempdir) @@ -1756,7 +1776,7 @@ func TestArchiverParent(t *testing.T) { ParentSnapshot: firstSnapshot, } testFS.bytesRead = map[string]int{} - _, secondSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, opts) + secondSnapshot, secondSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } @@ -1768,6 +1788,7 @@ func TestArchiverParent(t *testing.T) { restictest.Equals(t, test.statSecond.Files, summary.Files) restictest.Equals(t, test.statSecond.Dirs, summary.Dirs) restictest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes) + checkSnapshotStats(t, secondSnapshot, test.statSecond) t.Logf("second backup saved as %v", secondSnapshotID.Str()) t.Logf("testfs: %v", testFS) From 6a13e451b14406a7cb3d1e3fc1dbb37b5ee16819 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Feb 2024 22:29:10 +0100 Subject: [PATCH 039/893] document snapshot statistics --- changelog/unreleased/issue-693 | 12 ++++++++ doc/045_working_with_repos.rst | 56 ++++++++++++++++++---------------- doc/075_scripting.rst | 37 ++++++++++++++++++++++ 3 files changed, 78 insertions(+), 27 deletions(-) create mode 100644 changelog/unreleased/issue-693 diff --git a/changelog/unreleased/issue-693 
b/changelog/unreleased/issue-693 new file mode 100644 index 00000000000..054ae42ed0b --- /dev/null +++ b/changelog/unreleased/issue-693 @@ -0,0 +1,12 @@ +Enhancement: Support printing snapshot size in `snapshots` command + +The `snapshots` command now supports printing the snapshot size for snapshots +created using this or a future restic version. For this, the `backup` command +now stores the backup summary statistics in the snapshot. + +The text output of the `snapshots` command only shows the snapshot size. The +other statistics are only included in the JSON output. To inspect these +statistics use `restic snapshots --json` or `restic cat snapshot `. + +https://github.com/restic/restic/issues/693 +https://github.com/restic/restic/pull/4705 diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 48e5985dc41..85c022580cc 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -18,19 +18,21 @@ Working with repositories Listing all snapshots ===================== -Now, you can list all the snapshots stored in the repository: +Now, you can list all the snapshots stored in the repository. The size column +only exists for snapshots created using restic 0.17.0 or later. It reflects the +size of the contained files at the time when the snapshot was created. .. code-block:: console $ restic -r /srv/restic-repo snapshots enter password for repository: - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work - 79766175 2015-05-08 21:40:19 kasimir /home/user/work - bdbd3439 2015-05-08 21:45:17 luigi /home/art - 590c8fc8 2015-05-08 21:47:38 kazik /srv - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------------- + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work 20.643GiB + 79766175 2015-05-08 21:40:19 kasimir /home/user/work 20.645GiB + bdbd3439 2015-05-08 21:45:17 luigi /home/art 3.141GiB + 590c8fc8 2015-05-08 21:47:38 kazik /srv 580.200MiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB You can filter the listing by directory path: @@ -38,10 +40,10 @@ You can filter the listing by directory path: $ restic -r /srv/restic-repo snapshots --path="/srv" enter password for repository: - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 590c8fc8 2015-05-08 21:47:38 kazik /srv - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + 590c8fc8 2015-05-08 21:47:38 kazik /srv 580.200MiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB Or filter by host: @@ -49,10 +51,10 @@ Or filter by host: $ restic -r /srv/restic-repo snapshots --host luigi enter password for repository: - ID Date Host Tags Directory - ---------------------------------------------------------------------- - bdbd3439 2015-05-08 21:45:17 luigi /home/art - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + bdbd3439 2015-05-08 21:45:17 luigi /home/art 3.141GiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB Combining filters is also possible. 
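
As a rough illustration of consuming the new statistics, the following hypothetical Go program (not part of this patch series; it only assumes the JSON field names `summary`, `short_id`, `total_bytes_processed` and `data_added_in_repo` that the doc/075_scripting.rst changes below document) reads `restic snapshots --json` from stdin and prints the stored sizes:

// Illustrative consumer only; field names taken from the documented JSON output.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type snapshotSummary struct {
	TotalBytesProcessed uint64 `json:"total_bytes_processed"`
	DataAddedInRepo     uint64 `json:"data_added_in_repo"`
}

type snapshot struct {
	ShortID string           `json:"short_id"`
	Summary *snapshotSummary `json:"summary,omitempty"`
}

func main() {
	// usage (hypothetical): restic -r /srv/restic-repo snapshots --json | go run .
	var snapshots []snapshot
	if err := json.NewDecoder(os.Stdin).Decode(&snapshots); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, sn := range snapshots {
		if sn.Summary == nil {
			// snapshots created by older restic versions carry no summary
			fmt.Printf("%s: no summary stored\n", sn.ShortID)
			continue
		}
		fmt.Printf("%s: %d bytes processed, %d bytes added to repo\n",
			sn.ShortID, sn.Summary.TotalBytesProcessed, sn.Summary.DataAddedInRepo)
	}
}
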
@@ -64,21 +66,21 @@ Furthermore you can group the output by the same filters (host, paths, tags): enter password for repository: snapshots for (host [kasimir]) - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work - 79766175 2015-05-08 21:40:19 kasimir /home/user/work + ID Date Host Tags Directory Size + ------------------------------------------------------------------------ + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work 20.643GiB + 79766175 2015-05-08 21:40:19 kasimir /home/user/work 20.645GiB 2 snapshots snapshots for (host [luigi]) - ID Date Host Tags Directory - ---------------------------------------------------------------------- - bdbd3439 2015-05-08 21:45:17 luigi /home/art - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + bdbd3439 2015-05-08 21:45:17 luigi /home/art 3.141GiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB 2 snapshots snapshots for (host [kazik]) - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 590c8fc8 2015-05-08 21:47:38 kazik /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + 590c8fc8 2015-05-08 21:47:38 kazik /srv 580.200MiB 1 snapshots diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 1c0f7a34ffc..86851c54b48 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -553,11 +553,48 @@ The snapshots command returns a single JSON object, an array with objects of the +---------------------+--------------------------------------------------+ | ``program_version`` | restic version used to create snapshot | +---------------------+--------------------------------------------------+ +| ``summary`` | Snapshot statistics, see "Summary object" | ++---------------------+--------------------------------------------------+ | ``id`` | Snapshot ID | +---------------------+--------------------------------------------------+ | ``short_id`` | Snapshot ID, short form | +---------------------+--------------------------------------------------+ +Summary object + +The contained statistics reflect the information at the point in time when the snapshot +was created. 
+ ++---------------------------+---------------------------------------------------------+ +| ``backup_start`` | Time at which the backup was started | ++---------------------------+---------------------------------------------------------+ +| ``backup_end`` | Time at which the backup was completed | ++---------------------------+---------------------------------------------------------+ +| ``files_new`` | Number of new files | ++---------------------------+---------------------------------------------------------+ +| ``files_changed`` | Number of files that changed | ++---------------------------+---------------------------------------------------------+ +| ``files_unmodified`` | Number of files that did not change | ++---------------------------+---------------------------------------------------------+ +| ``dirs_new`` | Number of new directories | ++---------------------------+---------------------------------------------------------+ +| ``dirs_changed`` | Number of directories that changed | ++---------------------------+---------------------------------------------------------+ +| ``dirs_unmodified`` | Number of directories that did not change | ++---------------------------+---------------------------------------------------------+ +| ``data_blobs`` | Number of data blobs | ++---------------------------+---------------------------------------------------------+ +| ``tree_blobs`` | Number of tree blobs | ++---------------------------+---------------------------------------------------------+ +| ``data_added`` | Amount of (uncompressed) data added, in bytes | ++---------------------------+---------------------------------------------------------+ +| ``data_added_in_repo`` | Amount of data added (after compression), in bytes | ++---------------------------+---------------------------------------------------------+ +| ``total_files_processed`` | Total number of files processed | ++---------------------------+---------------------------------------------------------+ +| ``total_bytes_processed`` | Total number of bytes processed | ++---------------------------+---------------------------------------------------------+ + stats ----- From 09ce1b4e5811bc5e988511172198fc5ea1a3a9d9 Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Sat, 24 Feb 2024 13:16:25 -0700 Subject: [PATCH 040/893] Create helper for SecurityDescriptor related functions --- internal/fs/sd_windows.go | 471 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 471 insertions(+) create mode 100644 internal/fs/sd_windows.go diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go new file mode 100644 index 00000000000..3616317661e --- /dev/null +++ b/internal/fs/sd_windows.go @@ -0,0 +1,471 @@ +package fs + +import ( + "bytes" + "encoding/binary" + "fmt" + "sync" + "syscall" + "unicode/utf16" + "unsafe" + + "github.com/restic/restic/internal/errors" + "golang.org/x/sys/windows" +) + +// GetSecurityDescriptor takes the path of the file and returns the SecurityDescriptor for the file. +// This needs admin permissions or SeBackupPrivilege for getting the full SD. +// If there are no admin permissions, only the current user's owner, group and DACL will be got. 
+func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err error) { + onceBackup.Do(enableBackupPrivilege) + + var sd *windows.SECURITY_DESCRIPTOR + + if lowerPrivileges { + sd, err = getNamedSecurityInfoLow(sd, err, filePath) + } else { + sd, err = getNamedSecurityInfoHigh(sd, err, filePath) + } + if err != nil { + if isHandlePrivilegeNotHeldError(err) { + lowerPrivileges = true + sd, err = getNamedSecurityInfoLow(sd, err, filePath) + if err != nil { + return nil, fmt.Errorf("get low-level named security info failed with: %w", err) + } + } else { + return nil, fmt.Errorf("get named security info failed with: %w", err) + } + } + + sdBytes, err := securityDescriptorStructToBytes(sd) + if err != nil { + return nil, fmt.Errorf("convert security descriptor to bytes failed: %w", err) + } + return &sdBytes, nil +} + +// SetSecurityDescriptor sets the SecurityDescriptor for the file at the specified path. +// This needs admin permissions or SeRestorePrivilege, SeSecurityPrivilege and SeTakeOwnershipPrivilege +// for setting the full SD. +// If there are no admin permissions/required privileges, only the DACL from the SD can be set and +// owner and group will be set based on the current user. +func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { + onceRestore.Do(enableRestorePrivilege) + // Set the security descriptor on the file + sd, err := SecurityDescriptorBytesToStruct(*securityDescriptor) + if err != nil { + return fmt.Errorf("error converting bytes to security descriptor: %w", err) + } + + owner, _, err := sd.Owner() + if err != nil { + //Do not set partial values. + owner = nil + } + group, _, err := sd.Group() + if err != nil { + //Do not set partial values. + group = nil + } + dacl, _, err := sd.DACL() + if err != nil { + //Do not set partial values. + dacl = nil + } + sacl, _, err := sd.SACL() + if err != nil { + //Do not set partial values. + sacl = nil + } + + if lowerPrivileges { + err = setNamedSecurityInfoLow(filePath, dacl) + } else { + err = setNamedSecurityInfoHigh(filePath, owner, group, dacl, sacl) + } + + if err != nil { + if isHandlePrivilegeNotHeldError(err) { + lowerPrivileges = true + err = setNamedSecurityInfoLow(filePath, dacl) + if err != nil { + return fmt.Errorf("set low-level named security info failed with: %w", err) + } + } else { + return fmt.Errorf("set named security info failed with: %w", err) + } + } + return nil +} + +var ( + onceBackup sync.Once + onceRestore sync.Once + + // SeBackupPrivilege allows the application to bypass file and directory ACLs to back up files and directories. + SeBackupPrivilege = "SeBackupPrivilege" + // SeRestorePrivilege allows the application to bypass file and directory ACLs to restore files and directories. + SeRestorePrivilege = "SeRestorePrivilege" + // SeSecurityPrivilege allows read and write access to all SACLs. + SeSecurityPrivilege = "SeSecurityPrivilege" + // SeTakeOwnershipPrivilege allows the application to take ownership of files and directories, regardless of the permissions set on them. 
+ SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" + + backupPrivilegeError error + restorePrivilegeError error + lowerPrivileges bool +) + +// Flags for backup and restore with admin permissions +var highSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.SACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.BACKUP_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION | windows.PROTECTED_SACL_SECURITY_INFORMATION + +// Flags for backup without admin permissions. If there are no admin permissions, only the current user's owner, group and DACL will be backed up. +var lowBackupSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION + +// Flags for restore without admin permissions. If there are no admin permissions, only the DACL from the SD can be restored and owner and group will be set based on the current user. +var lowRestoreSecurityFlags windows.SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION + +// getNamedSecurityInfoHigh gets the higher level SecurityDescriptor which requires admin permissions. +func getNamedSecurityInfoHigh(sd *windows.SECURITY_DESCRIPTOR, err error, filePath string) (*windows.SECURITY_DESCRIPTOR, error) { + return windows.GetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, highSecurityFlags) +} + +// getNamedSecurityInfoLow gets the lower level SecurityDescriptor which requires no admin permissions. +func getNamedSecurityInfoLow(sd *windows.SECURITY_DESCRIPTOR, err error, filePath string) (*windows.SECURITY_DESCRIPTOR, error) { + return windows.GetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, lowBackupSecurityFlags) +} + +// setNamedSecurityInfoHigh sets the higher level SecurityDescriptor which requires admin permissions. +func setNamedSecurityInfoHigh(filePath string, owner *windows.SID, group *windows.SID, dacl *windows.ACL, sacl *windows.ACL) error { + return windows.SetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, highSecurityFlags, owner, group, dacl, sacl) +} + +// setNamedSecurityInfoLow sets the lower level SecurityDescriptor which requires no admin permissions. +func setNamedSecurityInfoLow(filePath string, dacl *windows.ACL) error { + return windows.SetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, lowRestoreSecurityFlags, nil, nil, dacl, nil) +} + +// enableBackupPrivilege enables privilege for backing up security descriptors +func enableBackupPrivilege() { + err := enableProcessPrivileges([]string{SeBackupPrivilege}) + if err != nil { + backupPrivilegeError = fmt.Errorf("error enabling backup privilege: %w", err) + } +} + +// enableBackupPrivilege enables privilege for restoring security descriptors +func enableRestorePrivilege() { + err := enableProcessPrivileges([]string{SeRestorePrivilege, SeSecurityPrivilege, SeTakeOwnershipPrivilege}) + if err != nil { + restorePrivilegeError = fmt.Errorf("error enabling restore/security privilege: %w", err) + } +} + +// DisableBackupPrivileges disables privileges that are needed for backup operations. 
+// They may be reenabled if GetSecurityDescriptor is called again. +func DisableBackupPrivileges() error { + //Reset the once so that backup privileges can be enabled again if needed. + onceBackup = sync.Once{} + return enableDisableProcessPrivilege([]string{SeBackupPrivilege}, 0) +} + +// DisableRestorePrivileges disables privileges that are needed for restore operations. +// They may be reenabled if SetSecurityDescriptor is called again. +func DisableRestorePrivileges() error { + //Reset the once so that restore privileges can be enabled again if needed. + onceRestore = sync.Once{} + return enableDisableProcessPrivilege([]string{SeRestorePrivilege, SeSecurityPrivilege}, 0) +} + +// isHandlePrivilegeNotHeldError checks if the error is ERROR_PRIVILEGE_NOT_HELD +func isHandlePrivilegeNotHeldError(err error) bool { + // Use a type assertion to check if the error is of type syscall.Errno + if errno, ok := err.(syscall.Errno); ok { + // Compare the error code to the expected value + return errno == windows.ERROR_PRIVILEGE_NOT_HELD + } + return false +} + +// IsAdmin checks if current user is an administrator. +func IsAdmin() (isAdmin bool, err error) { + var sid *windows.SID + err = windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, 2, windows.SECURITY_BUILTIN_DOMAIN_RID, windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, &sid) + if err != nil { + return false, errors.Errorf("sid error: %s", err) + } + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false, errors.Errorf("token membership error: %s", err) + } + return member, nil +} + +// The code below was adapted from github.com/Microsoft/go-winio under MIT license. + +// The MIT License (MIT) + +// Copyright (c) 2015 Microsoft + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") +) + +// Do the interface allocations only once for common +// Errno values. 
+const ( + errnoErrorIOPending = 997 + + //revive:disable-next-line:var-naming ALL_CAPS + SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED + + //revive:disable-next-line:var-naming ALL_CAPS + ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED +) + +var ( + errErrorIOPending error = syscall.Errno(errnoErrorIOPending) + errErrorEinval error = syscall.EINVAL + + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. +type PrivilegeError struct { + privileges []uint64 +} + +// SecurityDescriptorBytesToStruct converts the security descriptor bytes representation +// into a pointer to windows SECURITY_DESCRIPTOR. +func SecurityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) { + if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { + return nil, fmt.Errorf("securityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) + } + s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) + return s, nil +} + +// securityDescriptorStructToBytes converts the pointer to windows SECURITY_DESCRIPTOR +// into a security descriptor bytes representation. +func securityDescriptorStructToBytes(sd *windows.SECURITY_DESCRIPTOR) ([]byte, error) { + b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) + return b, nil +} + +// Error returns the string message for the error. +func (e *PrivilegeError) Error() string { + s := "Could not enable privilege " + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + if backupPrivilegeError != nil { + s += " backupPrivilegeError:" + backupPrivilegeError.Error() + } + if restorePrivilegeError != nil { + s += " restorePrivilegeError:" + restorePrivilegeError.Error() + } + return s +} + +func mapPrivileges(names []string) ([]uint64, error) { + privileges := make([]uint64, 0, len(names)) + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// enableProcessPrivileges enables privileges globally for the process. +func enableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. 
+func DisableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, 0) +} + +func enableDisableProcessPrivilege(names []string, action uint32) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + + p := windows.CurrentProcess() + var token windows.Token + err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) + if err != nil { + return err + } + + defer func() { + _ = token.Close() + }() + return adjustPrivileges(token, privileges, action) +} + +func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { + var b bytes.Buffer + _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + for _, p := range privileges { + _ = binary.Write(&b, binary.LittleEndian, p) + _ = binary.Write(&b, binary.LittleEndian, action) + } + prevState := make([]byte, b.Len()) + reqSize := uint32(0) + success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) + if !success { + return err + } + if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno + return &PrivilegeError{privileges} + } + return nil +} + +func getPrivilegeName(luid uint64) string { + var nameBuffer [256]uint16 + bufSize := uint32(len(nameBuffer)) + err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) + if err != nil { + return fmt.Sprintf("", luid) + } + + var displayNameBuffer [256]uint16 + displayBufSize := uint32(len(displayNameBuffer)) + var langID uint32 + err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) + if err != nil { + return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) + } + + return string(utf16.Decode(displayNameBuffer[:displayBufSize])) +} + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } + r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if !success { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageID *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageID) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageID *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), 
uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errErrorEinval + case errnoErrorIOPending: + return errErrorIOPending + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} From e3e59fef242ead47d2a227f88d73a7c638c4e9a5 Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Sat, 24 Feb 2024 13:22:34 -0700 Subject: [PATCH 041/893] Fix CombineErrors and fillExtendedAttr error handling --- internal/errors/errors.go | 27 +++++++++++++++++---------- internal/restic/node.go | 7 +------ 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/internal/errors/errors.go b/internal/errors/errors.go index 3c669f86135..68a48b325d9 100644 --- a/internal/errors/errors.go +++ b/internal/errors/errors.go @@ -43,22 +43,29 @@ func Is(x, y error) bool { return stderrors.Is(x, y) } // unwrap errors returned by [Join]. func Unwrap(err error) error { return stderrors.Unwrap(err) } -// CombineErrors combines multiple errors into a single error. -func CombineErrors(errors ...error) error { +// CombineErrors combines multiple errors into a single error after filtering out any nil values. +// If no errors are passed, it returns nil. +// If one error is passed, it simply returns that same error. +func CombineErrors(errors ...error) (err error) { var combinedErrorMsg string - - for _, err := range errors { - if err != nil { + var multipleErrors bool + for _, errVal := range errors { + if errVal != nil { if combinedErrorMsg != "" { combinedErrorMsg += "; " // Separate error messages with a delimiter + multipleErrors = true + } else { + // Set the first error + err = errVal } - combinedErrorMsg += err.Error() + combinedErrorMsg += errVal.Error() } } - if combinedErrorMsg == "" { - return nil // No errors, return nil + return nil // If no errors, return nil + } else if !multipleErrors { + return err // If only one error, return that first error + } else { + return fmt.Errorf("multiple errors occurred: [%s]", combinedErrorMsg) } - - return fmt.Errorf("multiple errors occurred: [%s]", combinedErrorMsg) } diff --git a/internal/restic/node.go b/internal/restic/node.go index cbe9ef36394..8fc06df7999 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -719,12 +719,7 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { allowExtended, err := node.fillGenericAttributes(path, fi, stat) if allowExtended { // Skip processing ExtendedAttributes if allowExtended is false. 
- errEx := node.fillExtendedAttributes(path) - if err == nil { - err = errEx - } else { - debug.Log("Error filling extended attributes for %v at %v : %v", node.Name, path, errEx) - } + err = errors.CombineErrors(err, node.fillExtendedAttributes(path)) } return err } From 70cf8e37887cd574c055337f37427eca94d54052 Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Sat, 24 Feb 2024 13:25:28 -0700 Subject: [PATCH 042/893] Add support for backup/restore of security descriptors --- internal/restic/node.go | 4 +++- internal/restic/node_windows.go | 21 ++++++++++++++++++--- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/internal/restic/node.go b/internal/restic/node.go index 8fc06df7999..a0e658b9b2b 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -48,13 +48,15 @@ const ( TypeCreationTime GenericAttributeType = "windows.creation_time" // TypeFileAttributes is the GenericAttributeType used for storing file attributes for windows files within the generic attributes map. TypeFileAttributes GenericAttributeType = "windows.file_attributes" + // TypeSecurityDescriptor is the GenericAttributeType used for storing security descriptors including owner, group, discretionary access control list (DACL), system access control list (SACL)) for windows files within the generic attributes map. + TypeSecurityDescriptor GenericAttributeType = "windows.security_descriptor" // Generic Attributes for other OS types should be defined here. ) // init is called when the package is initialized. Any new GenericAttributeTypes being created must be added here as well. func init() { - storeGenericAttributeType(TypeCreationTime, TypeFileAttributes) + storeGenericAttributeType(TypeCreationTime, TypeFileAttributes, TypeSecurityDescriptor) } // genericAttributesForOS maintains a map of known genericAttributesForOS to the OSType diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 5875c3ccd89..f4797c0d73f 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -23,6 +23,9 @@ type WindowsAttributes struct { CreationTime *syscall.Filetime `generic:"creation_time"` // FileAttributes is used for storing file attributes for windows files. FileAttributes *uint32 `generic:"file_attributes"` + // SecurityDescriptor is used for storing security descriptors which includes + // owner, group, discretionary access control list (DACL), system access control list (SACL)) + SecurityDescriptor *[]byte `generic:"security_descriptor"` } var ( @@ -114,7 +117,7 @@ func (s statT) mtim() syscall.Timespec { func (s statT) ctim() syscall.Timespec { // Windows does not have the concept of a "change time" in the sense Unix uses it, so we're using the LastWriteTime here. - return syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds()) + return s.mtim() } // restoreGenericAttributes restores generic attributes for Windows @@ -137,6 +140,11 @@ func (node Node) restoreGenericAttributes(path string, warn func(msg string)) (e errs = append(errs, fmt.Errorf("error restoring file attributes for: %s : %v", path, err)) } } + if windowsAttributes.SecurityDescriptor != nil { + if err := fs.SetSecurityDescriptor(path, windowsAttributes.SecurityDescriptor); err != nil { + errs = append(errs, fmt.Errorf("error restoring security descriptor for: %s : %v", path, err)) + } + } HandleUnknownGenericAttributesFound(unknownAttribs, warn) return errors.CombineErrors(errs...) 
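
To see how the pieces fit together: the hunk below stores the raw security descriptor bytes in the generic attributes at backup time, and the hunk above feeds them back to fs.SetSecurityDescriptor on restore. A minimal sketch of that round trip follows; the copySecurityDescriptor helper is hypothetical and would have to live inside the restic module (internal/fs is an internal package), and only GetSecurityDescriptor and SetSecurityDescriptor are taken from this patch series:

//go:build windows

// Hypothetical example package; assumes it is placed inside the restic module.
package example

import (
	"fmt"

	"github.com/restic/restic/internal/fs"
)

// copySecurityDescriptor copies owner, group, DACL and (with sufficient
// privileges) SACL from src to dst, mirroring the backup/restore round trip
// performed via the windows.security_descriptor generic attribute.
func copySecurityDescriptor(src, dst string) error {
	// Backup side: read the raw security descriptor bytes.
	sd, err := fs.GetSecurityDescriptor(src)
	if err != nil {
		return fmt.Errorf("get security descriptor for %s: %w", src, err)
	}
	// Restore side: apply the stored bytes to the target file.
	if err := fs.SetSecurityDescriptor(dst, sd); err != nil {
		return fmt.Errorf("set security descriptor for %s: %w", dst, err)
	}
	return nil
}
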
@@ -270,11 +278,18 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT // Do not process file attributes and created time for windows directories like // C:, D: // Filepath.Clean(path) ends with '\' for Windows root drives only. + var sd *[]byte + if node.Type == "file" || node.Type == "dir" { + if sd, err = fs.GetSecurityDescriptor(path); err != nil { + return true, err + } + } // Add Windows attributes node.GenericAttributes, err = WindowsAttrsToGenericAttributes(WindowsAttributes{ - CreationTime: getCreationTime(fi, path), - FileAttributes: &stat.FileAttributes, + CreationTime: getCreationTime(fi, path), + FileAttributes: &stat.FileAttributes, + SecurityDescriptor: sd, }) } return true, err From 90916f53ded94c95a858458a192c0b91f96e01be Mon Sep 17 00:00:00 2001 From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com> Date: Sat, 24 Feb 2024 13:27:01 -0700 Subject: [PATCH 043/893] Add test cases for security descriptors --- internal/fs/sd_windows_test.go | 60 ++++++++++++++ internal/fs/sd_windows_test_helpers.go | 109 +++++++++++++++++++++++++ internal/restic/node_windows_test.go | 57 +++++++++++++ 3 files changed, 226 insertions(+) create mode 100644 internal/fs/sd_windows_test.go create mode 100644 internal/fs/sd_windows_test_helpers.go diff --git a/internal/fs/sd_windows_test.go b/internal/fs/sd_windows_test.go new file mode 100644 index 00000000000..e4e37cb4a6e --- /dev/null +++ b/internal/fs/sd_windows_test.go @@ -0,0 +1,60 @@ +//go:build windows +// +build windows + +package fs + +import ( + "encoding/base64" + "os" + "path/filepath" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/test" +) + +func Test_SetGetFileSecurityDescriptors(t *testing.T) { + tempDir := t.TempDir() + testfilePath := filepath.Join(tempDir, "testfile.txt") + // create temp file + testfile, err := os.Create(testfilePath) + if err != nil { + t.Fatalf("failed to create temporary file: %s", err) + } + defer func() { + err := testfile.Close() + if err != nil { + t.Logf("Error closing file %s: %v\n", testfilePath, err) + } + }() + + testSecurityDescriptors(t, TestFileSDs, testfilePath) +} + +func Test_SetGetFolderSecurityDescriptors(t *testing.T) { + tempDir := t.TempDir() + testfolderPath := filepath.Join(tempDir, "testfolder") + // create temp folder + err := os.Mkdir(testfolderPath, os.ModeDir) + if err != nil { + t.Fatalf("failed to create temporary file: %s", err) + } + + testSecurityDescriptors(t, TestDirSDs, testfolderPath) +} + +func testSecurityDescriptors(t *testing.T, testSDs []string, testPath string) { + for _, testSD := range testSDs { + sdInputBytes, err := base64.StdEncoding.DecodeString(testSD) + test.OK(t, errors.Wrapf(err, "Error decoding SD: %s", testPath)) + + err = SetSecurityDescriptor(testPath, &sdInputBytes) + test.OK(t, errors.Wrapf(err, "Error setting file security descriptor for: %s", testPath)) + + var sdOutputBytes *[]byte + sdOutputBytes, err = GetSecurityDescriptor(testPath) + test.OK(t, errors.Wrapf(err, "Error getting file security descriptor for: %s", testPath)) + + CompareSecurityDescriptors(t, testPath, sdInputBytes, *sdOutputBytes) + } +} diff --git a/internal/fs/sd_windows_test_helpers.go b/internal/fs/sd_windows_test_helpers.go new file mode 100644 index 00000000000..8774087963e --- /dev/null +++ b/internal/fs/sd_windows_test_helpers.go @@ -0,0 +1,109 @@ +//go:build windows +// +build windows + +package fs + +import ( + "os/user" + "testing" + + "github.com/restic/restic/internal/errors" + 
"github.com/restic/restic/internal/test" + "golang.org/x/sys/windows" +) + +var ( + TestFileSDs = []string{"AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAfAAEAAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABAUAP8BHwABAQAAAAAABRIAAAAAEBgA/wEfAAECAAAAAAAFIAAAACACAAAAECQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=", + "AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAyAAHAAAAAAAUAKkAEgABAQAAAAAABQcAAAAAABQAiQASAAEBAAAAAAAFBwAAAAAAJACpABIAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar7QMAAAAAJAC/ARMAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar6gMAAAAAFAD/AR8AAQEAAAAAAAUSAAAAAAAYAP8BHwABAgAAAAAABSAAAAAgAgAAAAAkAP8BHwABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAA", + "AQAUvBQAAAAwAAAA7AAAAEwAAAABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAoAAFAAAAAAAkAP8BHwABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABAUAP8BHwABAQAAAAAABRIAAAAAEBgA/wEfAAECAAAAAAAFIAAAACACAAAAECQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAACAHQAAwAAAAKAJAC/AQIAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtgQAAALAJAC/AQMAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDPgkAAAJAJAD/AQ8AAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtQQAAA==", + } + TestDirSDs = []string{"AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAfAAEAAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABMUAP8BHwABAQAAAAAABRIAAAAAExgA/wEfAAECAAAAAAAFIAAAACACAAAAEyQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=", + "AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIA3AAIAAAAAAIUAKkAEgABAQAAAAAABQcAAAAAAxQAiQASAAEBAAAAAAAFBwAAAAAAJACpABIAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar7QMAAAAAJAC/ARMAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar6gMAAAALFAC/ARMAAQEAAAAAAAMAAAAAABMUAP8BHwABAQAAAAAABRIAAAAAExgA/wEfAAECAAAAAAAFIAAAACACAAAAEyQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=", + "AQAUvBQAAAAwAAAA7AAAAEwAAAABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAoAAFAAAAAAAkAP8BHwABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABMUAP8BHwABAQAAAAAABRIAAAAAExgA/wEfAAECAAAAAAAFIAAAACACAAAAEyQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAACAHQAAwAAAAKAJAC/AQIAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtgQAAALAJAC/AQMAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDPgkAAAJAJAD/AQ8AAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtQQAAA==", + } +) + +// CompareSecurityDescriptors runs tests for comparing 2 security descriptors in []byte format. +func CompareSecurityDescriptors(t *testing.T, testPath string, sdInputBytes, sdOutputBytes []byte) { + sdInput, err := SecurityDescriptorBytesToStruct(sdInputBytes) + test.OK(t, errors.Wrapf(err, "Error converting SD to struct for: %s", testPath)) + + sdOutput, err := SecurityDescriptorBytesToStruct(sdOutputBytes) + test.OK(t, errors.Wrapf(err, "Error converting SD to struct for: %s", testPath)) + + isAdmin, err := IsAdmin() + test.OK(t, errors.Wrapf(err, "Error checking if user is admin: %s", testPath)) + + var ownerExpected *windows.SID + var defaultedOwnerExpected bool + var groupExpected *windows.SID + var defaultedGroupExpected bool + var daclExpected *windows.ACL + var defaultedDaclExpected bool + var saclExpected *windows.ACL + var defaultedSaclExpected bool + + // The Dacl is set correctly whether or not application is running as admin. 
+ daclExpected, defaultedDaclExpected, err = sdInput.DACL() + test.OK(t, errors.Wrapf(err, "Error getting input dacl for: %s", testPath)) + + if isAdmin { + // If application is running as admin, all sd values including owner, group, dacl, sacl are set correctly during restore. + // Hence we will use the input values for comparison with the output values. + ownerExpected, defaultedOwnerExpected, err = sdInput.Owner() + test.OK(t, errors.Wrapf(err, "Error getting input owner for: %s", testPath)) + groupExpected, defaultedGroupExpected, err = sdInput.Group() + test.OK(t, errors.Wrapf(err, "Error getting input group for: %s", testPath)) + saclExpected, defaultedSaclExpected, err = sdInput.SACL() + test.OK(t, errors.Wrapf(err, "Error getting input sacl for: %s", testPath)) + } else { + // If application is not running as admin, owner and group are set as current user's SID/GID during restore and sacl is empty. + // Get the current user + user, err := user.Current() + test.OK(t, errors.Wrapf(err, "Could not get current user for: %s", testPath)) + // Get current user's SID + currentUserSID, err := windows.StringToSid(user.Uid) + test.OK(t, errors.Wrapf(err, "Error getting output group for: %s", testPath)) + // Get current user's Group SID + currentGroupSID, err := windows.StringToSid(user.Gid) + test.OK(t, errors.Wrapf(err, "Error getting output group for: %s", testPath)) + + // Set owner and group as current user's SID and GID during restore. + ownerExpected = currentUserSID + defaultedOwnerExpected = false + groupExpected = currentGroupSID + defaultedGroupExpected = false + + // If application is not running as admin, SACL is returned empty. + saclExpected = nil + defaultedSaclExpected = false + } + // Now do all the comparisons + // Get owner SID from output file + ownerOut, defaultedOwnerOut, err := sdOutput.Owner() + test.OK(t, errors.Wrapf(err, "Error getting output owner for: %s", testPath)) + // Compare owner SIDs. We must use the Equals method for comparison as a syscall is made for comparing SIDs. + test.Assert(t, ownerExpected.Equals(ownerOut), "Owner from SDs read from test path don't match: %s, cur:%s, exp: %s", testPath, ownerExpected.String(), ownerOut.String()) + test.Equals(t, defaultedOwnerExpected, defaultedOwnerOut, "Defaulted for owner from SDs read from test path don't match: %s", testPath) + + // Get group SID from output file + groupOut, defaultedGroupOut, err := sdOutput.Group() + test.OK(t, errors.Wrapf(err, "Error getting output group for: %s", testPath)) + // Compare group SIDs. We must use the Equals method for comparison as a syscall is made for comparing SIDs. 
+	test.Assert(t, groupExpected.Equals(groupOut), "Group from SDs read from test path don't match: %s, cur:%s, exp: %s", testPath, groupExpected.String(), groupOut.String())
+	test.Equals(t, defaultedGroupExpected, defaultedGroupOut, "Defaulted for group from SDs read from test path don't match: %s", testPath)
+
+	// Get dacl from output file
+	daclOut, defaultedDaclOut, err := sdOutput.DACL()
+	test.OK(t, errors.Wrapf(err, "Error getting output dacl for: %s", testPath))
+	// Compare dacls
+	test.Equals(t, daclExpected, daclOut, "DACL from SDs read from test path don't match: %s", testPath)
+	test.Equals(t, defaultedDaclExpected, defaultedDaclOut, "Defaulted for DACL from SDs read from test path don't match: %s", testPath)
+
+	// Get sacl from output file
+	saclOut, defaultedSaclOut, err := sdOutput.SACL()
+	test.OK(t, errors.Wrapf(err, "Error getting output sacl for: %s", testPath))
+	// Compare sacls
+	test.Equals(t, saclExpected, saclOut, "SACL from SDs read from test path don't match: %s", testPath)
+	test.Equals(t, defaultedSaclExpected, defaultedSaclOut, "Defaulted for SACL from SDs read from test path don't match: %s", testPath)
+}
diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go
index 501d5a98a26..5fd1fe37621 100644
--- a/internal/restic/node_windows_test.go
+++ b/internal/restic/node_windows_test.go
@@ -4,6 +4,7 @@
 package restic

 import (
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -12,10 +13,66 @@ import (
 	"testing"

 	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/fs"
 	"github.com/restic/restic/internal/test"
 	"golang.org/x/sys/windows"
 )

+func TestRestoreSecurityDescriptors(t *testing.T) {
+	t.Parallel()
+	tempDir := t.TempDir()
+	for i, sd := range fs.TestFileSDs {
+		testRestoreSecurityDescriptor(t, sd, tempDir, "file", fmt.Sprintf("testfile%d", i))
+	}
+	for i, sd := range fs.TestDirSDs {
+		testRestoreSecurityDescriptor(t, sd, tempDir, "dir", fmt.Sprintf("testdir%d", i))
+	}
+}
+
+func testRestoreSecurityDescriptor(t *testing.T, sd string, tempDir, fileType, fileName string) {
+	// Decode the encoded string SD to get the security descriptor input in bytes.
+	sdInputBytes, err := base64.StdEncoding.DecodeString(sd)
+	test.OK(t, errors.Wrapf(err, "Error decoding SD for: %s", fileName))
+	// Wrap the security descriptor bytes in windows attributes and convert to generic attributes.
+	genericAttributes, err := WindowsAttrsToGenericAttributes(WindowsAttributes{CreationTime: nil, FileAttributes: nil, SecurityDescriptor: &sdInputBytes})
+	test.OK(t, errors.Wrapf(err, "Error constructing windows attributes for: %s", fileName))
+	// Construct a Node with the generic attributes.
+	expectedNode := getNode(fileName, fileType, genericAttributes)
+
+	// Restore the file/dir and restore the meta data including the security descriptors.
+	testPath, node := restoreAndGetNode(t, tempDir, expectedNode, false)
+	// Get the security descriptor from the node constructed from the file info of the restored path.
+	sdByteFromRestoredNode := getWindowsAttr(t, testPath, node).SecurityDescriptor
+
+	// Get the security descriptor for the test path after the restore.
+	sdBytesFromRestoredPath, err := fs.GetSecurityDescriptor(testPath)
+	test.OK(t, errors.Wrapf(err, "Error while getting the security descriptor for: %s", testPath))
+
+	// Compare the input SD and the SD got from the restored file.
+	fs.CompareSecurityDescriptors(t, testPath, sdInputBytes, *sdBytesFromRestoredPath)
+	// Compare the SD got from node constructed from the restored file info and the SD got directly from the restored file.
+	fs.CompareSecurityDescriptors(t, testPath, *sdByteFromRestoredNode, *sdBytesFromRestoredPath)
+}
+
+func getNode(name string, fileType string, genericAttributes map[GenericAttributeType]json.RawMessage) Node {
+	return Node{
+		Name:              name,
+		Type:              fileType,
+		Mode:              0644,
+		ModTime:           parseTime("2024-02-21 6:30:01.111"),
+		AccessTime:        parseTime("2024-02-22 7:31:02.222"),
+		ChangeTime:        parseTime("2024-02-23 8:32:03.333"),
+		GenericAttributes: genericAttributes,
+	}
+}
+
+func getWindowsAttr(t *testing.T, testPath string, node *Node) WindowsAttributes {
+	windowsAttributes, unknownAttribs, err := genericAttributesToWindowsAttrs(node.GenericAttributes)
+	test.OK(t, errors.Wrapf(err, "Error getting windows attr from generic attr: %s", testPath))
+	test.Assert(t, len(unknownAttribs) == 0, "Unknown attribs found: %s for: %s", unknownAttribs, testPath)
+	return windowsAttributes
+}
+
 func TestRestoreCreationTime(t *testing.T) {
 	t.Parallel()
 	path := t.TempDir()

From c0a1b9ada51f0d4f03ebd05e45b18b72c1c1142d Mon Sep 17 00:00:00 2001
From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com>
Date: Sat, 24 Feb 2024 13:28:18 -0700
Subject: [PATCH 044/893] Update docs for security descriptors

---
 changelog/unreleased/pull-4611 | 2 +-
 doc/040_backup.rst             | 7 ++++++-
 doc/050_restore.rst            | 5 +++++
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/changelog/unreleased/pull-4611 b/changelog/unreleased/pull-4611
index 940de9c26fc..a3c7a24d0c8 100644
--- a/changelog/unreleased/pull-4611
+++ b/changelog/unreleased/pull-4611
@@ -1,7 +1,7 @@
 Enhancement: Back up windows created time and file attributes like hidden flag

 Restic did not back up windows-specific meta-data like created time and file attributes like hidden flag.
-Restic now backs up file created time and file attributes like hidden, readonly and encrypted flag when backing up files and folders on windows.
+Restic now backs up file created time and file attributes like hidden, readonly and encrypted flag when backing up files and folders on Windows.

 https://github.com/restic/restic/pull/4611

diff --git a/doc/040_backup.rst b/doc/040_backup.rst
index d0bd4b2e2c6..b697e38bda2 100644
--- a/doc/040_backup.rst
+++ b/doc/040_backup.rst
@@ -481,12 +481,17 @@ written, and the next backup needs to write new metadata again.
 If you really want to save the access time for files and directories, you can
 pass the ``--with-atime`` option to the ``backup`` command.

+Backing up full security descriptors on windows is only possible when the user
+has ``SeBackupPrivilege`` privilege or is running as admin. This is a restriction
+of windows not restic.
+If neither of these conditions is met, only the owner, group and DACL will
+be backed up.
+
 Note that ``restic`` does not back up some metadata associated with files. Of
 particular note are:

 * File creation date on Unix platforms
 * Inode flags on Unix platforms
-* File ownership and ACLs on Windows

 Reading data from a command
 ***************************

diff --git a/doc/050_restore.rst b/doc/050_restore.rst
index 916b11c8673..5ab0286f10b 100644
--- a/doc/050_restore.rst
+++ b/doc/050_restore.rst
@@ -72,6 +72,11 @@ Restoring symbolic links on windows is only possible when the user has
 ``SeCreateSymbolicLinkPrivilege`` privilege or is running as admin. This is a
 restriction of windows not restic.

+Restoring full security descriptors on windows is only possible when the user has
+``SeRestorePrivilege``, ``SeSecurityPrivilege`` and ``SeTakeOwnershipPrivilege``
+privileges or is running as admin. This is a restriction of windows not restic.
+If neither of these conditions is met, only the DACL will be restored.
+
 By default, restic does not restore files as sparse. Use ``restore --sparse`` to
 enable the creation of sparse files if supported by the filesystem. Then restic
 will restore long runs of zero bytes as holes in the corresponding files.

From 5764300022f7afceb2e54babe5ffed6e56b7aff9 Mon Sep 17 00:00:00 2001
From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com>
Date: Sat, 24 Feb 2024 13:47:49 -0700
Subject: [PATCH 045/893] Add changelog and fix lint error

---
 changelog/unreleased/pull-4708 | 12 ++++++++++++
 internal/errors/errors.go      |  3 +--
 2 files changed, 13 insertions(+), 2 deletions(-)
 create mode 100644 changelog/unreleased/pull-4708

diff --git a/changelog/unreleased/pull-4708 b/changelog/unreleased/pull-4708
new file mode 100644
index 00000000000..2c666c30044
--- /dev/null
+++ b/changelog/unreleased/pull-4708
@@ -0,0 +1,12 @@
+Enhancement: Back up and restore SecurityDescriptors on Windows
+
+Restic did not back up SecurityDescriptors of files on Windows.
+Restic now backs up and restores SecurityDescriptors (which includes owner, group,
+discretionary access control list (DACL), system access control list (SACL))
+when backing up files and folders on Windows. This requires the user to be
+a member of backup operators or the application to be run as admin.
+If that is not the case, only the current user's owner, group and DACL will be
+backed up, and during restore only the DACL of the backed-up file will be restored,
+while the owner and group will be set to the current user.
+
+https://github.com/restic/restic/pull/4708

diff --git a/internal/errors/errors.go b/internal/errors/errors.go
index 68a48b325d9..ca36611ebe8 100644
--- a/internal/errors/errors.go
+++ b/internal/errors/errors.go
@@ -65,7 +65,6 @@ func CombineErrors(errors ...error) (err error) {
 		return nil // If no errors, return nil
 	} else if !multipleErrors {
 		return err // If only one error, return that first error
-	} else {
-		return fmt.Errorf("multiple errors occurred: [%s]", combinedErrorMsg)
 	}
+	return fmt.Errorf("multiple errors occurred: [%s]", combinedErrorMsg)
 }

From 062d40898723997ccb63acf8c36fb6a25c3f0aa7 Mon Sep 17 00:00:00 2001
From: Aneesh Nireshwalia <99904+aneesh-n@users.noreply.github.com>
Date: Sat, 24 Feb 2024 14:23:04 -0700
Subject: [PATCH 046/893] Clean up SecurityDescriptor helper

---
 internal/fs/sd_windows.go | 58 +++++++++++++++++++--------------------
 1 file changed, 28 insertions(+), 30 deletions(-)

diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go
index 3616317661e..9d53b39748d 100644
--- a/internal/fs/sd_windows.go
+++ b/internal/fs/sd_windows.go
@@ -13,6 +13,33 @@ import (
 	"golang.org/x/sys/windows"
 )

+var (
+	onceBackup  sync.Once
+	onceRestore sync.Once
+
+	// SeBackupPrivilege allows the application to bypass file and directory ACLs to back up files and directories.
+	SeBackupPrivilege = "SeBackupPrivilege"
+	// SeRestorePrivilege allows the application to bypass file and directory ACLs to restore files and directories.
+	SeRestorePrivilege = "SeRestorePrivilege"
+	// SeSecurityPrivilege allows read and write access to all SACLs.
+ SeSecurityPrivilege = "SeSecurityPrivilege" + // SeTakeOwnershipPrivilege allows the application to take ownership of files and directories, regardless of the permissions set on them. + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" + + backupPrivilegeError error + restorePrivilegeError error + lowerPrivileges bool +) + +// Flags for backup and restore with admin permissions +var highSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.SACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.BACKUP_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION | windows.PROTECTED_SACL_SECURITY_INFORMATION + +// Flags for backup without admin permissions. If there are no admin permissions, only the current user's owner, group and DACL will be backed up. +var lowBackupSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION + +// Flags for restore without admin permissions. If there are no admin permissions, only the DACL from the SD can be restored and owner and group will be set based on the current user. +var lowRestoreSecurityFlags windows.SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION + // GetSecurityDescriptor takes the path of the file and returns the SecurityDescriptor for the file. // This needs admin permissions or SeBackupPrivilege for getting the full SD. // If there are no admin permissions, only the current user's owner, group and DACL will be got. @@ -87,6 +114,7 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { if err != nil { if isHandlePrivilegeNotHeldError(err) { + // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. lowerPrivileges = true err = setNamedSecurityInfoLow(filePath, dacl) if err != nil { @@ -99,33 +127,6 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { return nil } -var ( - onceBackup sync.Once - onceRestore sync.Once - - // SeBackupPrivilege allows the application to bypass file and directory ACLs to back up files and directories. - SeBackupPrivilege = "SeBackupPrivilege" - // SeRestorePrivilege allows the application to bypass file and directory ACLs to restore files and directories. - SeRestorePrivilege = "SeRestorePrivilege" - // SeSecurityPrivilege allows read and write access to all SACLs. - SeSecurityPrivilege = "SeSecurityPrivilege" - // SeTakeOwnershipPrivilege allows the application to take ownership of files and directories, regardless of the permissions set on them. 
- SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" - - backupPrivilegeError error - restorePrivilegeError error - lowerPrivileges bool -) - -// Flags for backup and restore with admin permissions -var highSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.SACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.BACKUP_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION | windows.PROTECTED_SACL_SECURITY_INFORMATION - -// Flags for backup without admin permissions. If there are no admin permissions, only the current user's owner, group and DACL will be backed up. -var lowBackupSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION - -// Flags for restore without admin permissions. If there are no admin permissions, only the DACL from the SD can be restored and owner and group will be set based on the current user. -var lowRestoreSecurityFlags windows.SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION - // getNamedSecurityInfoHigh gets the higher level SecurityDescriptor which requires admin permissions. func getNamedSecurityInfoHigh(sd *windows.SECURITY_DESCRIPTOR, err error, filePath string) (*windows.SECURITY_DESCRIPTOR, error) { return windows.GetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, highSecurityFlags) @@ -464,8 +465,5 @@ func errnoErr(e syscall.Errno) error { case errnoErrorIOPending: return errErrorIOPending } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } From e71660cd1e5e990476911201bd336dafbaa13bdf Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Feb 2024 20:40:52 +0100 Subject: [PATCH 047/893] backup: rename data_added_in_repo statistic to data_added_packed --- doc/075_scripting.rst | 4 ++-- internal/archiver/archiver.go | 2 +- internal/archiver/archiver_test.go | 2 +- internal/restic/snapshot.go | 2 +- internal/ui/backup/json.go | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 86851c54b48..d51516cbe71 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -165,7 +165,7 @@ Summary is the last output line in a successful backup. +---------------------------+---------------------------------------------------------+ | ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ -| ``data_added_in_repo`` | Amount of data added (after compression), in bytes | +| ``data_added_packed`` | Amount of data added (after compression), in bytes | +---------------------------+---------------------------------------------------------+ | ``total_files_processed`` | Total number of files processed | +---------------------------+---------------------------------------------------------+ @@ -588,7 +588,7 @@ was created. 
+---------------------------+---------------------------------------------------------+ | ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ -| ``data_added_in_repo`` | Amount of data added (after compression), in bytes | +| ``data_added_packed`` | Amount of data added (after compression), in bytes | +---------------------------+---------------------------------------------------------+ | ``total_files_processed`` | Total number of files processed | +---------------------------+---------------------------------------------------------+ diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 39b05d0e606..73cd7a12e43 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -882,7 +882,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps DataBlobs: arch.summary.ItemStats.DataBlobs, TreeBlobs: arch.summary.ItemStats.TreeBlobs, DataAdded: arch.summary.ItemStats.DataSize + arch.summary.ItemStats.TreeSize, - DataAddedInRepo: arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo, + DataAddedPacked: arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo, TotalFilesProcessed: arch.summary.Files.New + arch.summary.Files.Changed + arch.summary.Files.Unchanged, TotalBytesProcessed: arch.summary.ProcessedBytes, } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index ac0ffa6d08f..5391a10360c 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1649,7 +1649,7 @@ func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) { bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs)) bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs)) bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded)) - bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedInRepo)) + bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedPacked)) } func TestArchiverParent(t *testing.T) { diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go index 5ee3088798f..39ed806276a 100644 --- a/internal/restic/snapshot.go +++ b/internal/restic/snapshot.go @@ -45,7 +45,7 @@ type SnapshotSummary struct { DataBlobs int `json:"data_blobs"` TreeBlobs int `json:"tree_blobs"` DataAdded uint64 `json:"data_added"` - DataAddedInRepo uint64 `json:"data_added_in_repo"` + DataAddedPacked uint64 `json:"data_added_packed"` TotalFilesProcessed uint `json:"total_files_processed"` TotalBytesProcessed uint64 `json:"total_bytes_processed"` } diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index b3a8b44a792..a14c7ccecaf 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -175,7 +175,7 @@ func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *ar DataBlobs: summary.ItemStats.DataBlobs, TreeBlobs: summary.ItemStats.TreeBlobs, DataAdded: summary.ItemStats.DataSize + summary.ItemStats.TreeSize, - DataAddedInRepo: summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo, + DataAddedPacked: summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo, TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged, TotalBytesProcessed: summary.ProcessedBytes, TotalDuration: 
time.Since(start).Seconds(), @@ -231,7 +231,7 @@ type summaryOutput struct { DataBlobs int `json:"data_blobs"` TreeBlobs int `json:"tree_blobs"` DataAdded uint64 `json:"data_added"` - DataAddedInRepo uint64 `json:"data_added_in_repo"` + DataAddedPacked uint64 `json:"data_added_packed"` TotalFilesProcessed uint `json:"total_files_processed"` TotalBytesProcessed uint64 `json:"total_bytes_processed"` TotalDuration float64 `json:"total_duration"` // in seconds From e1a588b75c97d64ef293b7d78944fad8ddc13e43 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:17:20 +0000 Subject: [PATCH 048/893] build(deps): bump docker/login-action Bumps [docker/login-action](https://github.com/docker/login-action) from 3d58c274f17dffee475a5520cbe67f0a882c4dbb to 5139682d94efc37792e6b54386b5b470a68a4737. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/3d58c274f17dffee475a5520cbe67f0a882c4dbb...5139682d94efc37792e6b54386b5b470a68a4737) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a1976784930..d9df0b8ba75 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -25,7 +25,7 @@ jobs: uses: actions/checkout@v4 - name: Log in to the Container registry - uses: docker/login-action@3d58c274f17dffee475a5520cbe67f0a882c4dbb + uses: docker/login-action@5139682d94efc37792e6b54386b5b470a68a4737 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} From 70c8aaa303fa555dc2099984941b100e61b8b36a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:17:27 +0000 Subject: [PATCH 049/893] build(deps): bump golangci/golangci-lint-action from 3 to 4 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3 to 4. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v3...v4) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 091d42e8a8c..134656d8a3b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -257,7 +257,7 @@ jobs: uses: actions/checkout@v4 - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. version: v1.56.1 From f185c80cf04c1464f139051f4ec7d99caa9a4542 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:24:46 +0000 Subject: [PATCH 050/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/storage/azblob Bumps [github.com/Azure/azure-sdk-for-go/sdk/storage/azblob](https://github.com/Azure/azure-sdk-for-go) from 1.2.1 to 1.3.1. 
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azidentity/v1.2.1...sdk/azcore/v1.3.1) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/storage/azblob dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 16 ++++++++-------- go.sum | 33 ++++++++++++++++----------------- 2 files changed, 24 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index afcbc427b99..31c0c6b27c7 100644 --- a/go.mod +++ b/go.mod @@ -2,9 +2,9 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.37.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.2.0 github.com/cenkalti/backoff/v4 v4.2.1 @@ -41,20 +41,20 @@ require ( cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.5 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect github.com/google/s2a-go v0.1.7 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -66,7 +66,7 @@ require ( github.com/minio/md5-simd v1.1.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect diff --git a/go.sum b/go.sum index fb70ac9d518..8bc8593c9c7 100644 --- a/go.sum +++ b/go.sum @@ -9,17 +9,17 @@ cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= 
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= github.com/Backblaze/blazer v0.6.1/go.mod h1:7/jrGx4O6OKOto6av+hLwelPR8rwZ+PLxQ5ZOiYAjwY= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -67,8 +67,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -104,8 +104,8 @@ github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0Z github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= @@ -142,8 +142,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= @@ -241,7 +241,6 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From b5a9b5d0bc8ddce2ad3da3d985206799127df9e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:25:00 +0000 Subject: [PATCH 051/893] build(deps): bump github.com/klauspost/compress from 1.17.6 to 1.17.7 Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.17.6 to 1.17.7. 
- [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.17.6...v1.17.7) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index afcbc427b99..60d01810270 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/go-ole/go-ole v1.3.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/klauspost/compress v1.17.6 + github.com/klauspost/compress v1.17.7 github.com/minio/minio-go/v7 v7.0.66 github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 diff --git a/go.sum b/go.sum index fb70ac9d518..d8d2d69b51b 100644 --- a/go.sum +++ b/go.sum @@ -117,8 +117,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= From 79e8ddac3ffce1d62e2f7affa7e7182ace104c75 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:25:41 +0000 Subject: [PATCH 052/893] build(deps): bump github.com/spf13/cobra from 1.7.0 to 1.8.0 Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.7.0 to 1.8.0. - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.7.0...v1.8.0) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index afcbc427b99..fa4f40e7838 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/pkg/sftp v1.13.6 github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013 github.com/restic/chunker v0.4.0 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.18.0 diff --git a/go.sum b/go.sum index fb70ac9d518..8a5c5f91555 100644 --- a/go.sum +++ b/go.sum @@ -37,7 +37,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -165,8 +164,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc= From 8e7f29ae28ca7eb7916dc0989303ddd65a14c6f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:25:47 +0000 Subject: [PATCH 053/893] build(deps): bump golang.org/x/oauth2 from 0.16.0 to 0.17.0 Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.16.0 to 0.17.0. - [Commits](https://github.com/golang/oauth2/compare/v0.16.0...v0.17.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index afcbc427b99..be0c9018601 100644 --- a/go.mod +++ b/go.mod @@ -25,12 +25,12 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.18.0 - golang.org/x/net v0.20.0 - golang.org/x/oauth2 v0.16.0 + golang.org/x/crypto v0.19.0 + golang.org/x/net v0.21.0 + golang.org/x/oauth2 v0.17.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.16.0 - golang.org/x/term v0.16.0 + golang.org/x/sys v0.17.0 + golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 google.golang.org/api v0.157.0 diff --git a/go.sum b/go.sum index fb70ac9d518..b7745bf8653 100644 --- a/go.sum +++ b/go.sum @@ -203,8 +203,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -222,11 +222,11 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -249,13 +249,13 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From b48b1fa2c9a81f5cfd88841025510e7f7f2cb9ac Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 3 Mar 2024 16:18:13 +0530 Subject: [PATCH 054/893] docker: update the base image to golang:1.22-alpine --- docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 978da796040..02b53261ffa 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20-alpine AS builder +FROM golang:1.22-alpine AS builder WORKDIR /go/src/github.com/restic/restic From 608116817b3025bcc9539e4aff64c8badfc8fd23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Mar 2024 13:59:23 +0000 Subject: [PATCH 055/893] build(deps): bump cloud.google.com/go/storage from 1.37.0 to 1.39.0 Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.37.0 to 1.39.0. - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.37.0...spanner/v1.39.0) --- updated-dependencies: - dependency-name: cloud.google.com/go/storage dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 30 ++++++++++++++-------------- go.sum | 62 +++++++++++++++++++++++++++++----------------------------- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/go.mod b/go.mod index a7381e07a9b..6e546974e5c 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,7 @@ module github.com/restic/restic require ( - cloud.google.com/go/storage v1.37.0 + cloud.google.com/go/storage v1.39.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 @@ -33,21 +33,21 @@ require ( golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.157.0 + google.golang.org/api v0.166.0 ) require ( cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect + cloud.google.com/go/iam v1.1.6 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -56,7 +56,7 @@ require ( github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect @@ -71,16 +71,16 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect + go.opentelemetry.io/otel v1.23.0 // indirect + go.opentelemetry.io/otel/metric v1.23.0 // indirect + go.opentelemetry.io/otel/trace v1.23.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/grpc v1.60.1 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect + 
google.golang.org/grpc v1.61.1 // indirect google.golang.org/protobuf v1.32.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index dc62f67c952..668d6a339de 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,14 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= -cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= +cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= +cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= +cloud.google.com/go/storage v1.39.0 h1:brbjUa4hbDHhpQf48tjqMaXEV+f1OGoaTmQau9tmCsA= +cloud.google.com/go/storage v1.39.0/go.mod h1:OAEj/WZwUYjA3YHQ10/YcN9ttGuEpLwvaoyBXIPikEk= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= @@ -36,7 +36,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -60,8 +60,8 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 
h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= @@ -107,8 +107,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= +github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -184,17 +184,17 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= +go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= +go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= +go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= +go.opentelemetry.io/otel/metric v1.23.0/go.mod 
h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= +go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -275,8 +275,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -google.golang.org/api v0.157.0 h1:ORAeqmbrrozeyw5NjnMxh7peHO0UzV4wWYSwZeCUb20= -google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g= +google.golang.org/api v0.166.0 h1:6m4NUwrZYhAaVIHZWxaKjw1L1vNAjtMwORmKRyEEo24= +google.golang.org/api v0.166.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -284,19 +284,19 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= -google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 h1:KHBtwE+eQc3+NxpjmRFlQ3pJQ2FNnhhgB9xOV8kyBuU= -google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o= +google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 5974a7949777bb0b17ec39b5ccadfb29897f2358 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 28 Jan 2024 16:15:32 +0100 Subject: [PATCH 056/893] features: add basic feature flag implementation --- cmd/restic/cmd_features.go | 51 +++++++++++++ internal/feature/features.go | 140 +++++++++++++++++++++++++++++++++++ internal/feature/registry.go | 15 ++++ internal/feature/testing.go | 29 ++++++++ 4 files changed, 235 insertions(+) create mode 100644 cmd/restic/cmd_features.go create mode 100644 internal/feature/features.go create mode 100644 internal/feature/registry.go create mode 100644 internal/feature/testing.go diff --git a/cmd/restic/cmd_features.go b/cmd/restic/cmd_features.go new file mode 100644 index 00000000000..b1544b9d8b3 --- /dev/null +++ b/cmd/restic/cmd_features.go @@ -0,0 +1,51 @@ +package main + +import ( + "fmt" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" + "github.com/restic/restic/internal/ui/table" + + "github.com/spf13/cobra" +) + +// FIXME explain semantics + +var featuresCmd = &cobra.Command{ + Use: "features", + Short: "Print list of feature flags", + Long: ` +The "features" command prints a list of supported feature flags. + +EXIT STATUS +=========== + +Exit status is 0 if the command was successful, and non-zero if there was any error. 
+`, + Hidden: true, + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.Fatal("the feature command expects no arguments") + } + + fmt.Printf("All Feature Flags:\n") + flags := feature.Flag.List() + + tab := table.New() + tab.AddColumn("Name", "{{ .Name }}") + tab.AddColumn("Type", "{{ .Type }}") + tab.AddColumn("Default", "{{ .Default }}") + tab.AddColumn("Description", "{{ .Description }}") + + for _, flag := range flags { + tab.AddRow(flag) + } + return tab.Write(globalOptions.stdout) + }, +} + +func init() { + cmdRoot.AddCommand(featuresCmd) +} diff --git a/internal/feature/features.go b/internal/feature/features.go new file mode 100644 index 00000000000..1e1f3785c9d --- /dev/null +++ b/internal/feature/features.go @@ -0,0 +1,140 @@ +package feature + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +type state string +type FlagName string + +const ( + // Alpha features are disabled by default. They do not guarantee any backwards compatibility and may change in arbitrary ways between restic versions. + Alpha state = "alpha" + // Beta features are enabled by default. They may still change, but incompatible changes should be avoided. + Beta state = "beta" + // Stable features are always enabled + Stable state = "stable" + // Deprecated features are always disabled + Deprecated state = "deprecated" +) + +type FlagDesc struct { + Type state + Description string +} + +type FlagSet struct { + flags map[FlagName]*FlagDesc + enabled map[FlagName]bool +} + +func New() *FlagSet { + return &FlagSet{} +} + +func getDefault(phase state) bool { + switch phase { + case Alpha, Deprecated: + return false + case Beta, Stable: + return true + default: + panic("unknown feature phase") + } +} + +func (f *FlagSet) SetFlags(flags map[FlagName]FlagDesc) { + f.flags = map[FlagName]*FlagDesc{} + f.enabled = map[FlagName]bool{} + + for name, flag := range flags { + fcopy := flag + f.flags[name] = &fcopy + f.enabled[name] = getDefault(fcopy.Type) + } +} + +func (f *FlagSet) Apply(flags string) error { + if flags == "" { + return nil + } + + selection := make(map[string]bool) + + for _, flag := range strings.Split(flags, ",") { + parts := strings.SplitN(flag, "=", 2) + + name := parts[0] + value := "true" + if len(parts) == 2 { + value = parts[1] + } + + isEnabled, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("failed to parse value %q for feature flag %v: %w", value, name, err) + } + + selection[name] = isEnabled + } + + for name, value := range selection { + fname := FlagName(name) + flag := f.flags[fname] + if flag == nil { + return fmt.Errorf("unknown feature flag %q", name) + } + + switch flag.Type { + case Alpha, Beta: + f.enabled[fname] = value + case Stable: + // FIXME print warning + case Deprecated: + // FIXME print warning + default: + panic("unknown feature phase") + } + } + + return nil +} + +func (f *FlagSet) Enabled(name FlagName) bool { + isEnabled, ok := f.enabled[name] + if !ok { + panic(fmt.Sprintf("unknown feature flag %v", name)) + } + + return isEnabled +} + +// Help contains information about a feature. 
+type Help struct { + Name string + Type string + Default bool + Description string +} + +func (f *FlagSet) List() []Help { + var help []Help + + for name, flag := range f.flags { + help = append(help, Help{ + Name: string(name), + Type: string(flag.Type), + Default: getDefault(flag.Type), + Description: flag.Description, + }) + } + + sort.Slice(help, func(i, j int) bool { + return strings.Compare(help[i].Name, help[j].Name) < 0 + }) + + return help +} diff --git a/internal/feature/registry.go b/internal/feature/registry.go new file mode 100644 index 00000000000..7a9cbf560c5 --- /dev/null +++ b/internal/feature/registry.go @@ -0,0 +1,15 @@ +package feature + +// Flag is named such that checking for a feature uses `feature.Flag.Enabled(feature.ExampleFeature)`. +var Flag = New() + +// flag names are written in kebab-case +const ( + ExampleFeature FlagName = "example-feature" +) + +func init() { + Flag.SetFlags(map[FlagName]FlagDesc{ + ExampleFeature: {Type: Alpha, Description: "just for testing"}, + }) +} diff --git a/internal/feature/testing.go b/internal/feature/testing.go new file mode 100644 index 00000000000..c13f5250932 --- /dev/null +++ b/internal/feature/testing.go @@ -0,0 +1,29 @@ +package feature + +import ( + "fmt" + "testing" +) + +// TestSetFlag temporarily sets a feature flag to the given value until the +// returned function is called. +// +// Usage +// ``` +// defer TestSetFlag(t, features.Flags, features.ExampleFlag, true)() +// ``` +func TestSetFlag(t *testing.T, f *FlagSet, flag FlagName, value bool) func() { + current := f.Enabled(flag) + + if err := f.Apply(fmt.Sprintf("%s=%v", flag, value)); err != nil { + // not reachable + panic(err) + } + + return func() { + if err := f.Apply(fmt.Sprintf("%s=%v", flag, current)); err != nil { + // not reachable + panic(err) + } + } +} From 1c77c51a03f583d2e5169665e74b47668a2fe35c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 28 Jan 2024 16:21:22 +0100 Subject: [PATCH 057/893] features: initialize based on RESTIC_FEATURES environment variable --- cmd/restic/main.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/restic/main.go b/cmd/restic/main.go index b31ce1bb400..1a11abc4099 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -14,6 +14,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/options" "github.com/restic/restic/internal/restic" ) @@ -103,10 +104,16 @@ func main() { // we can show the logs log.SetOutput(logBuffer) + err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES")) + if err != nil { + fmt.Fprintln(os.Stderr, err) + Exit(1) + } + debug.Log("main %#v", os.Args) debug.Log("restic %s compiled with %v on %v/%v", version, runtime.Version(), runtime.GOOS, runtime.GOARCH) - err := cmdRoot.ExecuteContext(internalGlobalCtx) + err = cmdRoot.ExecuteContext(internalGlobalCtx) switch { case restic.IsAlreadyLocked(err): From 70839155f247c74935d2ee900790a3a8fc4099d2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 12 Feb 2024 21:34:37 +0100 Subject: [PATCH 058/893] features: add tests --- internal/feature/features_test.go | 139 ++++++++++++++++++++++++++++++ internal/feature/testing_test.go | 19 ++++ 2 files changed, 158 insertions(+) create mode 100644 internal/feature/features_test.go create mode 100644 internal/feature/testing_test.go diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go new file mode 100644 index 
00000000000..3611ac99855 --- /dev/null +++ b/internal/feature/features_test.go @@ -0,0 +1,139 @@ +package feature_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/restic/restic/internal/feature" + rtest "github.com/restic/restic/internal/test" +) + +var ( + alpha = feature.FlagName("alpha-feature") + beta = feature.FlagName("beta-feature") + stable = feature.FlagName("stable-feature") + deprecated = feature.FlagName("deprecated-feature") +) + +var testFlags = map[feature.FlagName]feature.FlagDesc{ + alpha: { + Type: feature.Alpha, + Description: "alpha", + }, + beta: { + Type: feature.Beta, + Description: "beta", + }, + stable: { + Type: feature.Stable, + Description: "stable", + }, + deprecated: { + Type: feature.Deprecated, + Description: "deprecated", + }, +} + +func buildTestFlagSet() *feature.FlagSet { + flags := feature.New() + flags.SetFlags(testFlags) + return flags +} + +func TestFeatureDefaults(t *testing.T) { + flags := buildTestFlagSet() + for _, exp := range []struct { + flag feature.FlagName + value bool + }{ + {alpha, false}, + {beta, true}, + {stable, true}, + {deprecated, false}, + } { + rtest.Assert(t, flags.Enabled(exp.flag) == exp.value, "expected flag %v to have value %v got %v", exp.flag, exp.value, flags.Enabled(exp.flag)) + } +} + +func TestEmptyApply(t *testing.T) { + flags := buildTestFlagSet() + rtest.OK(t, flags.Apply("")) + + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + rtest.Assert(t, flags.Enabled(beta), "expected beta feature to be enabled") +} + +func TestFeatureApply(t *testing.T) { + flags := buildTestFlagSet() + rtest.OK(t, flags.Apply(string(alpha))) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", alpha))) + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", alpha))) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled again") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", beta))) + rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", stable))) + rtest.Assert(t, flags.Enabled(stable), "expected stable feature to remain enabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", deprecated))) + rtest.Assert(t, !flags.Enabled(deprecated), "expected deprecated feature to remain disabled") +} + +func TestFeatureMultipleApply(t *testing.T) { + flags := buildTestFlagSet() + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true,%s=false", alpha, beta))) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") +} + +func TestFeatureApplyInvalid(t *testing.T) { + flags := buildTestFlagSet() + + err := flags.Apply("invalid-flag") + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "unknown feature flag"), "expected unknown feature flag error, got: %v", err) + + err = flags.Apply(fmt.Sprintf("%v=invalid", alpha)) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "failed to parse value"), "expected parsing error, got: %v", err) +} + +func assertPanic(t *testing.T) { + if r := recover(); r == nil { + t.Fatal("should have panicked") + } +} + +func TestFeatureQueryInvalid(t *testing.T) { + defer assertPanic(t) + + flags := buildTestFlagSet() + flags.Enabled("invalid-flag") +} + +func TestFeatureSetInvalidPhase(t *testing.T) { + defer 
assertPanic(t) + + flags := feature.New() + flags.SetFlags(map[feature.FlagName]feature.FlagDesc{ + "invalid": { + Type: "invalid", + }, + }) +} + +func TestFeatureList(t *testing.T) { + flags := buildTestFlagSet() + + rtest.Equals(t, []feature.Help{ + {string(alpha), string(feature.Alpha), false, "alpha"}, + {string(beta), string(feature.Beta), true, "beta"}, + {string(deprecated), string(feature.Deprecated), false, "deprecated"}, + {string(stable), string(feature.Stable), true, "stable"}, + }, flags.List()) +} diff --git a/internal/feature/testing_test.go b/internal/feature/testing_test.go new file mode 100644 index 00000000000..f11b4bae40d --- /dev/null +++ b/internal/feature/testing_test.go @@ -0,0 +1,19 @@ +package feature_test + +import ( + "testing" + + "github.com/restic/restic/internal/feature" + rtest "github.com/restic/restic/internal/test" +) + +func TestSetFeatureFlag(t *testing.T) { + flags := buildTestFlagSet() + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + + restore := feature.TestSetFlag(t, flags, alpha, true) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + + restore() + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled again") +} From fe68d2cafb1098b7bee16983be57492ee91388cc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 17 Feb 2024 21:41:07 +0100 Subject: [PATCH 059/893] add feature flag documentation --- changelog/unreleased/issue-4601 | 9 +++++++++ cmd/restic/cmd_features.go | 11 +++++++++-- doc/047_tuning_backup_parameters.rst | 28 +++++++++++++++++++++++++++- 3 files changed, 45 insertions(+), 3 deletions(-) create mode 100644 changelog/unreleased/issue-4601 diff --git a/changelog/unreleased/issue-4601 b/changelog/unreleased/issue-4601 new file mode 100644 index 00000000000..f99dbe18722 --- /dev/null +++ b/changelog/unreleased/issue-4601 @@ -0,0 +1,9 @@ +Enhancement: Add support for feature flags + +Restic now supports feature flags that can be used to enable and disable +experimental features. The flags can be set using the environment variable +`RESTIC_FEATURES`. To get a list of currently supported feature flags, +run the `features` command. + +https://github.com/restic/restic/issues/4601 +https://github.com/restic/restic/pull/4666 diff --git a/cmd/restic/cmd_features.go b/cmd/restic/cmd_features.go index b1544b9d8b3..8125d3e2628 100644 --- a/cmd/restic/cmd_features.go +++ b/cmd/restic/cmd_features.go @@ -10,14 +10,21 @@ import ( "github.com/spf13/cobra" ) -// FIXME explain semantics - var featuresCmd = &cobra.Command{ Use: "features", Short: "Print list of feature flags", Long: ` The "features" command prints a list of supported feature flags. +To pass feature flags to restic, set the RESTIC_FEATURES environment variable +to "featureA=true,featureB=false". Specifying an unknown feature flag is an error. + +A feature can either be in alpha, beta, stable or deprecated state. +An _alpha_ feature is disabled by default and may change in arbitrary ways between restic versions or be removed. +A _beta_ feature is enabled by default, but still can change in minor ways or be removed. +A _stable_ feature is always enabled and cannot be disabled. The flag will be removed in a future restic version. +A _deprecated_ feature is always disabled and cannot be enabled. The flag will be removed in a future restic version. 
+ EXIT STATUS =========== diff --git a/doc/047_tuning_backup_parameters.rst b/doc/047_tuning_backup_parameters.rst index d8fb2c9b671..8456693e73e 100644 --- a/doc/047_tuning_backup_parameters.rst +++ b/doc/047_tuning_backup_parameters.rst @@ -26,7 +26,8 @@ When you start a backup, restic will concurrently count the number of files and their total size, which is used to estimate how long it will take. This will cause some extra I/O, which can slow down backups of network file systems or FUSE mounts. To avoid this overhead at the cost of not seeing a progress -estimate, use the ``--no-scan`` option which disables this file scanning. +estimate, use the ``--no-scan`` option of the ``backup`` command which disables +this file scanning. Backend Connections =================== @@ -111,3 +112,28 @@ to disk. An operating system usually caches file write operations in memory and them to disk after a short delay. As larger pack files take longer to upload, this increases the chance of these files being written to disk. This can increase disk wear for SSDs. + + +Feature Flags +============= + +Feature flags allow disabling or enabling certain experimental restic features. The flags +can be specified via the ``RESTIC_FEATURES`` environment variable. The variable expects a +comma-separated list of ``key[=value],key2[=value2]`` pairs. The key is the name of a feature +flag. The value is optional and can contain either the value ``true`` (default if omitted) +or ``false``. The list of currently available feature flags is shown by the ``features`` +command. + +Restic will return an error if an invalid feature flag is specified. No longer relevant +feature flags may be removed in a future restic release. Thus, make sure to no longer +specify these flags. + +A feature can either be in alpha, beta, stable or deprecated state. + +- An _alpha_ feature is disabled by default and may change in arbitrary ways between restic + versions or be removed. +- A _beta_ feature is enabled by default, but still can change in minor ways or be removed. +- A _stable_ feature is always enabled and cannot be disabled. This allows for a transition + period after which the flag will be removed in a future restic version. +- A _deprecated_ feature is always disabled and cannot be enabled. The flag will be removed + in a future restic version.
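For context, here is a minimal sketch of how code elsewhere in restic could query one of the flags described above. The helper function below is hypothetical and only for illustration; the `feature.Flag` variable, its `Enabled` method, and the `feature.ExampleFeature` flag name are the pieces defined by the patches in this series (restic's main function applies `RESTIC_FEATURES` via `feature.Flag.Apply` before any command runs).

    package main

    import (
    	"fmt"

    	"github.com/restic/restic/internal/feature"
    )

    // maybeRunExperimental is a hypothetical caller: it gates an experimental
    // code path on the example-feature flag, which users toggle via the
    // RESTIC_FEATURES environment variable, e.g.
    // RESTIC_FEATURES=example-feature=true.
    func maybeRunExperimental() {
    	if feature.Flag.Enabled(feature.ExampleFeature) {
    		fmt.Println("example-feature is enabled, taking the experimental path")
    		return
    	}
    	fmt.Println("example-feature is disabled (alpha flags default to off)")
    }

    func main() {
    	maybeRunExperimental()
    }

Since `example-feature` is registered as an alpha flag, the check returns false unless the flag has been explicitly enabled through `RESTIC_FEATURES`.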
From a9b64cd7ad92a44a9db00a917e381b762c7d7acb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 17 Feb 2024 21:50:25 +0100 Subject: [PATCH 060/893] features: print warning for stable/deprecated feature flags --- cmd/restic/main.go | 4 +++- internal/feature/features.go | 6 +++--- internal/feature/features_test.go | 32 +++++++++++++++++++++---------- internal/feature/testing.go | 8 ++++++-- 4 files changed, 34 insertions(+), 16 deletions(-) diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 1a11abc4099..a4acb1cab38 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -104,7 +104,9 @@ func main() { // we can show the logs log.SetOutput(logBuffer) - err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES")) + err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) { + fmt.Fprintln(os.Stderr, s) + }) if err != nil { fmt.Fprintln(os.Stderr, err) Exit(1) diff --git a/internal/feature/features.go b/internal/feature/features.go index 1e1f3785c9d..e3b625e928b 100644 --- a/internal/feature/features.go +++ b/internal/feature/features.go @@ -57,7 +57,7 @@ func (f *FlagSet) SetFlags(flags map[FlagName]FlagDesc) { } } -func (f *FlagSet) Apply(flags string) error { +func (f *FlagSet) Apply(flags string, logWarning func(string)) error { if flags == "" { return nil } @@ -92,9 +92,9 @@ func (f *FlagSet) Apply(flags string) error { case Alpha, Beta: f.enabled[fname] = value case Stable: - // FIXME print warning + logWarning(fmt.Sprintf("feature flag %q is always enabled and will be removed in a future release", fname)) case Deprecated: - // FIXME print warning + logWarning(fmt.Sprintf("feature flag %q is always disabled and will be removed in a future release", fname)) default: panic("unknown feature phase") } diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go index 3611ac99855..f5d405fa7d4 100644 --- a/internal/feature/features_test.go +++ b/internal/feature/features_test.go @@ -56,9 +56,13 @@ func TestFeatureDefaults(t *testing.T) { } } +func panicIfCalled(msg string) { + panic(msg) +} + func TestEmptyApply(t *testing.T) { flags := buildTestFlagSet() - rtest.OK(t, flags.Apply("")) + rtest.OK(t, flags.Apply("", panicIfCalled)) rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") rtest.Assert(t, flags.Enabled(beta), "expected beta feature to be enabled") @@ -66,29 +70,37 @@ func TestEmptyApply(t *testing.T) { func TestFeatureApply(t *testing.T) { flags := buildTestFlagSet() - rtest.OK(t, flags.Apply(string(alpha))) + rtest.OK(t, flags.Apply(string(alpha), panicIfCalled)) rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", alpha))) + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", alpha), panicIfCalled)) rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", alpha))) + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", alpha), panicIfCalled)) rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled again") - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", beta))) + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", beta), panicIfCalled)) rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", stable))) + logMsg := "" + log := func(msg string) { + logMsg = msg + } + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", stable), log)) rtest.Assert(t, flags.Enabled(stable), "expected stable
feature to remain enabled") + rtest.Assert(t, strings.Contains(logMsg, string(stable)), "unexpected log message for stable flag: %v", logMsg) - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", deprecated))) + logMsg = "" + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", deprecated), log)) rtest.Assert(t, !flags.Enabled(deprecated), "expected deprecated feature to remain disabled") + rtest.Assert(t, strings.Contains(logMsg, string(deprecated)), "unexpected log message for deprecated flag: %v", logMsg) } func TestFeatureMultipleApply(t *testing.T) { flags := buildTestFlagSet() - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true,%s=false", alpha, beta))) + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true,%s=false", alpha, beta), panicIfCalled)) rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") } @@ -96,10 +108,10 @@ func TestFeatureMultipleApply(t *testing.T) { func TestFeatureApplyInvalid(t *testing.T) { flags := buildTestFlagSet() - err := flags.Apply("invalid-flag") + err := flags.Apply("invalid-flag", panicIfCalled) rtest.Assert(t, err != nil && strings.Contains(err.Error(), "unknown feature flag"), "expected unknown feature flag error, got: %v", err) - err = flags.Apply(fmt.Sprintf("%v=invalid", alpha)) + err = flags.Apply(fmt.Sprintf("%v=invalid", alpha), panicIfCalled) rtest.Assert(t, err != nil && strings.Contains(err.Error(), "failed to parse value"), "expected parsing error, got: %v", err) } diff --git a/internal/feature/testing.go b/internal/feature/testing.go index c13f5250932..b796e89b5b9 100644 --- a/internal/feature/testing.go +++ b/internal/feature/testing.go @@ -15,13 +15,17 @@ import ( func TestSetFlag(t *testing.T, f *FlagSet, flag FlagName, value bool) func() { current := f.Enabled(flag) - if err := f.Apply(fmt.Sprintf("%s=%v", flag, value)); err != nil { + panicIfCalled := func(msg string) { + panic(msg) + } + + if err := f.Apply(fmt.Sprintf("%s=%v", flag, value), panicIfCalled); err != nil { // not reachable panic(err) } return func() { - if err := f.Apply(fmt.Sprintf("%s=%v", flag, current)); err != nil { + if err := f.Apply(fmt.Sprintf("%s=%v", flag, current), panicIfCalled); err != nil { // not reachable panic(err) } From 1a8bf358f1ea7b6903ad76da6e31f3f01be0eb04 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 18:20:52 +0100 Subject: [PATCH 061/893] index: deprecate legacy index format --- .../repo-restore-permissions-test.tar.gz | Bin 4174 -> 4256 bytes internal/feature/registry.go | 6 ++++-- internal/index/index.go | 6 ++++++ internal/index/index_test.go | 3 +++ 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/cmd/restic/testdata/repo-restore-permissions-test.tar.gz b/cmd/restic/testdata/repo-restore-permissions-test.tar.gz index 36aa62dbfb0ad52d5c0cafab528cfee393d388a2..dc8e9bc80e992fca9a59632cbb89344dcd870869 100644 GIT binary patch literal 4256 zcmV;R5MS>fiwFP!000001MS**I92<<2XI0~DaUEfl*r6p(;k${tZkmh6MI2cBcS8f1-|!^(ALS1s2#f$20REgm03kR|{!8JP z`~f^nfXD!i0FkH!3>izNpg5EYk!U0W1Q8I@;x`zSfCp%clM_ZL7zjlO2%fU|J&2+Z zME?CTJ_ii{i~O-T+^_QoK?nx9B>r_E|407j`3Ff-7%0gXrAn$wf*1f%!r+uZ*a8Gp zK~NO{lnHnQgW&N51q`T)!AL4fem)MKBoB0P`^7`HxPtD*^!?`wHxkpu!-cW9(v3=6 z{LG{<0=#_}KN?A@Vi5$wDoQdWRq=|F-jb?-q9l{#?z^}Zq-$ei3X&+g-V8%c3#N;O zjW5|#JG=~ddTdIdD-0W31(29pb~ff@Ka`}fkLhX2 zHgcw#niH%j8rBq+mjSBHG+q3KA^&#%U*Ud!g%HoxxHY6R^LA1>y+@-~$yf|j zIfwh6*RSgiKJAqKct*^zq)>`)ZoGh}FtT#Xe#|nH+^FG0y;EY8H4|kS;XNW1$qi@d zawF@Qt#|6yUR@Aaxz_Yz8B?8GGkMT`w$ej7RF#jIbazdkvS?adqRmU4E413s39IbP 
zO|lkZqYv26CW7f0itBYYW;~77PW#|2!vC=H{DvN1;+l;9^`L4gY($ zfM4nV004r&>Hi>%133MEDY%X74zboTV(JV^UF!1x`50-VNo?vfiUh0W!Qpr}l?5J= z_FWFqo&<%r=V z%Ur5Sze4YO-u1+(s4{oGQ`UX}qX8&icT_M)Yu7@dFbUrQ6i-9v4}@ z!80v$S;+7$r={l488j}Q~Rhk0I_?LKW~ockBObDlks?Zd{6lHVQuFhbf&}e zA|?;%PPdEs3~p3<>7lzb$nS=_H1TSK-w97_%%~uGL}fzroS*ditI8`xBj3&Fw=zrn zryq`ri1^)um7w`#`OP27DJna=Y8gL14Vj4Ok*s_MKkvaYN zF7LIS5=!hsHdf+7ukMsmYnNJGdd;ZGfby$(d0wec@~1b}9YLgi=Xl3<3gV049+M zSQ>&ws1ObZ0w^A!fm8^@V{pGS##j132m-&o|6jcR!*crnQb?RDl|yFsYbrVtl3PtY z6aCkFL%eL0zOdX6sQGeh?nYaw`Q}o+W%2uqLX`Ji z_1R}akIG7qe|I3#w)TSJnpl!pfuetjt#RSuG7KBV7H!er>Av~w_wmGRBDWf3=Gn;u zJX2s_dQC8ijo7k|xb6Aw;rQ4C!|KU`;sG0<0NIRN`85zS5V51c>zH)CLdnL7fUb&G zk#@5WFKW8HYWW`;ay-WQc$i9~&YnjdLnd{l zA@$$$xm`=otst#_xW%(MBmT@|C_QG^u`=bMWa;*Ymb&H#7*yL~tCgMZ!Gt|M0j!{g z(CL(0yJv1m+b_%u2MFEfRy`+`F&chISoG@KdZFHvBaF0gI6ULY;-YZ`9*=Hn6 zde-%s^2Pe;X1*m^bhzK^ByGSF1P$^HlAXm}$A|%R#^~9(lpo?69#5aE?@i7e6Np-FD1tW>Y>z1;JU2^DwC7G;k2AQcZryi9&CV%vueI96?LS_Ry6Sa{ zKFkQXeAOl?a|eyMocGA_Q|sB-Xh*TmX0ap609lkU4W zwi)V6t?RBZB$GprAM-z}l%C^Dd(j$lBy~4yMm{pJRh+OCIriq>O2{qNB${MP#@blXSkpC8!} z%rzZu`$Xd=1LVUbL@3r_sU2*Qh z&aC1xk?V%G^`>jR#O<6f8eG`nFr_vXGcQnTx-;d;)U@RA^o8QKnWVbAQOXH+&i8@a0U3V>CP>3d_jUQgsJTARSI?H6WgyCRDTjrQR zT1ks-i^jd|C$q6cr;~cUg!Q%9~Ag+pNxd)ca{hR~r%bqckfOWXzka}x^5CLg9(Txv^I48MgpbN@F&UwPmbG8`ZMiqaF~%D-VeWB|MwHY-l~Z!}FM<<;i=m^?l|qzmIs5+RTo| z4_&8zVx2RUeg5&oe3Z_@^188x==2B5tgLvbsJ*n^E-xK2*#(K;A`Du5fG<@(%Qx$e{7UNce%oQgzyi1Q4Dcn@YJfRt!jFs$L!553x{K`gWuzn@f@Ke6ph&Z=SMafj?;UdzdkckHZXBBSo_wKCUr zD0+xB8U3@5&ELV7`S0#Uar=gU0Qw{U|3VPp%lzj&|6dwk@^|s1qW*ue{5kjkOX9!e zkH?eoG#o-EU|=kcN}-@2L;w%~qJcP=hEf(=Gl<0ys3-+R5rjm;k||Ug8Bf4NI1GS- zG}`Zu@wMmw2#EW2{vh}ze@_4Z_70!$8tHzCz86ao(x;aRxddlka!)^0q|B^+FJzG^ zpL*ZIRrr&+;!}LtXhfrdb5{v!(BGtK-oY~0*0Hy2b6Ivi_tZO6=R|hw275^j-xReQ zcdcIz9TdoT)%qrgUy}FeJC+V_LGHltT!Jq<9iKQ^=$N}TY5QG14?EKqk>DTHyU`p` z)zYnd_TL`-Iscf#3m>7t7}xgZK;b6GO3T^oS7&SV2RzQE6w)ddQtd+fyLbt+lK34} zXJ?;}T1=mvT~%oIzNei{v^(<5O}tx~{CKm-T^0M=N#Bof74+s{YiCRyxK?HuxomPe z3OYEPJh(4{-q_h>zUkJ2+ZxK*7bS8>TqlY&Zs1b2pAJ;xROeN51sr9o)LCx)m=5rP z^S1RKlREd7C4_t@?6s%5U%AaKi5i_O&((YI%{VDyta|7={sKzPN;^#l z^u{-7>fCSQ=D)l*YCi2?Z=h0*;YvBeka~q4uoNeBz*4qd7g((Np^?DDg>OHsKCHqQLQp=gE zL08(iz7w6yeYl`qY-dogp~9Azb@BcNS>qHR8zrjn3qAcQ%l_1Y+{gefqu9Os-gK?K zhqvAJpX!U zCT^8qe;gX}cJEOFCb)=dlqgl?RZz83%~D`V?s05^tO z^QSKjwRT6wMz@bJtLd8a`LER+i>ys{81?M0emxV)6M9jH-b^H?w>R`5f_z=;B4hXa z$aIQkDN0R;xE~lbUoOg>-+R$V(Qd;gW$&vi1dHQ{Qx)aX1$%ad{q%ZzMZ%Q)L zt?Rz8@BRCIuitgC=)OLheBT!o7YqvZOABC-g%$zY^0D{Kw z$^COd{+IX<*FQp}<)UPlrHMe02;*V2o*!0yywY(ianw$Xy);YDw=Dg#>TO+AAH)1~ ztbEVPAmj43wLI$g<$*e`O3lFMYvr@K5W-BEw-0^e5~sH*ye)V<^39?&Sy%5Y~O#q?`W?a z75#XZosF^c<)>(?!M}QL{u5Y;e}8Y^@96=5BL4$0un_+s3gYGe#b7~yPkJ!>JN^KF z%Kza7{UHS5>Ax8KLw^7Z;~)yaz(Hgh4n;vzsB{dS29X(L90cJIGWQOOj>7^B6gNf@ zDhi?_I0Q@O-UsP)2%`M)7~k@LGzRlq{Xy=E0LAnF#UNCk%A%9~=`?vQc@PC4>L`pl z2opg-3k0MVD3rXK{4evsBzx1j&vU1Y+u`QJ_W!lRlgxJac4u)r zJ!uT?HJi!`_VwppTF7gm5d=c3$+P6Ouxj$Y@>+nJJe%z0&wUCqc5onoWU8?*3$IUP zyAvJ!DNNlEOE(`oFC;|M(%9WXUzLsv@pRP#OfA?9j-e)-f+3PZSgMA$h9;^3bX^0Y zwSfc6pWyCc;O6hcfHmp5+S=T4X=HygcLW<(4whg_rdnt+Frd4c5sP6T5&&UB^aysY z4%UFBnS%#`7-Z`~a`RzOV75M8)r`e(*E2MtQ0-s?S4Rj<;;@3PnITMHstMv~v0D}N zHFdD01_oRBTH9K2Ld?xAC_so?2uQWG)b#WuP|Wp-s^)+y9%S2HUOrUM?`Q!3g8w4`nwS3$W(eZ&6&2m}3Af3E*y0G|IZ2AS#vgA42?0f6A7@d4PS z>v2%gC=@%70xqOB5Si+RmAi9$8s-Tm9eHze_ykmV)z|ez-0FuXEpsNW%7`XIOVN`2 z{nkH|_eCBRZWr%=oWV&%ACzNR06vYr(|uW%=_H55e0)xA`G9=LhL=(w*Aw@~C096q z)v#9CGHK58i4fVN-Ef{W>o9I79X_DBHWa}>UOrRh9=z&~JoGL@<&a+Tb5epFzOm6J zW5xX%-AVKXrb6iem0_utceXiCM=E4y`))(;%h9&SKjHkiCCTAz5I9jD+PIh*X+Z4!EG 
z*x1c*fZ(>{+47}_3dPthGUEPwStqA#P8L;KZf(+imb^S%`jawaS7*Z6riie$kKTOl z%w5(`SwTKG`Qv=7p@!96)FTCNKO$xK(%HgL`=9CEahnBuaXl@E%WopBi1x1GZRtDsgC9RB6+VQIbfi`f@w*)ol8S=@(1p zkE`sBt)pWK9+|Hij!}(zWHE(_07ZxUn?F6L@Ri>6q_dKVxT$>g`v*_&_moho+8j}B zqbnCP(x3thq$d23RVJRS+YvjI5|S2fw}uw^@%SD$W5;;-!|1gx(&eEzNtc1Mhwp9E zVD?ky%3lk;t(Sp_MRV*=!lbZX%EX4QZPreUcY`(``~5u zvJlgXp~5tQ8H2`;`!mgs;+;Cx#UkuPk1d&yDisQFuNLAf8TbjXn-nRMtKNq%yBoNE zxsyQJ=j_Ccd?ytmMk(#KhM};fP*O@8H)yR06>Eo3^YK7xt&z5j37*=V`&T=0CATEFpNS| z>2wT@Lc?IOR66>99OGN{KZO3B{vd>cK%W0E26aBm5>mV=)aQD$7T2Y>zdh}cOJTs!b(Dd)^dG(FESE61)+&l3Xz7sF>S* zGJ3f(vG+1MFp&YXVN|L+&f|Ib*6|6jIA{wDjszt(@;dV-h#7lVK3PlLbLFYqtukA?uA z{)@qH^ryizI*m>R$p{rkCBqmx4hG2}h^C++kP6ZeItai383&>P7{pLv3=IM>R2q#8 zq9F)mVE^a{;9K=Szzz1_)&DT>{C{!q9MB1~H?m+Gy;8i=8T2TPEM*m&GE9{VYXnV&r;O6|f!RW?HME4RHjk(eAUc41ftgNl*ktnZ-bDM?bG@XJ^Nl*K<^uVFocvOu z)#VPz2cPz>%DYK2Exotr3G411pB3wkSr$DW{Buq=aPz?4ks~H9OZ~PMTsz&kF`!3$ zTG=+Nxm-~3)qt|;b)`66X*Y+tLMAeB%uW>F;HE+qjI3PZ9 zm1rSqP@#OK=2W#?&zsIZm&;Xok}lcKgX2jJF4sn)=T05AI&rW@p;5Ra?qqbw*_V;I z0lQw9rnvAcT5kz&J`>9{Rd?+KZ_j}8(e$~f8hdtDj5+!$wnXiSQJw6)}kRUoyC zfN$obY-?bqW(tJ$-*H*C>SpP(A6=o(Ungsy)dnMuedp(F5VH}PqB>>oq4b>*3jKzz zdyFO`@+03LF`j6X@p~nu{?6NYduYHS5P&#ch<6w4~0#}jK+lq=*#u{$(s{r3gTPc#x|^^ z=w(hke<*ZwyPP_wn1hzP*lj$n-qNXEcdBO4s#oK~+%_S_*==W~YVw3;-d_?zImI*j zCM2fq>H|Hh_>?cI%#UKGIIrfLyvi#*UyD7Ny8}u}s15{vWt=dgg8>0L7GQuh2*jc=e=x>3{2v5?-`D@#^&iah|HUA2rfe%RxmRCp zA1=T^s?o-yUu zhOC(#g21pU=gLQ%Rcz#jHKv>2==8_O9_rUk5|a&<>I2TO?wqfIklx5Gg+6IY^{S;( zW5J!3Ez)h)({F1!eQJfB&+P16^`HWUr?hr7m>%Gh6rR4x`Q&%}cH56;QVrK?weId~ z5Bj-{zxPyqg}tI$)QXRtC|P^0UeZY2DM3LQiQ*uy7f)#Ga>a7|s;Wj~T+ZJcmxj>_{uuc%7uw_SDwc<`9cU>bG0g;k6&P z^F?rAXhZl!@*SPYJ4#OTa}vR;?(%CDDP{~t9Ff>`W2$~t_vryvY6S8)Z)1t&iEPLRVWr?cx zUHYNNDI-aqS=U1ljtwx*oFWt3y_!47Ytc9{^Yi9OuCg8@ronEk!E-aod*T|NPZZUP zWNlpkys1yYUNo`!{bZVG^b))@7BAKoQ;vIMos{UrpCXAdzoBd2Q?2djlDW%X`;x-( zThTXs?zr`{g0J3iIF`AEVY)=@bt8xbLyvZ{TXjt!cL^<1PB((-6{hl{ZzZ0 z4w_vUSi7HZB7)SX_m1C9#;;)VhqE&sp(}c?DOTrxZVqM_UNJCi#yge>vc_abOHDq? z&exUX&2P^xDVM&5C)E?gePkV7FPUH5;ykWB9y2FeM%bR*H$EZXKXI|*ilOvQzGE|K z`*WH^L&Nl`ts?O9#!;npO4(K`#IIQMehz>P<9b}jna82;bt{+$0WI4atNorZCcCztG?TaPn zA)f=(>s~Ku2EO8ABB%mQ&QNRMgF4xn#2v&<#oMgkcR8%ce%3uZsiNOg;~j3}GS;vH zI}uSgm>(5rzf}WwJArDvB&fc3AN9#4lYIWLix;x@wT5UDKgDRZ&qeNuub|7wZDPN9 zIZ+Y*lA_yWoKqIFexx;HY3iT>X{T;*sL(b@HuUiD#8tfgmo3b+eYU5YKbrZ?UHv(- zFQt)l5c~QTZ8oropz`MH^jx&j{F1tnhJ&Y`Xar`*Lzmmi+8jgjyIL=ptvOgzJlk+@ z!~UM3mK>ug8HwFp99yf=HyMnigGGfNe{~Y%!Gi}69z1yP;K73j4<0;t@ZiCN2M-=R Yc<|uCg9i^DJpPaKUr7c%#{f_O02$#w+W-In diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 7a9cbf560c5..620c9ec35b4 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -5,11 +5,13 @@ var Flag = New() // flag names are written in kebab-case const ( - ExampleFeature FlagName = "example-feature" + ExampleFeature FlagName = "example-feature" + DeprecateLegacyIndex FlagName = "deprecate-legacy-index" ) func init() { Flag.SetFlags(map[FlagName]FlagDesc{ - ExampleFeature: {Type: Alpha, Description: "just for testing"}, + ExampleFeature: {Type: Alpha, Description: "just for testing"}, + DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. 
Use `restic repair index` to update the index if necessary."}, }) } diff --git a/internal/index/index.go b/internal/index/index.go index ecd4815944f..b571c55eb7f 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -3,12 +3,14 @@ package index import ( "context" "encoding/json" + "fmt" "io" "sync" "time" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/debug" @@ -515,6 +517,10 @@ func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err erro debug.Log("Error %v", err) if isErrOldIndex(err) { + if feature.Flag.Enabled(feature.DeprecateLegacyIndex) { + return nil, false, fmt.Errorf("index seems to use the legacy format. update it using `restic repair index`") + } + debug.Log("index is probably old format, trying that") idx, err = decodeOldIndex(buf) return idx, err == nil, err diff --git a/internal/index/index_test.go b/internal/index/index_test.go index 4f0dbd2a0e5..78e4800cac3 100644 --- a/internal/index/index_test.go +++ b/internal/index/index_test.go @@ -8,6 +8,7 @@ import ( "sync" "testing" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -427,6 +428,8 @@ func BenchmarkEncodeIndex(b *testing.B) { } func TestIndexUnserializeOld(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateLegacyIndex, false)() + idx, oldFormat, err := index.DecodeIndex(docOldExample, restic.NewRandomID()) rtest.OK(t, err) rtest.Assert(t, oldFormat, "old index format recognized as new format") From f8852f0eb6f63d08fddbabf19609fb530c67bb5e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 18:21:22 +0100 Subject: [PATCH 062/893] repair index: fix deletion of legacy indexes --- internal/index/index.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/index/index.go b/internal/index/index.go index b571c55eb7f..1fb2c155edd 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -523,6 +523,7 @@ func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err erro debug.Log("index is probably old format, trying that") idx, err = decodeOldIndex(buf) + idx.ids = append(idx.ids, id) return idx, err == nil, err } From 98a6817d013a96898718514132a752c95f81eebe Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 18:35:00 +0100 Subject: [PATCH 063/893] add changelog for legacy index deprecation --- changelog/unreleased/issue-4602 | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 changelog/unreleased/issue-4602 diff --git a/changelog/unreleased/issue-4602 b/changelog/unreleased/issue-4602 new file mode 100644 index 00000000000..3cba63876f5 --- /dev/null +++ b/changelog/unreleased/issue-4602 @@ -0,0 +1,13 @@ +Change: Deprecate legacy index format + +Support for the legacy index format used by restic before version 0.2.0 has +been deprecated and will be removed in the next minor restic version. You can +use `restic repair index` to update the index to the current format. + +It is possible to temporarily reenable support for the legacy index format by +setting the environment variable +`RESTIC_FEATURES=deprecate-legacy-index=false`. Note that this feature flag +will be removed in the next minor restic version.
+ +https://github.com/restic/restic/issues/4602 +https://github.com/restic/restic/pull/4724 From 69ca12d2eba28b0744e30c5a367fddb607980b23 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 18:36:33 +0100 Subject: [PATCH 064/893] check: treat legacy index format as errors --- cmd/restic/cmd_check.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 990702b61d9..cbe388877e8 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -231,12 +231,17 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args errorsFound := false suggestIndexRebuild := false + suggestLegacyIndexRebuild := false mixedFound := false for _, hint := range hints { switch hint.(type) { - case *checker.ErrDuplicatePacks, *checker.ErrOldIndexFormat: + case *checker.ErrDuplicatePacks: Printf("%v\n", hint) suggestIndexRebuild = true + case *checker.ErrOldIndexFormat: + Warnf("error: %v\n", hint) + suggestLegacyIndexRebuild = true + errorsFound = true case *checker.ErrMixedPack: Printf("%v\n", hint) mixedFound = true @@ -247,7 +252,10 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } if suggestIndexRebuild { - Printf("Duplicate packs/old indexes are non-critical, you can run `restic repair index' to correct this.\n") + Printf("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n") + } + if suggestLegacyIndexRebuild { + Warnf("Found indexes using the legacy format, you must run `restic repair index' to correct this.\n") } if mixedFound { Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n") From 9f3e1462c0c2dd147c1774c6acd469b7d524ca5e Mon Sep 17 00:00:00 2001 From: Leo Heitmann Ruiz <148111793+leoheitmannruiz@users.noreply.github.com> Date: Sat, 9 Mar 2024 23:56:16 +0100 Subject: [PATCH 065/893] Minor README.md cleanups --- README.md | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index ad6b13cefa5..ef12f3e1b2a 100644 --- a/README.md +++ b/README.md @@ -10,8 +10,7 @@ For detailed usage and installation instructions check out the [documentation](h You can ask questions in our [Discourse forum](https://forum.restic.net). -Quick start ------------ +## Quick start Once you've [installed](https://restic.readthedocs.io/en/latest/020_installation.html) restic, start off with creating a repository for your backups: @@ -59,7 +58,7 @@ Therefore, restic supports the following backends for storing backups natively: Restic is a program that does backups right and was designed with the following principles in mind: -- **Easy:** Doing backups should be a frictionless process, otherwise +- **Easy**: Doing backups should be a frictionless process, otherwise you might be tempted to skip it. Restic should be easy to configure and use, so that, in the event of a data loss, you can just restore it. Likewise, restoring data should not be complicated. @@ -92,20 +91,17 @@ reproduce a byte identical version from the source code for that release. Instructions on how to do that are contained in the [builder repository](https://github.com/restic/builder). 
-News ----- +## News -You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or by subscribing to +You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or subscribe to the [project blog](https://restic.net/blog/). -License -------- +## License Restic is licensed under [BSD 2-Clause License](https://opensource.org/licenses/BSD-2-Clause). You can find the -complete text in [``LICENSE``](LICENSE). +complete text in [`LICENSE`](LICENSE). -Sponsorship ------------ +## Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! From 00f762373fa625659f411bfa6a4ba5d590599bb3 Mon Sep 17 00:00:00 2001 From: Leo Heitmann Ruiz <148111793+leoheitmannruiz@users.noreply.github.com> Date: Sun, 10 Mar 2024 00:20:26 +0100 Subject: [PATCH 066/893] Capitalize Homebrew --- doc/020_installation.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/020_installation.rst b/doc/020_installation.rst index 0f1cd6c0481..b21a2a66f18 100644 --- a/doc/020_installation.rst +++ b/doc/020_installation.rst @@ -77,8 +77,7 @@ avoid any conflicts: macOS ===== -If you are using macOS, you can install restic using the -`homebrew `__ package manager: +If you are using macOS, you can install restic using `Homebrew `__: .. code-block:: console From ac948fccda125156e3e03768ec22f75974aceee3 Mon Sep 17 00:00:00 2001 From: avoidalone Date: Mon, 11 Mar 2024 14:35:12 +0800 Subject: [PATCH 067/893] fix some typos Signed-off-by: avoidalone --- CHANGELOG.md | 2 +- changelog/0.10.0_2020-09-19/pull-2195 | 2 +- doc/faq.rst | 2 +- internal/restorer/doc.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b8969a4438c..5fea763e312 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3488,7 +3488,7 @@ restic users. The changes are ordered by importance. NOTE: This new implementation does not guarantee order in which blobs are written to the target files and, for example, the last blob of a file can be - written to the file before any of the preceeding file blobs. It is therefore + written to the file before any of the preceding file blobs. It is therefore possible to have gaps in the data written to the target files if restore fails or interrupted by the user. diff --git a/changelog/0.10.0_2020-09-19/pull-2195 b/changelog/0.10.0_2020-09-19/pull-2195 index a139aa4e120..7898568fa8e 100644 --- a/changelog/0.10.0_2020-09-19/pull-2195 +++ b/changelog/0.10.0_2020-09-19/pull-2195 @@ -10,7 +10,7 @@ https://github.com/restic/restic/issues/2244 NOTE: This new implementation does not guarantee order in which blobs are written to the target files and, for example, the last blob of a -file can be written to the file before any of the preceeding file blobs. +file can be written to the file before any of the preceding file blobs. It is therefore possible to have gaps in the data written to the target files if restore fails or interrupted by the user. diff --git a/doc/faq.rst b/doc/faq.rst index e8ef2de5e56..8e56b5d9e16 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -74,7 +74,7 @@ $ restic backup --exclude "~/documents" ~ This command will result in a complete backup of the current logged in user's home directory and it won't exclude the folder ``~/documents/`` - which is not what the user wanted to achieve. The problem is how the path to ``~/documents`` is passed to restic. 
-In order to spot an issue like this, you can make use of the following ruby command preceeding your restic command. +In order to spot an issue like this, you can make use of the following ruby command preceding your restic command. :: diff --git a/internal/restorer/doc.go b/internal/restorer/doc.go index 8d68d716155..e230f23f0b3 100644 --- a/internal/restorer/doc.go +++ b/internal/restorer/doc.go @@ -18,7 +18,7 @@ // // Implementation does not guarantee order in which blobs are written to the // target files and, for example, the last blob of a file can be written to the -// file before any of the preceeding file blobs. It is therefore possible to +// file before any of the preceding file blobs. It is therefore possible to // have gaps in the data written to the target files if restore fails or // interrupted by the user. package restorer From 521713fc94fba9f4d103725c198e487ac94f0881 Mon Sep 17 00:00:00 2001 From: Facundo Tuesca Date: Sat, 16 Mar 2024 18:54:27 +0100 Subject: [PATCH 068/893] doc: Add instructions to configure PowerShell completions --- doc/020_installation.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/doc/020_installation.rst b/doc/020_installation.rst index b21a2a66f18..da9ed8a3fa4 100644 --- a/doc/020_installation.rst +++ b/doc/020_installation.rst @@ -362,3 +362,18 @@ Example for using sudo to write a zsh completion script directly to the system-w the operating system used, e.g. ``/usr/share/bash-completion/completions/restic`` in Debian and derivatives. Please look up the correct path in the appropriate documentation. + +Example for setting up a powershell completion script for the local user's profile: + +.. code-block:: pwsh-session + + # Create profile if one does not exist + PS> If (!(Test-Path $PROFILE.CurrentUserAllHosts)) {New-Item -Path $PROFILE.CurrentUserAllHosts -Force} + + PS> $ProfileDir = (Get-Item $PROFILE.CurrentUserAllHosts).Directory + + # Generate Restic completions in the same directory as the profile + PS> restic generate --powershell-completion "$ProfileDir\restic-completion.ps1" + + # Append to the profile file the command to load Restic completions + PS> Add-Content -Path $Profile.CurrentUserAllHosts -Value "`r`nImport-Module $ProfileDir\restic-completion.ps1" From 5c4a4b4a30951f54fae5befdc834f4671464ddcc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 17:09:59 +0100 Subject: [PATCH 069/893] CI: Allow golangci-lint to annotate PRs --- .github/workflows/tests.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 134656d8a3b..e03ee326406 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -247,6 +247,10 @@ jobs: lint: name: lint runs-on: ubuntu-latest + permissions: + contents: read + # allow annotating code in the PR + checks: write steps: - name: Set up Go ${{ env.latest_go }} uses: actions/setup-go@v5 From 6e775d3787d4614e2805f15e46d62f1ed67195e6 Mon Sep 17 00:00:00 2001 From: Adam Eijdenberg Date: Mon, 29 Jan 2024 14:23:56 +1100 Subject: [PATCH 070/893] Enhancement: option to send HTTP over unix socket add tests for unix socket connection switch HTTP rest-server test to use any free port allow rest-server test graceful shutdown opportunity --- doc/030_preparing_a_new_repo.rst | 7 +- go.mod | 1 + go.sum | 13 +++ internal/backend/http_transport.go | 3 + internal/backend/rest/config_test.go | 7 ++ internal/backend/rest/rest_test.go | 146 +++++++++++++++++++----- internal/backend/rest/rest_unix_test.go | 30 
+++++ 7 files changed, 174 insertions(+), 33 deletions(-) create mode 100644 internal/backend/rest/rest_unix_test.go diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 8661f59046e..0c50b65be31 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -201,15 +201,16 @@ scheme like this: $ restic -r rest:http://host:8000/ init Depending on your REST server setup, you can use HTTPS protocol, -password protection, multiple repositories or any combination of -those features. The TCP/IP port is also configurable. Here -are some more examples: +unix socket, password protection, multiple repositories or any +combination of those features. The TCP/IP port is also configurable. +Here are some more examples: .. code-block:: console $ restic -r rest:https://host:8000/ init $ restic -r rest:https://user:pass@host:8000/ init $ restic -r rest:https://user:pass@host:8000/my_backup_repo/ init + $ restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ init The server username and password can be specified using environment variables as well: diff --git a/go.mod b/go.mod index 6e546974e5c..7121cdac499 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/minio/minio-go/v7 v7.0.66 github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 + github.com/peterbourgon/unixtransport v0.0.4 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 github.com/pkg/sftp v1.13.6 diff --git a/go.sum b/go.sum index 668d6a339de..1cd51cbacc9 100644 --- a/go.sum +++ b/go.sum @@ -128,6 +128,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= @@ -141,6 +142,11 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/ff/v3 v3.3.1/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= +github.com/peterbourgon/unixtransport v0.0.4 h1:UTF0FxXCAglvoZz9jaGPYjEg52DjBLDYGMJvJni6Tfw= +github.com/peterbourgon/unixtransport v0.0.4/go.mod h1:o8aUkOCa8W/BIXpi15uKvbSabjtBh0JhSOJGSfoOhAU= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -210,6 +216,7 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -221,6 +228,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -231,6 +239,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -246,12 +255,14 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -271,6 +282,7 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -315,6 +327,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go index 9ee1c91f112..19b20dc6af6 100644 --- a/internal/backend/http_transport.go +++ b/internal/backend/http_transport.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/peterbourgon/unixtransport" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" ) @@ -82,6 +83,8 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { TLSClientConfig: &tls.Config{}, } + unixtransport.Register(tr) + if opts.InsecureTLS { tr.TLSClientConfig.InsecureSkipVerify = true } diff --git a/internal/backend/rest/config_test.go b/internal/backend/rest/config_test.go index 23ea9095b11..13a1ebb13c0 100644 --- a/internal/backend/rest/config_test.go +++ b/internal/backend/rest/config_test.go @@ -31,6 +31,13 @@ var configTests = []test.ConfigTestData[Config]{ Connections: 5, }, }, + { + S: "rest:http+unix:///tmp/rest.socket:/my_backup_repo/", + Cfg: Config{ + URL: parseURL("http+unix:///tmp/rest.socket:/my_backup_repo/"), + Connections: 5, + }, + }, } func TestParseConfig(t *testing.T) { diff --git a/internal/backend/rest/rest_test.go b/internal/backend/rest/rest_test.go index 6a5b4f8a58f..93b9a103e81 100644 --- a/internal/backend/rest/rest_test.go +++ b/internal/backend/rest/rest_test.go @@ -1,11 +1,18 @@ +//go:build go1.20 +// +build go1.20 + package rest_test import ( + "bufio" "context" - "net" + "fmt" "net/url" "os" "os/exec" + "regexp" + "strings" + "syscall" "testing" "time" @@ -14,54 +21,133 @@ import ( rtest "github.com/restic/restic/internal/test" ) -func runRESTServer(ctx context.Context, t testing.TB, dir string) (*url.URL, func()) { +var ( + serverStartedRE = regexp.MustCompile("^start server on (.*)$") +) + +func runRESTServer(ctx context.Context, t testing.TB, dir, reqListenAddr string) (*url.URL, func()) { srv, err := exec.LookPath("rest-server") if err != nil { t.Skip(err) } - cmd := exec.CommandContext(ctx, srv, "--no-auth", "--path", dir) + // create our own context, so that our cleanup can cancel and wait for completion + // this will ensure any open ports, open unix sockets etc are properly 
closed + processCtx, cancel := context.WithCancel(ctx) + cmd := exec.CommandContext(processCtx, srv, "--no-auth", "--path", dir, "--listen", reqListenAddr) + + // this cancel func is called by when the process context is done + cmd.Cancel = func() error { + // we execute in a Go-routine as we know the caller will + // be waiting on a .Wait() regardless + go func() { + // try to send a graceful termination signal + if cmd.Process.Signal(syscall.SIGTERM) == nil { + // if we succeed, then wait a few seconds + time.Sleep(2 * time.Second) + } + // and then make sure it's killed either way, ignoring any error code + _ = cmd.Process.Kill() + }() + return nil + } + + // this is the cleanup function that we return the caller, + // which will cancel our process context, and then wait for it to finish + cleanup := func() { + cancel() + _ = cmd.Wait() + } + + // but in-case we don't finish this method, e.g. by calling t.Fatal() + // we also defer a call to clean it up ourselves, guarded by a flag to + // indicate that we returned the function to the caller to deal with. + callerWillCleanUp := false + defer func() { + if !callerWillCleanUp { + cleanup() + } + }() + + // send stdout to our std out cmd.Stdout = os.Stdout - cmd.Stderr = os.Stdout - if err := cmd.Start(); err != nil { + + // capture stderr with a pipe, as we want to examine this output + // to determine when the server is started and listening. + cmdErr, err := cmd.StderrPipe() + if err != nil { t.Fatal(err) } - // wait until the TCP port is reachable - var success bool - for i := 0; i < 10; i++ { - time.Sleep(200 * time.Millisecond) + // start the rest-server + if err := cmd.Start(); err != nil { + t.Fatal(err) + } - c, err := net.Dial("tcp", "localhost:8000") - if err != nil { - continue + // create a channel to receive the actual listen address on + listenAddrCh := make(chan string) + go func() { + defer close(listenAddrCh) + matched := false + br := bufio.NewReader(cmdErr) + for { + line, err := br.ReadString('\n') + if err != nil { + // we ignore errors, as code that relies on this + // will happily fail via timeout and empty closed + // channel. 
+ return + } + + line = strings.Trim(line, "\r\n") + if !matched { + // look for the server started message, and return the address + // that it's listening on + matchedServerListen := serverStartedRE.FindSubmatch([]byte(line)) + if len(matchedServerListen) == 2 { + listenAddrCh <- string(matchedServerListen[1]) + matched = true + } + } + fmt.Fprintln(os.Stdout, line) // print all output to console } + }() - success = true - if err := c.Close(); err != nil { - t.Fatal(err) + // wait for us to get an address, + // or the parent context to cancel, + // or for us to timeout + var actualListenAddr string + select { + case <-processCtx.Done(): + t.Fatal(context.Canceled) + case <-time.NewTimer(2 * time.Second).C: + t.Fatal(context.DeadlineExceeded) + case a, ok := <-listenAddrCh: + if !ok { + t.Fatal(context.Canceled) } + actualListenAddr = a } - if !success { - t.Fatal("unable to connect to rest server") - return nil, nil + // this translate the address that the server is listening on + // to a URL suitable for us to connect to + var addrToConnectTo string + if strings.HasPrefix(reqListenAddr, "unix:") { + addrToConnectTo = fmt.Sprintf("http+unix://%s:/restic-test/", actualListenAddr) + } else { + // while we may listen on 0.0.0.0, we connect to localhost + addrToConnectTo = fmt.Sprintf("http://%s/restic-test/", strings.Replace(actualListenAddr, "0.0.0.0", "localhost", 1)) } - url, err := url.Parse("http://localhost:8000/restic-test/") + // parse to a URL + url, err := url.Parse(addrToConnectTo) if err != nil { t.Fatal(err) } - cleanup := func() { - if err := cmd.Process.Kill(); err != nil { - t.Fatal(err) - } - - // ignore errors, we've killed the process - _ = cmd.Wait() - } - + // indicate that we've completed successfully, and that the caller + // is responsible for calling cleanup + callerWillCleanUp = true return url, cleanup } @@ -91,7 +177,7 @@ func TestBackendREST(t *testing.T) { defer cancel() dir := rtest.TempDir(t) - serverURL, cleanup := runRESTServer(ctx, t, dir) + serverURL, cleanup := runRESTServer(ctx, t, dir, ":0") defer cleanup() newTestSuite(serverURL, false).RunTests(t) @@ -116,7 +202,7 @@ func BenchmarkBackendREST(t *testing.B) { defer cancel() dir := rtest.TempDir(t) - serverURL, cleanup := runRESTServer(ctx, t, dir) + serverURL, cleanup := runRESTServer(ctx, t, dir, ":0") defer cleanup() newTestSuite(serverURL, false).RunBenchmarks(t) diff --git a/internal/backend/rest/rest_unix_test.go b/internal/backend/rest/rest_unix_test.go new file mode 100644 index 00000000000..85ef7a73d9b --- /dev/null +++ b/internal/backend/rest/rest_unix_test.go @@ -0,0 +1,30 @@ +//go:build !windows && go1.20 +// +build !windows,go1.20 + +package rest_test + +import ( + "context" + "fmt" + "path" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func TestBackendRESTWithUnixSocket(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/rest.TestBackendREST") + } + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dir := rtest.TempDir(t) + serverURL, cleanup := runRESTServer(ctx, t, path.Join(dir, "data"), fmt.Sprintf("unix:%s", path.Join(dir, "sock"))) + defer cleanup() + + newTestSuite(serverURL, false).RunTests(t) +} From add37fcd9f58bb59c1e079ca13f1f35a1039a4ea Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 16:57:36 +0100 Subject: [PATCH 071/893] CI: uses rest-server from master branch until unix sockets are released --- .github/workflows/tests.yml | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 134656d8a3b..ba11d7fd2ab 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -74,7 +74,7 @@ jobs: - name: Get programs (Linux/macOS) run: | echo "build Go tools" - go install github.com/restic/rest-server/cmd/rest-server@latest + go install github.com/restic/rest-server/cmd/rest-server@master echo "install minio server" mkdir $HOME/bin @@ -106,7 +106,7 @@ jobs: $ProgressPreference = 'SilentlyContinue' echo "build Go tools" - go install github.com/restic/rest-server/... + go install github.com/restic/rest-server/cmd/rest-server@master echo "install minio server" mkdir $Env:USERPROFILE/bin From 6ac751918806921b8949da9bca480b7014d5eca8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 17:40:02 +0100 Subject: [PATCH 072/893] add changelog for rest unix socket support --- changelog/unreleased/issue-4287 | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 changelog/unreleased/issue-4287 diff --git a/changelog/unreleased/issue-4287 b/changelog/unreleased/issue-4287 new file mode 100644 index 00000000000..df4fc55903f --- /dev/null +++ b/changelog/unreleased/issue-4287 @@ -0,0 +1,14 @@ +Enhancement: support connection to rest-server using unix socket + +Restic now supports connecting to rest-server using a unix socket for +rest-server version 0.13.0 or later. + +This allows running restic as follows: + +``` +rest-server --listen unix:/tmp/rest.socket --data /path/to/data & +restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ [...] +``` + +https://github.com/restic/restic/issues/4287 +https://github.com/restic/restic/pull/4655 From aee6d311f141d49ac620f71254d7f35f4d349f2a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 17:56:13 +0100 Subject: [PATCH 073/893] CI: update docker actions --- .github/workflows/docker.yml | 8 ++++---- .github/workflows/tests.yml | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index d9df0b8ba75..71cfe1691e3 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -25,7 +25,7 @@ jobs: uses: actions/checkout@v4 - name: Log in to the Container registry - uses: docker/login-action@5139682d94efc37792e6b54386b5b470a68a4737 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} @@ -33,7 +33,7 @@ jobs: - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} tags: | @@ -45,7 +45,7 @@ jobs: uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 + uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 - name: Ensure consistent binaries run: | @@ -55,7 +55,7 @@ jobs: if: github.ref != 'refs/heads/master' - name: Build and push Docker image - uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4 + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 with: push: true context: . 
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 134656d8a3b..d8f5b308b33 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -298,7 +298,7 @@ jobs: - name: Docker meta id: meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: # list of Docker images to use as base name for tags images: | @@ -321,7 +321,7 @@ jobs: - name: Build and push id: docker_build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: push: false context: . From 1497525e1555e2cf1aa149f3f1aef142ced39fc2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 18:15:50 +0100 Subject: [PATCH 074/893] CI: Update golangci-lint to version 1.57.1 --- .github/workflows/tests.yml | 2 +- cmd/restic/cmd_backup_integration_test.go | 2 ++ cmd/restic/cmd_tag_integration_test.go | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e03ee326406..7c41fe4e560 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -264,7 +264,7 @@ jobs: uses: golangci/golangci-lint-action@v4 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.56.1 + version: v1.57.1 args: --verbose --timeout 5m # only run golangci-lint for pull requests, otherwise ALL hints get diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index c60e9c543be..0bc4a9eaa17 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -406,6 +406,7 @@ func TestIncrementalBackup(t *testing.T) { t.Logf("repository grown by %d bytes", stat3.size-stat2.size) } +// nolint: staticcheck // false positive nil pointer dereference check func TestBackupTags(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -441,6 +442,7 @@ func TestBackupTags(t *testing.T) { "expected parent to be %v, got %v", parent.ID, newest.Parent) } +// nolint: staticcheck // false positive nil pointer dereference check func TestBackupProgramVersion(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() diff --git a/cmd/restic/cmd_tag_integration_test.go b/cmd/restic/cmd_tag_integration_test.go index 3b902c51e2b..6979f9c1161 100644 --- a/cmd/restic/cmd_tag_integration_test.go +++ b/cmd/restic/cmd_tag_integration_test.go @@ -12,6 +12,7 @@ func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) { rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{})) } +// nolint: staticcheck // false positive nil pointer dereference check func TestTag(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() From 15555c9898c83838f80e29c3f1629568da0ca2b7 Mon Sep 17 00:00:00 2001 From: Facundo Tuesca Date: Thu, 28 Mar 2024 18:35:18 +0100 Subject: [PATCH 075/893] doc: Use consistent case for PROFILE env variable in PowerShell --- doc/020_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/020_installation.rst b/doc/020_installation.rst index da9ed8a3fa4..17b581a87bb 100644 --- a/doc/020_installation.rst +++ b/doc/020_installation.rst @@ -376,4 +376,4 @@ Example for setting up a powershell completion script for the local user's profi PS> restic generate --powershell-completion "$ProfileDir\restic-completion.ps1" # Append to the profile file the command to load Restic completions - PS> Add-Content -Path $Profile.CurrentUserAllHosts 
-Value "`r`nImport-Module $ProfileDir\restic-completion.ps1" + PS> Add-Content -Path $PROFILE.CurrentUserAllHosts -Value "`r`nImport-Module $ProfileDir\restic-completion.ps1" From 2ba21fe72bf93f4caee3de1b2ea58bf0a50c7a64 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 17:38:41 +0100 Subject: [PATCH 076/893] archiver: only store deviceID for hardlinks The deviceID can change e.g. when backing up from filesystem snapshot. It is only used for hardlink detection. Thus there it is not necessary to store it for everything else. --- internal/archiver/archiver.go | 6 ++++++ internal/archiver/archiver_test.go | 1 + internal/restic/node.go | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 77ddba7c4c6..63a1691b307 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -188,6 +188,12 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) if !arch.WithAtime { node.AccessTime = node.ModTime } + if node.Links == 1 || node.Type == "dir" { + // the DeviceID is only necessary for hardlinked files + // when using subvolumes or snapshots their deviceIDs tend to change which causes + // restic to upload new tree blobs + node.DeviceID = 0 + } // overwrite name to match that within the snapshot node.Name = path.Base(snPath) if err != nil { diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 46ef4425184..3d50e555f28 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -2153,6 +2153,7 @@ func TestMetadataChanged(t *testing.T) { sn, node2 := snapshot(t, repo, fs, nil, "testfile") // set some values so we can then compare the nodes + want.DeviceID = 0 want.Content = node2.Content want.Path = "" if len(want.ExtendedAttributes) == 0 { diff --git a/internal/restic/node.go b/internal/restic/node.go index cbe9ef36394..e7688aada0b 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -82,7 +82,7 @@ type Node struct { User string `json:"user,omitempty"` Group string `json:"group,omitempty"` Inode uint64 `json:"inode,omitempty"` - DeviceID uint64 `json:"device_id,omitempty"` // device id of the file, stat.st_dev + DeviceID uint64 `json:"device_id,omitempty"` // device id of the file, stat.st_dev, only stored for hardlinks Size uint64 `json:"size,omitempty"` Links uint64 `json:"links,omitempty"` LinkTarget string `json:"linktarget,omitempty"` From a26d6ffa720af4dde2504c560ce838470bdf21ab Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 17:44:48 +0100 Subject: [PATCH 077/893] archiver: move deviceID handling behind feature flag --- internal/archiver/archiver.go | 13 ++++++++----- internal/archiver/archiver_test.go | 3 +++ internal/feature/registry.go | 2 ++ 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 63a1691b307..19d16c4d348 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -12,6 +12,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" "golang.org/x/sync/errgroup" @@ -188,11 +189,13 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) if !arch.WithAtime { node.AccessTime = node.ModTime } - if node.Links == 1 || node.Type == "dir" { - // 
the DeviceID is only necessary for hardlinked files - // when using subvolumes or snapshots their deviceIDs tend to change which causes - // restic to upload new tree blobs - node.DeviceID = 0 + if feature.Flag.Enabled(feature.DeviceIDForHardlinks) { + if node.Links == 1 || node.Type == "dir" { + // the DeviceID is only necessary for hardlinked files + // when using subvolumes or snapshots their deviceIDs tend to change which causes + // restic to upload new tree blobs + node.DeviceID = 0 + } } // overwrite name to match that within the snapshot node.Name = path.Base(snPath) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 3d50e555f28..4119949110c 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -19,6 +19,7 @@ import ( "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -2125,6 +2126,8 @@ const ( ) func TestMetadataChanged(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)() + files := TestDir{ "testfile": TestFile{ Content: "foo bar test file", diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 620c9ec35b4..6a9874786b7 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -7,11 +7,13 @@ var Flag = New() const ( ExampleFeature FlagName = "example-feature" DeprecateLegacyIndex FlagName = "deprecate-legacy-index" + DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" ) func init() { Flag.SetFlags(map[FlagName]FlagDesc{ ExampleFeature: {Type: Alpha, Description: "just for testing"}, DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, + DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, }) } From a9b3d86c4f4d90a7b48ee02da6bf328b95ef4332 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 18:44:45 +0100 Subject: [PATCH 078/893] features: remove example feature --- internal/feature/registry.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 6a9874786b7..4693b890976 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -5,14 +5,12 @@ var Flag = New() // flag names are written in kebab-case const ( - ExampleFeature FlagName = "example-feature" DeprecateLegacyIndex FlagName = "deprecate-legacy-index" DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" ) func init() { Flag.SetFlags(map[FlagName]FlagDesc{ - ExampleFeature: {Type: Alpha, Description: "just for testing"}, DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. 
Will be removed in a future restic version after repository format 3 is available"}, }) From d705741571530d4a9b1b3ee61dddeaf211aea393 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 19:11:56 +0100 Subject: [PATCH 079/893] backup: test that deviceID is only stored for hardlinks --- internal/archiver/archiver_unix_test.go | 48 +++++++++++++++++++++++++ internal/archiver/testing.go | 26 +++++++++++++- 2 files changed, 73 insertions(+), 1 deletion(-) diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 7523f074983..2552b23e190 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -6,6 +6,12 @@ package archiver import ( "os" "syscall" + "testing" + + "github.com/restic/restic/internal/feature" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" + restictest "github.com/restic/restic/internal/test" ) type wrappedFileInfo struct { @@ -39,3 +45,45 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { return res } + +func statAndSnapshot(t *testing.T, repo restic.Repository, name string) (*restic.Node, *restic.Node) { + fi := lstat(t, name) + want, err := restic.NodeFromFileInfo(name, fi) + restictest.OK(t, err) + + _, node := snapshot(t, repo, fs.Local{}, nil, name) + return want, node +} + +func TestHardlinkMetadata(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)() + + files := TestDir{ + "testfile": TestFile{ + Content: "foo bar test file", + }, + "linktarget": TestFile{ + Content: "test file", + }, + "testlink": TestHardlink{ + Target: "./linktarget", + }, + "testdir": TestDir{}, + } + + tempdir, repo := prepareTempdirRepoSrc(t, files) + + back := restictest.Chdir(t, tempdir) + defer back() + + want, node := statAndSnapshot(t, repo, "testlink") + restictest.Assert(t, node.DeviceID == want.DeviceID, "device id mismatch expected %v got %v", want.DeviceID, node.DeviceID) + restictest.Assert(t, node.Links == want.Links, "link count mismatch expected %v got %v", want.Links, node.Links) + restictest.Assert(t, node.Inode == want.Inode, "inode mismatch expected %v got %v", want.Inode, node.Inode) + + _, node = statAndSnapshot(t, repo, "testfile") + restictest.Assert(t, node.DeviceID == 0, "device id mismatch for testfile expected %v got %v", 0, node.DeviceID) + + _, node = statAndSnapshot(t, repo, "testdir") + restictest.Assert(t, node.DeviceID == 0, "device id mismatch for testdir expected %v got %v", 0, node.DeviceID) +} diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index 111c1e68cdc..0bbd03a722b 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -6,6 +6,7 @@ import ( "path" "path/filepath" "runtime" + "sort" "strings" "testing" "time" @@ -63,11 +64,29 @@ func (s TestSymlink) String() string { return "" } +// TestHardlink describes a hardlink created for a test. +type TestHardlink struct { + Target string +} + +func (s TestHardlink) String() string { + return "" +} + // TestCreateFiles creates a directory structure described by dir at target, // which must already exist. On Windows, symlinks aren't created. 
func TestCreateFiles(t testing.TB, target string, dir TestDir) { t.Helper() - for name, item := range dir { + + // ensure a stable order such that it can be guaranteed that a hardlink target already exists + var names []string + for name := range dir { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + item := dir[name] targetPath := filepath.Join(target, name) switch it := item.(type) { @@ -81,6 +100,11 @@ func TestCreateFiles(t testing.TB, target string, dir TestDir) { if err != nil { t.Fatal(err) } + case TestHardlink: + err := fs.Link(filepath.Join(target, filepath.FromSlash(it.Target)), targetPath) + if err != nil { + t.Fatal(err) + } case TestDir: err := fs.Mkdir(targetPath, 0755) if err != nil { From 21cf38fe96570e073b163a96ff6dedae073ad041 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 19:25:52 +0100 Subject: [PATCH 080/893] add changelog for deviceID only for hardlinks --- changelog/unreleased/pull-4006 | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 changelog/unreleased/pull-4006 diff --git a/changelog/unreleased/pull-4006 b/changelog/unreleased/pull-4006 new file mode 100644 index 00000000000..01f4ddb6e98 --- /dev/null +++ b/changelog/unreleased/pull-4006 @@ -0,0 +1,16 @@ +Enhancement: (alpha) Store deviceID only for hardlinks + +Set `RESTIC_FEATURES=device-id-for-hardlinks` to enable this alpha feature. +The feature flag will be removed after repository format version 3 becomes +available or be replaced with a different solution. + +When creating backups from a filesystem snapshot, for example created using +btrfs subvolumes, the deviceID of the filesystem changes compared to previous +snapshots. This prevented restic from deduplicating the directory metadata of +a snapshot. + +When this alpha feature is enabled, then the deviceID is only stored for +hardlinks. This significantly reduces the metadata duplication for most +backups. + +https://github.com/restic/restic/pull/4006 From cf81f8ced63bee4f1d56c95b1e6557af251e2dcb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 21:28:01 +0100 Subject: [PATCH 081/893] stats: only check for hardlinks for files with more than one link --- changelog/unreleased/pull-4503 | 1 + cmd/restic/cmd_stats.go | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/changelog/unreleased/pull-4503 b/changelog/unreleased/pull-4503 index 3ce5c48e862..b52552d6967 100644 --- a/changelog/unreleased/pull-4503 +++ b/changelog/unreleased/pull-4503 @@ -4,4 +4,5 @@ If files on different devices had the same inode id, then the `stats` command did not correctly calculate the snapshot size. This has been fixed. 
https://github.com/restic/restic/pull/4503 +https://github.com/restic/restic/pull/4006 https://forum.restic.net/t/possible-bug-in-stats/6461/8 diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index d3078a419a4..b84620bab45 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -270,11 +270,14 @@ func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer, // will still be restored stats.TotalFileCount++ - // if inodes are present, only count each inode once - // (hard links do not increase restore size) - if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 { - hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{}) + if node.Links == 1 || node.Type == "dir" { stats.TotalSize += node.Size + } else { + // if hardlinks are present only count each deviceID+inode once + if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 { + hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{}) + stats.TotalSize += node.Size + } } } From 118a69a84b5408201142ea06f6e25ae908527598 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 15:19:02 +0100 Subject: [PATCH 082/893] lock: replace lockRepo(Exclusive) with openWith(Read/Write/Exclusive)Lock The new functions much better convey the intent behind the lock request. This allows cleanly integrating noLock (for read) and dryRun (write/exclusive) handling. There are only minor changes to existing behavior with two exceptions: - `tag` no longer accepts the `--no-lock` flag. As it replaces files in the repository, this always requires an exclusive lock. - `debug examine` now returns an error if both `--extract-pack` and `--no-lock` are given. --- cmd/restic/cmd_backup.go | 19 ++------------- cmd/restic/cmd_cat.go | 12 ++-------- cmd/restic/cmd_check.go | 16 ++++--------- cmd/restic/cmd_copy.go | 21 ++++------------- cmd/restic/cmd_debug.go | 28 +++++++--------------- cmd/restic/cmd_diff.go | 12 ++-------- cmd/restic/cmd_dump.go | 12 ++-------- cmd/restic/cmd_find.go | 12 ++-------- cmd/restic/cmd_forget.go | 16 ++++--------- cmd/restic/cmd_key_add.go | 9 ++----- cmd/restic/cmd_key_list.go | 12 ++-------- cmd/restic/cmd_key_passwd.go | 9 ++----- cmd/restic/cmd_key_remove.go | 13 +++------- cmd/restic/cmd_list.go | 12 ++-------- cmd/restic/cmd_migrate.go | 9 ++----- cmd/restic/cmd_mount.go | 12 ++-------- cmd/restic/cmd_prune.go | 9 ++----- cmd/restic/cmd_recover.go | 9 ++----- cmd/restic/cmd_repair_index.go | 9 ++----- cmd/restic/cmd_repair_packs.go | 9 ++----- cmd/restic/cmd_repair_snapshots.go | 15 ++---------- cmd/restic/cmd_restore.go | 12 ++-------- cmd/restic/cmd_rewrite.go | 31 ++++++++++-------------- cmd/restic/cmd_snapshots.go | 12 ++-------- cmd/restic/cmd_stats.go | 12 ++-------- cmd/restic/cmd_tag.go | 16 ++++--------- cmd/restic/lock.go | 38 ++++++++++++++++++++++++++---- cmd/restic/lock_test.go | 18 +++++++------- 28 files changed, 122 insertions(+), 292 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index acc4bddb11e..8b2f1f8083e 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -463,10 +463,11 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter Verbosef("open repository\n") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, opts.DryRun) if err != nil { return err } + defer unlock() var progressPrinter backup.ProgressPrinter if gopts.JSON { @@ -478,22 +479,6 @@ func runBackup(ctx context.Context, opts BackupOptions, 
gopts GlobalOptions, ter calculateProgressInterval(!gopts.Quiet, gopts.JSON)) defer progressReporter.Done() - if opts.DryRun { - repo.SetDryRun() - } - - if !gopts.JSON { - progressPrinter.V("lock repository") - } - if !opts.DryRun { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } - // rejectByNameFuncs collect functions that can reject items from the backup based on path only rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo) if err != nil { diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index 92f58b2e734..ccec9b5d915 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -64,19 +64,11 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { return err } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() tpe := args[0] diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index cbe388877e8..7bea641ae8e 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -204,20 +204,14 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args return code, nil }) - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - if !gopts.NoLock { Verbosef("create exclusive lock for repository\n") - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } } + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, gopts.NoLock) + if err != nil { + return err + } + defer unlock() chkr := checker.New(repo, opts.CheckUnused) err = chkr.LoadSnapshots(ctx) diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 92922b42b62..410134e41ec 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -62,30 +62,17 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] gopts, secondaryGopts = secondaryGopts, gopts } - srcRepo, err := OpenRepository(ctx, gopts) + ctx, srcRepo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() - dstRepo, err := OpenRepository(ctx, secondaryGopts) - if err != nil { - return err - } - - if !gopts.NoLock { - var srcLock *restic.Lock - srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(srcLock) - if err != nil { - return err - } - } - - dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(dstLock) + ctx, dstRepo, unlock, err := openWithAppendLock(ctx, secondaryGopts, false) if err != nil { return err } + defer unlock() srcSnapshotLister, err := restic.MemorizeList(ctx, srcRepo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index a87e7a0c596..3abb9d7eb8a 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -153,19 +153,11 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error return errors.Fatal("type not specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock 
- lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() tpe := args[0] @@ -442,10 +434,15 @@ func storePlainBlob(id restic.ID, prefix string, plain []byte) error { } func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamineOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + if opts.ExtractPack && gopts.NoLock { + return fmt.Errorf("--extract-pack and --no-lock are mutually exclusive") + } + + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() ids := make([]restic.ID, 0) for _, name := range args { @@ -464,15 +461,6 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamine return errors.Fatal("no pack files to examine") } - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } - bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) if err != nil { diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 3bd29fa675d..b156191dc59 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -344,19 +344,11 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args [] return errors.Fatalf("specify two snapshot IDs") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() // cache snapshots listing be, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 9178f2abe52..39e915b4091 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -131,19 +131,11 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args [] splittedPath := splitPath(path.Clean(pathToPrint)) - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 7ea7c425a7d..e29fe30dc60 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -563,19 +563,11 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] return errors.Fatal("cannot have several ID types") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 65ff449a38e..f2fc1da8c58 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -163,23 +163,15 @@ func runForget(ctx 
context.Context, opts ForgetOptions, pruneOptions PruneOption return err } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - if gopts.NoLock && !opts.DryRun { return errors.Fatal("--no-lock is only applicable in combination with --dry-run for forget command") } - if !opts.DryRun || !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock) + if err != nil { + return err } + defer unlock() var snapshots restic.Snapshots removeSnIDs := restic.NewIDSet() diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index 43a38f4ebbd..83e0cab7f5d 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -50,16 +50,11 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg return fmt.Errorf("the key add command expects no arguments, only options - please see `restic help key add` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return addKey(ctx, repo, gopts, opts) } diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index 2b3574281b8..9bddb5ed33e 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -40,19 +40,11 @@ func runKeyList(ctx context.Context, gopts GlobalOptions, args []string) error { return fmt.Errorf("the key list command expects no arguments, only options - please see `restic help key list` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() return listKeys(ctx, repo, gopts) } diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index cb916274cc1..70abca6dc20 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -47,16 +47,11 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption return fmt.Errorf("the key passwd command expects no arguments, only options - please see `restic help key passwd` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return changePassword(ctx, repo, gopts, opts) } diff --git a/cmd/restic/cmd_key_remove.go b/cmd/restic/cmd_key_remove.go index c8e303ffc80..93babb4f3ec 100644 --- a/cmd/restic/cmd_key_remove.go +++ b/cmd/restic/cmd_key_remove.go @@ -37,20 +37,13 @@ func runKeyRemove(ctx context.Context, gopts GlobalOptions, args []string) error return fmt.Errorf("key remove expects one argument as the key id") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, 
gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - - idPrefix := args[0] - - return deleteKey(ctx, repo, idPrefix) + return deleteKey(ctx, repo, args[0]) } func deleteKey(ctx context.Context, repo *repository.Repository, idPrefix string) error { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index becad7f0deb..a3df0c98f5d 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -36,19 +36,11 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { return errors.Fatal("type not specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock || args[0] == "locks") if err != nil { return err } - - if !gopts.NoLock && args[0] != "locks" { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() var t restic.FileType switch args[0] { diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index fd2e762c0f4..c3f82b8dd91 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -117,16 +117,11 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptio } func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() if len(args) == 0 { return checkMigrations(ctx, repo) diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 5fd81b3444f..cb2b1142d5d 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -125,19 +125,11 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args debug.Log("start mount") defer debug.Log("finish mount") - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 1b9352ea7f6..3a9a8c33cea 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -148,10 +148,11 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() if repo.Connections() < 2 { return errors.Fatal("prune requires a backend connection limit of at least two") @@ -169,12 +170,6 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error opts.unsafeRecovery = true } - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet()) } diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index b97a7582b19..f9a4d419d22 100644 --- 
a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -40,16 +40,11 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { return err } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false) if err != nil { return err } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index ea36f02f609..1ac743348ed 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -56,16 +56,11 @@ func init() { } func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error { - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return rebuildIndex(ctx, opts, gopts, repo) } diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 521b5859fd4..00dee076b33 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -52,16 +52,11 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T return errors.Fatal("no ids specified") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index cc3d0eb8598..4d9745e1550 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -66,22 +66,11 @@ func init() { } func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun) if err != nil { return err } - - if !opts.DryRun { - var lock *restic.Lock - var err error - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } else { - repo.SetDryRun() - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 58f257541fe..5161be50d68 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -127,19 +127,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, debug.Log("restore %v to %v", snapshotIDString, opts.Target) - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 
62624e75c49..06d4ddbd177 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -256,27 +256,22 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided") } - repo, err := OpenRepository(ctx, gopts) + var ( + repo *repository.Repository + unlock func() + err error + ) + + if opts.Forget { + Verbosef("create exclusive lock for repository\n") + ctx, repo, unlock, err = openWithExclusiveLock(ctx, gopts, opts.DryRun) + } else { + ctx, repo, unlock, err = openWithAppendLock(ctx, gopts, opts.DryRun) + } if err != nil { return err } - - if !opts.DryRun { - var lock *restic.Lock - var err error - if opts.Forget { - Verbosef("create exclusive lock for repository\n") - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - } else { - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - } - defer unlockRepo(lock) - if err != nil { - return err - } - } else { - repo.SetDryRun() - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index d6199d47a27..1a9cd2232b2 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -59,19 +59,11 @@ func init() { } func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() var snapshots restic.Snapshots for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index b84620bab45..20d7a485c88 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -80,19 +80,11 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args return err } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 01f3ad8afef..b0d139fa673 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -104,20 +104,12 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st return errors.Fatal("--set and --add/--remove cannot be given at the same time") } - repo, err := OpenRepository(ctx, gopts) + Verbosef("create exclusive lock for repository\n") + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { - return err - } - - if !gopts.NoLock { - Verbosef("create exclusive lock for repository\n") - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + return nil } + defer unlock() changeCnt := 0 for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { diff --git 
a/cmd/restic/lock.go b/cmd/restic/lock.go index 600b7476f42..29641e670f9 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -9,6 +9,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) @@ -24,12 +25,41 @@ var globalLocks struct { sync.Once } -func lockRepo(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - return lockRepository(ctx, repo, false, retryLock, json) +func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun bool, exclusive bool) (context.Context, *repository.Repository, func(), error) { + repo, err := OpenRepository(ctx, gopts) + if err != nil { + return nil, nil, nil, err + } + + unlock := func() {} + if !dryRun { + var lock *restic.Lock + lock, ctx, err = lockRepository(ctx, repo, exclusive, gopts.RetryLock, gopts.JSON) + unlock = func() { + unlockRepo(lock) + } + if err != nil { + return nil, nil, nil, err + } + } else { + repo.SetDryRun() + } + + return ctx, repo, unlock, nil +} + +func openWithReadLock(ctx context.Context, gopts GlobalOptions, noLock bool) (context.Context, *repository.Repository, func(), error) { + // TODO enfore read-only operations once the locking code has moved to the repository + return internalOpenWithLocked(ctx, gopts, noLock, false) +} + +func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) { + // TODO enfore non-exclusive operations once the locking code has moved to the repository + return internalOpenWithLocked(ctx, gopts, dryRun, false) } -func lockRepoExclusive(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - return lockRepository(ctx, repo, true, retryLock, json) +func openWithExclusiveLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) { + return internalOpenWithLocked(ctx, gopts, dryRun, true) } var ( diff --git a/cmd/restic/lock_test.go b/cmd/restic/lock_test.go index bf22db699d9..83d5f2a5e35 100644 --- a/cmd/restic/lock_test.go +++ b/cmd/restic/lock_test.go @@ -37,7 +37,7 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Reposit } func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) { - lock, wrappedCtx, err := lockRepo(ctx, repo, env.gopts.RetryLock, env.gopts.JSON) + lock, wrappedCtx, err := lockRepository(ctx, repo, false, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) test.OK(t, wrappedCtx.Err()) if lock.Stale() { @@ -94,10 +94,10 @@ func TestLockConflict(t *testing.T) { repo2, err := OpenRepository(context.TODO(), env.gopts) test.OK(t, err) - lock, _, err := lockRepoExclusive(context.Background(), repo, env.gopts.RetryLock, env.gopts.JSON) + lock, _, err := lockRepository(context.Background(), repo, true, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) defer unlockRepo(lock) - _, _, err = lockRepo(context.Background(), repo2, env.gopts.RetryLock, env.gopts.JSON) + _, _, err = lockRepository(context.Background(), repo2, false, env.gopts.RetryLock, env.gopts.JSON) if err == nil { t.Fatal("second lock should have failed") } @@ -260,13 +260,13 @@ func TestLockWaitTimeout(t *testing.T) { repo, cleanup, env := 
openLockTestRepo(t, nil) defer cleanup() - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) retryLock := 200 * time.Millisecond start := time.Now() - lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON) + lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) duration := time.Since(start) test.Assert(t, err != nil, @@ -284,7 +284,7 @@ func TestLockWaitCancel(t *testing.T) { repo, cleanup, env := openLockTestRepo(t, nil) defer cleanup() - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -294,7 +294,7 @@ func TestLockWaitCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) time.AfterFunc(cancelAfter, cancel) - lock, _, err := lockRepo(ctx, repo, retryLock, env.gopts.JSON) + lock, _, err := lockRepository(ctx, repo, false, retryLock, env.gopts.JSON) duration := time.Since(start) test.Assert(t, err != nil, @@ -312,7 +312,7 @@ func TestLockWaitSuccess(t *testing.T) { repo, cleanup, env := openLockTestRepo(t, nil) defer cleanup() - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -322,7 +322,7 @@ func TestLockWaitSuccess(t *testing.T) { test.OK(t, elock.Unlock()) }) - lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON) + lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) test.OK(t, err) test.OK(t, lock.Unlock()) From cbb5f89252523f8ebe631ee53f326b75f810d6f8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 16:26:29 +0100 Subject: [PATCH 083/893] lock: move code to repository package --- cmd/restic/lock.go | 312 +----------------- internal/repository/lock.go | 301 +++++++++++++++++ .../repository}/lock_test.go | 120 ++++--- 3 files changed, 370 insertions(+), 363 deletions(-) create mode 100644 internal/repository/lock.go rename {cmd/restic => internal/repository}/lock_test.go (70%) diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go index 29641e670f9..20ac4dd3492 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -2,26 +2,13 @@ package main import ( "context" - "fmt" "sync" - "time" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) -type lockContext struct { - lock *restic.Lock - cancel context.CancelFunc - refreshWG sync.WaitGroup -} - var globalLocks struct { - locks map[*restic.Lock]*lockContext - sync.Mutex sync.Once } @@ -34,9 +21,20 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo unlock := func() {} if !dryRun { var lock *restic.Lock - lock, ctx, err = lockRepository(ctx, repo, exclusive, gopts.RetryLock, gopts.JSON) + + // make sure that a repository is unlocked properly and after cancel() was + // called by the cleanup handler in global.go + globalLocks.Do(func() { + AddCleanupHandler(repository.UnlockAll) + }) + + lock, ctx, err = repository.Lock(ctx, repo, exclusive, 
gopts.RetryLock, func(msg string) { + if !gopts.JSON { + Verbosef("%s", msg) + } + }, Warnf) unlock = func() { - unlockRepo(lock) + repository.Unlock(lock) } if err != nil { return nil, nil, nil, err @@ -61,287 +59,3 @@ func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) ( func openWithExclusiveLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) { return internalOpenWithLocked(ctx, gopts, dryRun, true) } - -var ( - retrySleepStart = 5 * time.Second - retrySleepMax = 60 * time.Second -) - -func minDuration(a, b time.Duration) time.Duration { - if a <= b { - return a - } - return b -} - -// lockRepository wraps the ctx such that it is cancelled when the repository is unlocked -// cancelling the original context also stops the lock refresh -func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - // make sure that a repository is unlocked properly and after cancel() was - // called by the cleanup handler in global.go - globalLocks.Do(func() { - AddCleanupHandler(unlockAll) - }) - - lockFn := restic.NewLock - if exclusive { - lockFn = restic.NewExclusiveLock - } - - var lock *restic.Lock - var err error - - retrySleep := minDuration(retrySleepStart, retryLock) - retryMessagePrinted := false - retryTimeout := time.After(retryLock) - -retryLoop: - for { - lock, err = lockFn(ctx, repo) - if err != nil && restic.IsAlreadyLocked(err) { - - if !retryMessagePrinted { - if !json { - Verbosef("repo already locked, waiting up to %s for the lock\n", retryLock) - } - retryMessagePrinted = true - } - - debug.Log("repo already locked, retrying in %v", retrySleep) - retrySleepCh := time.After(retrySleep) - - select { - case <-ctx.Done(): - return nil, ctx, ctx.Err() - case <-retryTimeout: - debug.Log("repo already locked, timeout expired") - // Last lock attempt - lock, err = lockFn(ctx, repo) - break retryLoop - case <-retrySleepCh: - retrySleep = minDuration(retrySleep*2, retrySleepMax) - } - } else { - // anything else, either a successful lock or another error - break retryLoop - } - } - if restic.IsInvalidLock(err) { - return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err) - } - if err != nil { - return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err) - } - debug.Log("create lock %p (exclusive %v)", lock, exclusive) - - ctx, cancel := context.WithCancel(ctx) - lockInfo := &lockContext{ - lock: lock, - cancel: cancel, - } - lockInfo.refreshWG.Add(2) - refreshChan := make(chan struct{}) - forceRefreshChan := make(chan refreshLockRequest) - - globalLocks.Lock() - globalLocks.locks[lock] = lockInfo - go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan) - go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan) - globalLocks.Unlock() - - return lock, ctx, err -} - -var refreshInterval = 5 * time.Minute - -// consider a lock refresh failed a bit before the lock actually becomes stale -// the difference allows to compensate for a small time drift between clients. 
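// --- Editor's note, not part of the patch: a minimal sketch of the calling
// convention the openWith*Lock helpers above establish for commands in package
// main. runExample and its body are illustrative; the helper names, GlobalOptions
// and LoadIndex are taken from the surrounding diffs.
func runExample(ctx context.Context, gopts GlobalOptions) error {
	// read-only command: take a non-exclusive lock unless --no-lock was given
	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
	if err != nil {
		return err
	}
	// unlock releases the repository lock; the returned ctx is cancelled once the
	// lock can no longer be refreshed, so all further work should use it
	defer unlock()

	return repo.LoadIndex(ctx, nil)
}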
-var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 - -type refreshLockRequest struct { - result chan bool -} - -func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) { - debug.Log("start") - lock := lockInfo.lock - ticker := time.NewTicker(refreshInterval) - lastRefresh := lock.Time - - defer func() { - ticker.Stop() - // ensure that the context was cancelled before removing the lock - lockInfo.cancel() - - // remove the lock from the repo - debug.Log("unlocking repository with lock %v", lock) - if err := lock.Unlock(); err != nil { - debug.Log("error while unlocking: %v", err) - Warnf("error while unlocking: %v", err) - } - - lockInfo.refreshWG.Done() - }() - - for { - select { - case <-ctx.Done(): - debug.Log("terminate") - return - - case req := <-forceRefresh: - debug.Log("trying to refresh stale lock") - // keep on going if our current lock still exists - success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel) - // inform refresh goroutine about forced refresh - select { - case <-ctx.Done(): - case req.result <- success: - } - - if success { - // update lock refresh time - lastRefresh = lock.Time - } - - case <-ticker.C: - if time.Since(lastRefresh) > refreshabilityTimeout { - // the lock is too old, wait until the expiry monitor cancels the context - continue - } - - debug.Log("refreshing locks") - err := lock.Refresh(context.TODO()) - if err != nil { - Warnf("unable to refresh lock: %v\n", err) - } else { - lastRefresh = lock.Time - // inform monitor goroutine about successful refresh - select { - case <-ctx.Done(): - case refreshed <- struct{}{}: - } - } - } - } -} - -func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest) { - // time.Now() might use a monotonic timer which is paused during standby - // convert to unix time to ensure we compare real time values - lastRefresh := time.Now().UnixNano() - pollDuration := 1 * time.Second - if refreshInterval < pollDuration { - // require for TestLockFailedRefresh - pollDuration = refreshInterval / 5 - } - // timers are paused during standby, which is a problem as the refresh timeout - // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks - // https://github.com/golang/go/issues/35012 - ticker := time.NewTicker(pollDuration) - defer func() { - ticker.Stop() - lockInfo.cancel() - lockInfo.refreshWG.Done() - }() - - var refreshStaleLockResult chan bool - - for { - select { - case <-ctx.Done(): - debug.Log("terminate expiry monitoring") - return - case <-refreshed: - if refreshStaleLockResult != nil { - // ignore delayed refresh notifications while the stale lock is refreshed - continue - } - lastRefresh = time.Now().UnixNano() - case <-ticker.C: - if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { - continue - } - - debug.Log("trying to refreshStaleLock") - // keep on going if our current lock still exists - refreshReq := refreshLockRequest{ - result: make(chan bool), - } - refreshStaleLockResult = refreshReq.result - - // inform refresh goroutine about forced refresh - select { - case <-ctx.Done(): - case forceRefresh <- refreshReq: - } - case success := <-refreshStaleLockResult: - if success { - lastRefresh = time.Now().UnixNano() - refreshStaleLockResult = nil - continue - } - - Warnf("Fatal: failed to refresh lock in time\n") - return - } - } -} - -func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc) bool { - freeze := backend.AsBackend[backend.FreezeBackend](be) - if freeze != nil { - debug.Log("freezing backend") - freeze.Freeze() - defer freeze.Unfreeze() - } - - err := lock.RefreshStaleLock(ctx) - if err != nil { - Warnf("failed to refresh stale lock: %v\n", err) - // cancel context while the backend is still frozen to prevent accidental modifications - cancel() - return false - } - - return true -} - -func unlockRepo(lock *restic.Lock) { - if lock == nil { - return - } - - globalLocks.Lock() - lockInfo, exists := globalLocks.locks[lock] - delete(globalLocks.locks, lock) - globalLocks.Unlock() - - if !exists { - debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) - return - } - lockInfo.cancel() - lockInfo.refreshWG.Wait() -} - -func unlockAll(code int) (int, error) { - globalLocks.Lock() - locks := globalLocks.locks - debug.Log("unlocking %d locks", len(globalLocks.locks)) - for _, lockInfo := range globalLocks.locks { - lockInfo.cancel() - } - globalLocks.locks = make(map[*restic.Lock]*lockContext) - globalLocks.Unlock() - - for _, lockInfo := range locks { - lockInfo.refreshWG.Wait() - } - - return code, nil -} - -func init() { - globalLocks.locks = make(map[*restic.Lock]*lockContext) -} diff --git a/internal/repository/lock.go b/internal/repository/lock.go new file mode 100644 index 00000000000..c64cb92220d --- /dev/null +++ b/internal/repository/lock.go @@ -0,0 +1,301 @@ +package repository + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +type lockContext struct { + lock *restic.Lock + cancel context.CancelFunc + refreshWG sync.WaitGroup +} + +var globalLocks struct { + locks map[*restic.Lock]*lockContext + sync.Mutex +} + +var ( + retrySleepStart = 5 * time.Second + retrySleepMax = 60 * time.Second +) + +func minDuration(a, b time.Duration) time.Duration { + if a <= b { + return a + } + return b +} + +// Lock wraps the ctx such that it is cancelled when the repository is unlocked +// cancelling the original context also stops the lock refresh +func 
Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*restic.Lock, context.Context, error) { + + lockFn := restic.NewLock + if exclusive { + lockFn = restic.NewExclusiveLock + } + + var lock *restic.Lock + var err error + + retrySleep := minDuration(retrySleepStart, retryLock) + retryMessagePrinted := false + retryTimeout := time.After(retryLock) + +retryLoop: + for { + lock, err = lockFn(ctx, repo) + if err != nil && restic.IsAlreadyLocked(err) { + + if !retryMessagePrinted { + printRetry(fmt.Sprintf("repo already locked, waiting up to %s for the lock\n", retryLock)) + retryMessagePrinted = true + } + + debug.Log("repo already locked, retrying in %v", retrySleep) + retrySleepCh := time.After(retrySleep) + + select { + case <-ctx.Done(): + return nil, ctx, ctx.Err() + case <-retryTimeout: + debug.Log("repo already locked, timeout expired") + // Last lock attempt + lock, err = lockFn(ctx, repo) + break retryLoop + case <-retrySleepCh: + retrySleep = minDuration(retrySleep*2, retrySleepMax) + } + } else { + // anything else, either a successful lock or another error + break retryLoop + } + } + if restic.IsInvalidLock(err) { + return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err) + } + if err != nil { + return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err) + } + debug.Log("create lock %p (exclusive %v)", lock, exclusive) + + ctx, cancel := context.WithCancel(ctx) + lockInfo := &lockContext{ + lock: lock, + cancel: cancel, + } + lockInfo.refreshWG.Add(2) + refreshChan := make(chan struct{}) + forceRefreshChan := make(chan refreshLockRequest) + + globalLocks.Lock() + globalLocks.locks[lock] = lockInfo + go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) + go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) + globalLocks.Unlock() + + return lock, ctx, err +} + +var refreshInterval = 5 * time.Minute + +// consider a lock refresh failed a bit before the lock actually becomes stale +// the difference allows to compensate for a small time drift between clients. 
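// --- Editor's note, not part of the patch: the exported Lock above takes two
// positional callbacks, so this hedged sketch labels them. It mirrors the call
// made from cmd/restic/lock.go after the move; note that patch 084 in this
// series later changes the first return value from *restic.Lock to *Unlocker.
func exampleLockCall(ctx context.Context, repo restic.Repository, gopts GlobalOptions) error {
	lock, wrappedCtx, err := repository.Lock(ctx, repo, false, gopts.RetryLock,
		func(msg string) { // printRetry: progress output while waiting for a held lock
			if !gopts.JSON {
				Verbosef("%s", msg)
			}
		},
		Warnf, // logger: Printf-style sink for refresh and unlock warnings
	)
	if err != nil {
		return err
	}
	defer repository.Unlock(lock)
	// wrappedCtx is cancelled when the repository is unlocked or the lock refresh
	// fails, so all further repository access should use it instead of ctx
	_ = wrappedCtx
	return nil
}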
+var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 + +type refreshLockRequest struct { + result chan bool +} + +func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { + debug.Log("start") + lock := lockInfo.lock + ticker := time.NewTicker(refreshInterval) + lastRefresh := lock.Time + + defer func() { + ticker.Stop() + // ensure that the context was cancelled before removing the lock + lockInfo.cancel() + + // remove the lock from the repo + debug.Log("unlocking repository with lock %v", lock) + if err := lock.Unlock(); err != nil { + debug.Log("error while unlocking: %v", err) + logger("error while unlocking: %v", err) + } + + lockInfo.refreshWG.Done() + }() + + for { + select { + case <-ctx.Done(): + debug.Log("terminate") + return + + case req := <-forceRefresh: + debug.Log("trying to refresh stale lock") + // keep on going if our current lock still exists + success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel, logger) + // inform refresh goroutine about forced refresh + select { + case <-ctx.Done(): + case req.result <- success: + } + + if success { + // update lock refresh time + lastRefresh = lock.Time + } + + case <-ticker.C: + if time.Since(lastRefresh) > refreshabilityTimeout { + // the lock is too old, wait until the expiry monitor cancels the context + continue + } + + debug.Log("refreshing locks") + err := lock.Refresh(context.TODO()) + if err != nil { + logger("unable to refresh lock: %v\n", err) + } else { + lastRefresh = lock.Time + // inform monitor goroutine about successful refresh + select { + case <-ctx.Done(): + case refreshed <- struct{}{}: + } + } + } + } +} + +func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { + // time.Now() might use a monotonic timer which is paused during standby + // convert to unix time to ensure we compare real time values + lastRefresh := time.Now().UnixNano() + pollDuration := 1 * time.Second + if refreshInterval < pollDuration { + // required for TestLockFailedRefresh + pollDuration = refreshInterval / 5 + } + // timers are paused during standby, which is a problem as the refresh timeout + // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks + // https://github.com/golang/go/issues/35012 + ticker := time.NewTicker(pollDuration) + defer func() { + ticker.Stop() + lockInfo.cancel() + lockInfo.refreshWG.Done() + }() + + var refreshStaleLockResult chan bool + + for { + select { + case <-ctx.Done(): + debug.Log("terminate expiry monitoring") + return + case <-refreshed: + if refreshStaleLockResult != nil { + // ignore delayed refresh notifications while the stale lock is refreshed + continue + } + lastRefresh = time.Now().UnixNano() + case <-ticker.C: + if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { + continue + } + + debug.Log("trying to refreshStaleLock") + // keep on going if our current lock still exists + refreshReq := refreshLockRequest{ + result: make(chan bool), + } + refreshStaleLockResult = refreshReq.result + + // inform refresh goroutine about forced refresh + select { + case <-ctx.Done(): + case forceRefresh <- refreshReq: + } + case success := <-refreshStaleLockResult: + if success { + lastRefresh = time.Now().UnixNano() + refreshStaleLockResult = nil + continue + } + + logger("Fatal: failed to refresh lock in time\n") + return + } + } +} + +func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc, logger func(format string, args ...interface{})) bool { + freeze := backend.AsBackend[backend.FreezeBackend](be) + if freeze != nil { + debug.Log("freezing backend") + freeze.Freeze() + defer freeze.Unfreeze() + } + + err := lock.RefreshStaleLock(ctx) + if err != nil { + logger("failed to refresh stale lock: %v\n", err) + // cancel context while the backend is still frozen to prevent accidental modifications + cancel() + return false + } + + return true +} + +func Unlock(lock *restic.Lock) { + if lock == nil { + return + } + + globalLocks.Lock() + lockInfo, exists := globalLocks.locks[lock] + delete(globalLocks.locks, lock) + globalLocks.Unlock() + + if !exists { + debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) + return + } + lockInfo.cancel() + lockInfo.refreshWG.Wait() +} + +func UnlockAll(code int) (int, error) { + globalLocks.Lock() + locks := globalLocks.locks + debug.Log("unlocking %d locks", len(globalLocks.locks)) + for _, lockInfo := range globalLocks.locks { + lockInfo.cancel() + } + globalLocks.locks = make(map[*restic.Lock]*lockContext) + globalLocks.Unlock() + + for _, lockInfo := range locks { + lockInfo.refreshWG.Wait() + } + + return code, nil +} + +func init() { + globalLocks.locks = make(map[*restic.Lock]*lockContext) +} diff --git a/cmd/restic/lock_test.go b/internal/repository/lock_test.go similarity index 70% rename from cmd/restic/lock_test.go rename to internal/repository/lock_test.go index 83d5f2a5e35..fb48a566f16 100644 --- a/cmd/restic/lock_test.go +++ b/internal/repository/lock_test.go @@ -1,4 +1,4 @@ -package main +package repository import ( "context" @@ -10,34 +10,35 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) -func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) { - env, cleanup := withTestEnvironment(t) +type 
backendWrapper func(r backend.Backend) (backend.Backend, error) - reg := location.NewRegistry() - reg.Register(mem.NewFactory()) - env.gopts.backends = reg - env.gopts.Repo = "mem:" +func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { + be := backend.Backend(mem.New()) + // initialize repo + TestRepositoryWithBackend(t, be, 0, Options{}) + // reopen repository to allow injecting a backend wrapper if wrapper != nil { - env.gopts.backendTestHook = wrapper + var err error + be, err = wrapper(be) + rtest.OK(t, err) } - testRunInit(t, env.gopts) - repo, err := OpenRepository(context.TODO(), env.gopts) - test.OK(t, err) - return repo, cleanup, env + repo, err := New(be, Options{}) + rtest.OK(t, err) + rtest.OK(t, repo.SearchKey(context.TODO(), test.TestPassword, 1, "")) + return repo } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) { - lock, wrappedCtx, err := lockRepository(ctx, repo, false, env.gopts.RetryLock, env.gopts.JSON) +func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*restic.Lock, context.Context) { + lock, wrappedCtx, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) if lock.Stale() { @@ -47,57 +48,54 @@ func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, } func TestLock(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - unlockRepo(lock) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + Unlock(lock) if wrappedCtx.Err() == nil { t.Fatal("unlock did not cancel context") } } func TestLockCancel(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lock, wrappedCtx := checkedLockRepo(ctx, t, repo, env) + lock, wrappedCtx := checkedLockRepo(ctx, t, repo, 0) cancel() if wrappedCtx.Err() == nil { t.Fatal("canceled parent context did not cancel context") } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } func TestLockUnlockAll(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - _, err := unlockAll(0) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + _, err := UnlockAll(0) test.OK(t, err) if wrappedCtx.Err() == nil { t.Fatal("canceled parent context did not cancel context") } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } func TestLockConflict(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - repo2, err := OpenRepository(context.TODO(), env.gopts) + repo := openLockTestRepo(t, nil) + repo2, err := New(repo.Backend(), Options{}) test.OK(t, err) + test.OK(t, repo2.SearchKey(context.TODO(), test.TestPassword, 1, "")) - lock, _, err := lockRepository(context.Background(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - defer unlockRepo(lock) - _, _, err = 
lockRepository(context.Background(), repo2, false, env.gopts.RetryLock, env.gopts.JSON) + defer Unlock(lock) + _, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {}) if err == nil { t.Fatal("second lock should have failed") } @@ -118,10 +116,9 @@ func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backen } func TestLockFailedRefresh(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &writeOnceBackend{Backend: r}, nil }) - defer cleanup() // reduce locking intervals to be suitable for testing ri, rt := refreshInterval, refreshabilityTimeout @@ -131,7 +128,7 @@ func TestLockFailedRefresh(t *testing.T) { refreshInterval, refreshabilityTimeout = ri, rt }() - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) select { case <-wrappedCtx.Done(): @@ -139,8 +136,8 @@ func TestLockFailedRefresh(t *testing.T) { case <-time.After(time.Second): t.Fatal("failed lock refresh did not cause context cancellation") } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } type loggingBackend struct { @@ -156,13 +153,12 @@ func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend. } func TestLockSuccessfulRefresh(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &loggingBackend{ Backend: r, t: t, }, nil }) - defer cleanup() t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing @@ -173,7 +169,7 @@ func TestLockSuccessfulRefresh(t *testing.T) { refreshInterval, refreshabilityTimeout = ri, rt }() - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) select { case <-wrappedCtx.Done(): @@ -189,8 +185,8 @@ func TestLockSuccessfulRefresh(t *testing.T) { case <-time.After(2 * refreshabilityTimeout): // expected lock refresh to work } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } type slowBackend struct { @@ -209,11 +205,10 @@ func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.Rew func TestLockSuccessfulStaleRefresh(t *testing.T) { var sb *slowBackend - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { sb = &slowBackend{Backend: r} return sb, nil }) - defer cleanup() t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing @@ -224,7 +219,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { refreshInterval, refreshabilityTimeout = ri, rt }() - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) // delay lock refreshing long enough that the lock would expire sb.m.Lock() sb.sleep = refreshabilityTimeout + refreshInterval @@ -252,21 +247,20 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // expected lock refresh to work } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock 
should not crash + Unlock(lock) } func TestLockWaitTimeout(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) retryLock := 200 * time.Millisecond start := time.Now() - lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) + lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -281,10 +275,9 @@ func TestLockWaitTimeout(t *testing.T) { } func TestLockWaitCancel(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -294,7 +287,7 @@ func TestLockWaitCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) time.AfterFunc(cancelAfter, cancel) - lock, _, err := lockRepository(ctx, repo, false, retryLock, env.gopts.JSON) + lock, _, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -309,10 +302,9 @@ func TestLockWaitCancel(t *testing.T) { } func TestLockWaitSuccess(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -322,7 +314,7 @@ func TestLockWaitSuccess(t *testing.T) { test.OK(t, elock.Unlock()) }) - lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) + lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, lock.Unlock()) From e8df50fa3c0e5e4ab69e5f21aeedbd9ba0c36dee Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 16:45:57 +0100 Subject: [PATCH 084/893] repository: remove global list of locks --- cmd/restic/lock.go | 26 ++++++---------- internal/repository/lock.go | 52 +++++--------------------------- internal/repository/lock_test.go | 50 +++++++++++------------------- 3 files changed, 34 insertions(+), 94 deletions(-) diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go index 20ac4dd3492..69d433df172 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -2,16 +2,10 @@ package main import ( "context" - "sync" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" ) -var globalLocks struct { - sync.Once -} - func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun bool, exclusive bool) (context.Context, *repository.Repository, func(), error) { repo, err := OpenRepository(ctx, gopts) if err != nil { @@ -20,22 +14,22 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo unlock := func() {} if 
!dryRun { - var lock *restic.Lock - - // make sure that a repository is unlocked properly and after cancel() was - // called by the cleanup handler in global.go - globalLocks.Do(func() { - AddCleanupHandler(repository.UnlockAll) - }) + var lock *repository.Unlocker lock, ctx, err = repository.Lock(ctx, repo, exclusive, gopts.RetryLock, func(msg string) { if !gopts.JSON { Verbosef("%s", msg) } }, Warnf) - unlock = func() { - repository.Unlock(lock) - } + + unlock = lock.Unlock + // make sure that a repository is unlocked properly and after cancel() was + // called by the cleanup handler in global.go + AddCleanupHandler(func(code int) (int, error) { + lock.Unlock() + return code, nil + }) + if err != nil { return nil, nil, nil, err } diff --git a/internal/repository/lock.go b/internal/repository/lock.go index c64cb92220d..e3360cac06a 100644 --- a/internal/repository/lock.go +++ b/internal/repository/lock.go @@ -18,11 +18,6 @@ type lockContext struct { refreshWG sync.WaitGroup } -var globalLocks struct { - locks map[*restic.Lock]*lockContext - sync.Mutex -} - var ( retrySleepStart = 5 * time.Second retrySleepMax = 60 * time.Second @@ -37,7 +32,7 @@ func minDuration(a, b time.Duration) time.Duration { // Lock wraps the ctx such that it is cancelled when the repository is unlocked // cancelling the original context also stops the lock refresh -func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*restic.Lock, context.Context, error) { +func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { lockFn := restic.NewLock if exclusive { @@ -97,13 +92,10 @@ retryLoop: refreshChan := make(chan struct{}) forceRefreshChan := make(chan refreshLockRequest) - globalLocks.Lock() - globalLocks.locks[lock] = lockInfo go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) - globalLocks.Unlock() - return lock, ctx, err + return &Unlocker{lockInfo}, ctx, nil } var refreshInterval = 5 * time.Minute @@ -261,41 +253,11 @@ func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.L return true } -func Unlock(lock *restic.Lock) { - if lock == nil { - return - } - - globalLocks.Lock() - lockInfo, exists := globalLocks.locks[lock] - delete(globalLocks.locks, lock) - globalLocks.Unlock() - - if !exists { - debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) - return - } - lockInfo.cancel() - lockInfo.refreshWG.Wait() -} - -func UnlockAll(code int) (int, error) { - globalLocks.Lock() - locks := globalLocks.locks - debug.Log("unlocking %d locks", len(globalLocks.locks)) - for _, lockInfo := range globalLocks.locks { - lockInfo.cancel() - } - globalLocks.locks = make(map[*restic.Lock]*lockContext) - globalLocks.Unlock() - - for _, lockInfo := range locks { - lockInfo.refreshWG.Wait() - } - - return code, nil +type Unlocker struct { + info *lockContext } -func init() { - globalLocks.locks = make(map[*restic.Lock]*lockContext) +func (l *Unlocker) Unlock() { + l.info.cancel() + l.info.refreshWG.Wait() } diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index fb48a566f16..2975ed7ffd2 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ 
-37,11 +37,11 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { return repo } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*restic.Lock, context.Context) { +func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*Unlocker, context.Context) { lock, wrappedCtx, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) - if lock.Stale() { + if lock.info.lock.Stale() { t.Fatal("lock returned stale lock") } return lock, wrappedCtx @@ -51,7 +51,7 @@ func TestLock(t *testing.T) { repo := openLockTestRepo(t, nil) lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) - Unlock(lock) + lock.Unlock() if wrappedCtx.Err() == nil { t.Fatal("unlock did not cancel context") } @@ -69,21 +69,7 @@ func TestLockCancel(t *testing.T) { } // Unlock should not crash - Unlock(lock) -} - -func TestLockUnlockAll(t *testing.T) { - repo := openLockTestRepo(t, nil) - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) - _, err := UnlockAll(0) - test.OK(t, err) - if wrappedCtx.Err() == nil { - t.Fatal("canceled parent context did not cancel context") - } - - // Unlock should not crash - Unlock(lock) + lock.Unlock() } func TestLockConflict(t *testing.T) { @@ -94,7 +80,7 @@ func TestLockConflict(t *testing.T) { lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - defer Unlock(lock) + defer lock.Unlock() _, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {}) if err == nil { t.Fatal("second lock should have failed") @@ -137,7 +123,7 @@ func TestLockFailedRefresh(t *testing.T) { t.Fatal("failed lock refresh did not cause context cancellation") } // Unlock should not crash - Unlock(lock) + lock.Unlock() } type loggingBackend struct { @@ -186,7 +172,7 @@ func TestLockSuccessfulRefresh(t *testing.T) { // expected lock refresh to work } // Unlock should not crash - Unlock(lock) + lock.Unlock() } type slowBackend struct { @@ -248,19 +234,21 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { } // Unlock should not crash - Unlock(lock) + lock.Unlock() } func TestLockWaitTimeout(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) + defer elock.Unlock() retryLock := 200 * time.Millisecond start := time.Now() - lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) + _, _, err = Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -269,16 +257,15 @@ func TestLockWaitTimeout(t *testing.T) { "create normal lock with exclusively locked repo didn't return the correct error") test.Assert(t, retryLock <= duration && duration < retryLock*3/2, "create normal lock with exclusively locked repo didn't wait for the specified timeout") - - test.OK(t, lock.Unlock()) - test.OK(t, elock.Unlock()) } func TestLockWaitCancel(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args 
...interface{}) {}) test.OK(t, err) + defer elock.Unlock() retryLock := 200 * time.Millisecond cancelAfter := 40 * time.Millisecond @@ -287,7 +274,7 @@ func TestLockWaitCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) time.AfterFunc(cancelAfter, cancel) - lock, _, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) + _, _, err = Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -296,12 +283,10 @@ func TestLockWaitCancel(t *testing.T) { "create normal lock with exclusively locked repo didn't return the correct error") test.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond, "create normal lock with exclusively locked repo didn't return in time, duration %v", duration) - - test.OK(t, lock.Unlock()) - test.OK(t, elock.Unlock()) } func TestLockWaitSuccess(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) @@ -311,11 +296,10 @@ func TestLockWaitSuccess(t *testing.T) { unlockAfter := 40 * time.Millisecond time.AfterFunc(unlockAfter, func() { - test.OK(t, elock.Unlock()) + elock.Unlock() }) lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - - test.OK(t, lock.Unlock()) + lock.Unlock() } From 044e8bf82157abcf9623465db608a1994c0e9dac Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 17:07:14 +0100 Subject: [PATCH 085/893] repository: parallelize lock tests --- internal/repository/lock.go | 63 +++++++++++++++++------------ internal/repository/lock_test.go | 68 +++++++++++++++++--------------- 2 files changed, 73 insertions(+), 58 deletions(-) diff --git a/internal/repository/lock.go b/internal/repository/lock.go index e3360cac06a..fd8214cd1d0 100644 --- a/internal/repository/lock.go +++ b/internal/repository/lock.go @@ -18,21 +18,31 @@ type lockContext struct { refreshWG sync.WaitGroup } -var ( - retrySleepStart = 5 * time.Second - retrySleepMax = 60 * time.Second -) +type locker struct { + retrySleepStart time.Duration + retrySleepMax time.Duration + refreshInterval time.Duration + refreshabilityTimeout time.Duration +} -func minDuration(a, b time.Duration) time.Duration { - if a <= b { - return a - } - return b +const defaultRefreshInterval = 5 * time.Minute + +var lockerInst = &locker{ + retrySleepStart: 5 * time.Second, + retrySleepMax: 60 * time.Second, + refreshInterval: defaultRefreshInterval, + // consider a lock refresh failed a bit before the lock actually becomes stale + // the difference allows to compensate for a small time drift between clients. 
+ refreshabilityTimeout: restic.StaleLockTimeout - defaultRefreshInterval*3/2, +} + +func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { + return lockerInst.Lock(ctx, repo, exclusive, retryLock, printRetry, logger) } // Lock wraps the ctx such that it is cancelled when the repository is unlocked // cancelling the original context also stops the lock refresh -func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { +func (l *locker) Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { lockFn := restic.NewLock if exclusive { @@ -42,7 +52,7 @@ func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock var lock *restic.Lock var err error - retrySleep := minDuration(retrySleepStart, retryLock) + retrySleep := minDuration(l.retrySleepStart, retryLock) retryMessagePrinted := false retryTimeout := time.After(retryLock) @@ -68,7 +78,7 @@ retryLoop: lock, err = lockFn(ctx, repo) break retryLoop case <-retrySleepCh: - retrySleep = minDuration(retrySleep*2, retrySleepMax) + retrySleep = minDuration(retrySleep*2, l.retrySleepMax) } } else { // anything else, either a successful lock or another error @@ -92,26 +102,27 @@ retryLoop: refreshChan := make(chan struct{}) forceRefreshChan := make(chan refreshLockRequest) - go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) - go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) + go l.refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) + go l.monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) return &Unlocker{lockInfo}, ctx, nil } -var refreshInterval = 5 * time.Minute - -// consider a lock refresh failed a bit before the lock actually becomes stale -// the difference allows to compensate for a small time drift between clients. 
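// --- Editor's note, not part of the patch: moving the timing knobs from
// package-level variables onto the locker struct is what makes the t.Parallel()
// calls added by this commit safe, since each test builds its own instance
// instead of mutating shared state. A test-local instance (mirroring
// lock_test.go below) might look like the hypothetical helper sketched here.
// Assuming restic.StaleLockTimeout is 30 minutes, the default
// refreshabilityTimeout above works out to 30m - 5m*3/2 = 22.5m.
func newShortIntervalLocker() *locker {
	return &locker{
		retrySleepStart:       lockerInst.retrySleepStart,
		retrySleepMax:         lockerInst.retrySleepMax,
		refreshInterval:       20 * time.Millisecond,  // production default: 5 minutes
		refreshabilityTimeout: 100 * time.Millisecond, // production default: derived from StaleLockTimeout
	}
}
// Tests then call newShortIntervalLocker().Lock(...) exactly as they would lockerInst.Lock(...).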
-var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 +func minDuration(a, b time.Duration) time.Duration { + if a <= b { + return a + } + return b +} type refreshLockRequest struct { result chan bool } -func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { +func (l *locker) refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { debug.Log("start") lock := lockInfo.lock - ticker := time.NewTicker(refreshInterval) + ticker := time.NewTicker(l.refreshInterval) lastRefresh := lock.Time defer func() { @@ -151,7 +162,7 @@ func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockCo } case <-ticker.C: - if time.Since(lastRefresh) > refreshabilityTimeout { + if time.Since(lastRefresh) > l.refreshabilityTimeout { // the lock is too old, wait until the expiry monitor cancels the context continue } @@ -172,14 +183,14 @@ func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockCo } } -func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { +func (l *locker) monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { // time.Now() might use a monotonic timer which is paused during standby // convert to unix time to ensure we compare real time values lastRefresh := time.Now().UnixNano() pollDuration := 1 * time.Second - if refreshInterval < pollDuration { + if l.refreshInterval < pollDuration { // required for TestLockFailedRefresh - pollDuration = refreshInterval / 5 + pollDuration = l.refreshInterval / 5 } // timers are paused during standby, which is a problem as the refresh timeout // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks @@ -205,7 +216,7 @@ func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <- } lastRefresh = time.Now().UnixNano() case <-ticker.C: - if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { + if time.Now().UnixNano()-lastRefresh < l.refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { continue } diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index 2975ed7ffd2..360ee2b23d0 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -37,8 +37,8 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { return repo } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*Unlocker, context.Context) { - lock, wrappedCtx, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) +func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { + lock, wrappedCtx, err := lockerInst.Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) if lock.info.lock.Stale() { @@ -48,9 +48,10 @@ func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, } func TestLock(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, lockerInst, 0) lock.Unlock() if wrappedCtx.Err() == nil { t.Fatal("unlock did not cancel context") @@ -58,11 +59,12 @@ func TestLock(t *testing.T) { } func TestLockCancel(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lock, wrappedCtx := checkedLockRepo(ctx, t, repo, 0) + lock, wrappedCtx := checkedLockRepo(ctx, t, repo, lockerInst, 0) cancel() if wrappedCtx.Err() == nil { t.Fatal("canceled parent context did not cancel context") @@ -73,6 +75,7 @@ func TestLockCancel(t *testing.T) { } func TestLockConflict(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) repo2, err := New(repo.Backend(), Options{}) test.OK(t, err) @@ -102,19 +105,19 @@ func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backen } func TestLockFailedRefresh(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &writeOnceBackend{Backend: r}, nil }) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 20 * time.Millisecond - refreshabilityTimeout = 100 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 20 * time.Millisecond, + refreshabilityTimeout: 100 * time.Millisecond, + } + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) select { case <-wrappedCtx.Done(): @@ -139,6 +142,7 @@ func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend. 
} func TestLockSuccessfulRefresh(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &loggingBackend{ Backend: r, @@ -148,14 +152,13 @@ func TestLockSuccessfulRefresh(t *testing.T) { t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 60 * time.Millisecond - refreshabilityTimeout = 500 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 60 * time.Millisecond, + refreshabilityTimeout: 500 * time.Millisecond, + } + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) select { case <-wrappedCtx.Done(): @@ -168,7 +171,7 @@ func TestLockSuccessfulRefresh(t *testing.T) { buf = buf[:n] t.Log(string(buf)) - case <-time.After(2 * refreshabilityTimeout): + case <-time.After(2 * li.refreshabilityTimeout): // expected lock refresh to work } // Unlock should not crash @@ -190,6 +193,7 @@ func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.Rew } func TestLockSuccessfulStaleRefresh(t *testing.T) { + t.Parallel() var sb *slowBackend repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { sb = &slowBackend{Backend: r} @@ -198,17 +202,17 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 10 * time.Millisecond - refreshabilityTimeout = 50 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 10 * time.Millisecond, + refreshabilityTimeout: 50 * time.Millisecond, + } + + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) // delay lock refreshing long enough that the lock would expire sb.m.Lock() - sb.sleep = refreshabilityTimeout + refreshInterval + sb.sleep = li.refreshabilityTimeout + li.refreshInterval sb.m.Unlock() select { @@ -216,7 +220,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // don't call t.Fatal to allow the lock to be properly cleaned up t.Error("lock refresh failed", time.Now()) - case <-time.After(refreshabilityTimeout): + case <-time.After(li.refreshabilityTimeout): } // reset slow backend sb.m.Lock() @@ -229,7 +233,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // don't call t.Fatal to allow the lock to be properly cleaned up t.Error("lock refresh failed", time.Now()) - case <-time.After(3 * refreshabilityTimeout): + case <-time.After(3 * li.refreshabilityTimeout): // expected lock refresh to work } From 3ba1fa3cee58aabaee0550b19d2bcb9a103f82d0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 17:20:10 +0100 Subject: [PATCH 086/893] repository: remove a few global variables --- internal/repository/key.go | 2 +- internal/repository/testing.go | 13 +++++-------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/internal/repository/key.go b/internal/repository/key.go index d9f8d8e1781..4d597da4deb 100644 
--- a/internal/repository/key.go +++ b/internal/repository/key.go @@ -47,7 +47,7 @@ type Key struct { // calibrated on the first run of AddKey(). var Params *crypto.Params -var ( +const ( // KDFTimeout specifies the maximum runtime for the KDF. KDFTimeout = 500 * time.Millisecond diff --git a/internal/repository/testing.go b/internal/repository/testing.go index dbbdbeb07b1..faa40c70a77 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -17,13 +17,6 @@ import ( "github.com/restic/chunker" ) -// testKDFParams are the parameters for the KDF to be used during testing. -var testKDFParams = crypto.Params{ - N: 128, - R: 1, - P: 1, -} - type logger interface { Logf(format string, args ...interface{}) } @@ -31,7 +24,11 @@ type logger interface { // TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing. func TestUseLowSecurityKDFParameters(t logger) { t.Logf("using low-security KDF parameters for test") - Params = &testKDFParams + Params = &crypto.Params{ + N: 128, + R: 1, + P: 1, + } } // TestBackend returns a fully configured in-memory backend. From dc441c57a76874bc503b71b9b2946b0af31a7934 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 21:45:24 +0100 Subject: [PATCH 087/893] repository: unify repository initialization in tests Tests should use a helper from internal/repository/testing.go to construct a Repository object. --- internal/checker/checker_test.go | 36 +++++++------------------- internal/index/index_parallel_test.go | 4 +-- internal/repository/lock_test.go | 9 ++----- internal/repository/repository_test.go | 11 +++----- internal/repository/testing.go | 13 +++++++++- 5 files changed, 27 insertions(+), 46 deletions(-) diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index cca5a582cdd..b0fa4e3e386 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -72,11 +72,9 @@ func assertOnlyMixedPackHints(t *testing.T, hints []error) { } func TestCheckRepo(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) if len(errs) > 0 { @@ -92,11 +90,9 @@ func TestCheckRepo(t *testing.T) { } func TestMissingPack(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - packHandle := backend.Handle{ Type: restic.PackFile, Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6", @@ -123,11 +119,9 @@ func TestMissingPack(t *testing.T) { } func TestUnreferencedPack(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - // index 3f1a only references pack 60e0 packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" indexHandle := backend.Handle{ @@ -156,11 +150,9 @@ func TestUnreferencedPack(t *testing.T) { } func TestUnreferencedBlobs(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - snapshotHandle := backend.Handle{ Type: restic.SnapshotFile, Name: 
"51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02", @@ -195,11 +187,9 @@ func TestUnreferencedBlobs(t *testing.T) { } func TestModifiedIndex(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - done := make(chan struct{}) defer close(done) @@ -274,11 +264,9 @@ func TestModifiedIndex(t *testing.T) { var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz") func TestDuplicatePacksInIndex(t *testing.T) { - repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData) + repo, cleanup := repository.TestFromFixture(t, checkerDuplicateIndexTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) if len(hints) == 0 { @@ -342,9 +330,7 @@ func TestCheckerModifiedData(t *testing.T) { t.Logf("archived as %v", sn.ID().Str()) beError := &errorBackend{Backend: repo.Backend()} - checkRepo, err := repository.New(beError, repository.Options{}) - test.OK(t, err) - test.OK(t, checkRepo.SearchKey(context.TODO(), test.TestPassword, 5, "")) + checkRepo := repository.TestOpenBackend(t, beError) chkr := checker.New(checkRepo, false) @@ -399,10 +385,8 @@ func (r *loadTreesOnceRepository) LoadTree(ctx context.Context, id restic.ID) (* } func TestCheckerNoDuplicateTreeDecodes(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - - repo := repository.TestOpenLocal(t, repodir) checkRepo := &loadTreesOnceRepository{ Repository: repo, loadedTrees: restic.NewIDSet(), @@ -549,9 +533,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) { } func loadBenchRepository(t *testing.B) (*checker.Checker, restic.Repository, func()) { - repodir, cleanup := test.Env(t, checkerTestData) - - repo := repository.TestOpenLocal(t, repodir) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) diff --git a/internal/index/index_parallel_test.go b/internal/index/index_parallel_test.go index db4853e1909..5cb8d299d9c 100644 --- a/internal/index/index_parallel_test.go +++ b/internal/index/index_parallel_test.go @@ -15,11 +15,9 @@ import ( var repoFixture = filepath.Join("..", "repository", "testdata", "test-repo.tar.gz") func TestRepositoryForAllIndexes(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - expectedIndexIDs := restic.NewIDSet() rtest.OK(t, repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error { expectedIndexIDs.Insert(id) diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index 360ee2b23d0..644fc6b374a 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -31,10 +31,7 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { rtest.OK(t, err) } - repo, err := New(be, Options{}) - rtest.OK(t, err) - rtest.OK(t, repo.SearchKey(context.TODO(), test.TestPassword, 1, "")) - return repo + return TestOpenBackend(t, be) } func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { @@ -77,9 +74,7 @@ 
func TestLockCancel(t *testing.T) { func TestLockConflict(t *testing.T) { t.Parallel() repo := openLockTestRepo(t, nil) - repo2, err := New(repo.Backend(), Options{}) - test.OK(t, err) - test.OK(t, repo2.SearchKey(context.TODO(), test.TestPassword, 1, "")) + repo2 := TestOpenBackend(t, repo.Backend()) lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 0fa8e4d4acb..98ff560fe08 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -221,10 +221,9 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) { var repoFixture = filepath.Join("testdata", "test-repo.tar.gz") func TestRepositoryLoadIndex(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } @@ -243,7 +242,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (* } func TestRepositoryLoadUnpackedBroken(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() data := rtest.Random(23, 12345) @@ -252,7 +251,6 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { // damage buffer data[0] ^= 0xff - repo := repository.TestOpenLocal(t, repodir) // store broken file err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, nil)) rtest.OK(t, err) @@ -289,10 +287,7 @@ func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) { be, err := local.Open(context.TODO(), local.Config{Path: repodir, Connections: 2}) rtest.OK(t, err) - repo, err := repository.New(&damageOnceBackend{Backend: be}, repository.Options{}) - rtest.OK(t, err) - err = repo.SearchKey(context.TODO(), rtest.TestPassword, 10, "") - rtest.OK(t, err) + repo := repository.TestOpenBackend(t, &damageOnceBackend{Backend: be}) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } diff --git a/internal/repository/testing.go b/internal/repository/testing.go index faa40c70a77..3a566565f50 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -95,8 +95,15 @@ func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository { return TestRepositoryWithBackend(t, nil, version, opts) } +func TestFromFixture(t testing.TB, repoFixture string) (restic.Repository, func()) { + repodir, cleanup := test.Env(t, repoFixture) + repo := TestOpenLocal(t, repodir) + + return repo, cleanup +} + // TestOpenLocal opens a local repository. 
-func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { +func TestOpenLocal(t testing.TB, dir string) restic.Repository { var be backend.Backend be, err := local.Open(context.TODO(), local.Config{Path: dir, Connections: 2}) if err != nil { @@ -105,6 +112,10 @@ func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { be = retry.New(be, 3, nil, nil) + return TestOpenBackend(t, be) +} + +func TestOpenBackend(t testing.TB, be backend.Backend) restic.Repository { repo, err := New(be, Options{}) if err != nil { t.Fatal(err) From d18726cd70527ff5d77f0d418585659834684756 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 21:52:28 +0100 Subject: [PATCH 088/893] ls: add missing read lock As `ls` reads data from the repository, it must acquire a read lock unless `--no-lock` was specified. The old behavior is equivalent to `ls --no-lock`. --- cmd/restic/cmd_ls.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index b0246625ee0..c4fb32de3fa 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -309,10 +309,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return false } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { From 8155dbe711b9636ca7b02e7833595820600657a3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 21:54:39 +0100 Subject: [PATCH 089/893] correctly lock repository in integration tests --- cmd/restic/cmd_backup_integration_test.go | 18 ++--------- cmd/restic/cmd_mount_integration_test.go | 27 +++++++--------- cmd/restic/cmd_rewrite_integration_test.go | 7 ++-- cmd/restic/integration_helpers_test.go | 37 ++++++++++++++++------ cmd/restic/integration_test.go | 5 +-- 5 files changed, 51 insertions(+), 43 deletions(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 0bc4a9eaa17..75de1341cb9 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -9,7 +9,6 @@ import ( "runtime" "testing" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -250,29 +249,18 @@ func TestBackupTreeLoadError(t *testing.T) { opts := BackupOptions{} // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts) - - r, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - rtest.OK(t, r.LoadIndex(context.TODO(), nil)) - treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { - if pb.Type == restic.TreeBlob { - treePacks.Insert(pb.PackID) - } - }) + treePacks := listTreePacks(env.gopts, t) testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) // delete the subdirectory pack first - for id := range treePacks { - rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})) - } + removePacks(env.gopts, t, treePacks) testRunRebuildIndex(t, env.gopts) // now the repo is missing the tree blob in the index; check should report this testRunCheckMustFail(t, env.gopts) // 
second backup should report an error but "heal" this situation - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory") testRunCheck(t, env.gopts) diff --git a/cmd/restic/cmd_mount_integration_test.go b/cmd/restic/cmd_mount_integration_test.go index d2025a395aa..590e1503047 100644 --- a/cmd/restic/cmd_mount_integration_test.go +++ b/cmd/restic/cmd_mount_integration_test.go @@ -12,7 +12,6 @@ import ( "testing" "time" - "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -86,12 +85,12 @@ func listSnapshots(t testing.TB, dir string) []string { return names } -func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { +func checkSnapshots(t testing.TB, gopts GlobalOptions, mountpoint string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs) var wg sync.WaitGroup wg.Add(1) - go testRunMount(t, global, mountpoint, &wg) + go testRunMount(t, gopts, mountpoint, &wg) waitForMount(t, mountpoint) defer wg.Wait() defer testRunUmount(t, mountpoint) @@ -100,7 +99,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit t.Fatal(`virtual directory "snapshots" doesn't exist`) } - ids := listSnapshots(t, repodir) + ids := listSnapshots(t, gopts.Repo) t.Logf("found %v snapshots in repo: %v", len(ids), ids) namesInSnapshots := listSnapshots(t, mountpoint) @@ -124,6 +123,10 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit } } + _, repo, unlock, err := openWithReadLock(context.TODO(), gopts, false) + rtest.OK(t, err) + defer unlock() + for _, id := range snapshotIDs { snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id) rtest.OK(t, err) @@ -166,10 +169,7 @@ func TestMount(t *testing.T) { testRunInit(t, env.gopts) - repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{}, 0) + checkSnapshots(t, env.gopts, env.mountpoint, []restic.ID{}, 0) rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz")) @@ -179,7 +179,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 2) // second backup, implicit incremental testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) @@ -187,7 +187,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 2, "expected two snapshots, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 3) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 3) // third backup, explicit incremental bopts := BackupOptions{Parent: snapshotIDs[0].String()} @@ -196,7 +196,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 3, "expected three snapshots, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 4) + 
checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 4) } func TestMountSameTimestamps(t *testing.T) { @@ -211,14 +211,11 @@ func TestMountSameTimestamps(t *testing.T) { rtest.SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz")) - repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - ids := []restic.ID{ restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"), restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"), restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"), } - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids, 4) + checkSnapshots(t, env.gopts, env.mountpoint, ids, 4) } diff --git a/cmd/restic/cmd_rewrite_integration_test.go b/cmd/restic/cmd_rewrite_integration_test.go index 532855f5776..71d6a60a57e 100644 --- a/cmd/restic/cmd_rewrite_integration_test.go +++ b/cmd/restic/cmd_rewrite_integration_test.go @@ -78,8 +78,11 @@ func testRewriteMetadata(t *testing.T, metadata snapshotMetadataArgs) { createBasicRewriteRepo(t, env) testRunRewriteExclude(t, env.gopts, []string{}, true, metadata) - repo, _ := OpenRepository(context.TODO(), env.gopts) - snapshots, err := restic.TestLoadAllSnapshots(context.TODO(), repo, nil) + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) + rtest.OK(t, err) + defer unlock() + + snapshots, err := restic.TestLoadAllSnapshots(ctx, repo, nil) rtest.OK(t, err) rtest.Assert(t, len(snapshots) == 1, "expected one snapshot, got %v", len(snapshots)) newSnapshot := snapshots[0] diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 184609d40c5..c87e1071e71 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -232,47 +232,66 @@ func testSetupBackupData(t testing.TB, env *testEnvironment) string { } func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet { - r, err := OpenRepository(context.TODO(), gopts) + ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() packs := restic.NewIDSet() - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { packs.Insert(id) return nil })) return packs } +func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet { + ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false) + rtest.OK(t, err) + defer unlock() + + rtest.OK(t, r.LoadIndex(ctx, nil)) + treePacks := restic.NewIDSet() + r.Index().Each(ctx, func(pb restic.PackedBlob) { + if pb.Type == restic.TreeBlob { + treePacks.Insert(pb.PackID) + } + }) + + return treePacks +} + func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { - r, err := OpenRepository(context.TODO(), gopts) + ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() for id := range remove { - rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})) + rtest.OK(t, r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()})) } } func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) { - r, err := OpenRepository(context.TODO(), gopts) + ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() // Get 
all tree packs - rtest.OK(t, r.LoadIndex(context.TODO(), nil)) + rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + r.Index().Each(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } }) // remove all packs containing data blobs - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { if treePacks.Has(id) != removeTreePacks || keep.Has(id) { return nil } - return r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}) + return r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}) })) } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 7cf8396a321..21be571e23d 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -154,12 +154,13 @@ func TestFindListOnce(t *testing.T) { testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) thirdSnapshot := restic.NewIDSet(testListSnapshots(t, env.gopts, 3)...) - repo, err := OpenRepository(context.TODO(), env.gopts) + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) rtest.OK(t, err) + defer unlock() snapshotIDs := restic.NewIDSet() // specify the two oldest snapshots explicitly and use "latest" to reference the newest one - for sn := range FindFilteredSnapshots(context.TODO(), repo, repo, &restic.SnapshotFilter{}, []string{ + for sn := range FindFilteredSnapshots(ctx, repo, repo, &restic.SnapshotFilter{}, []string{ secondSnapshot[0].String(), secondSnapshot[1].String()[:8], "latest", From 5e98f1e2eb49ef9dd84030611d6bc557b646d844 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 23:14:32 +0100 Subject: [PATCH 090/893] repository: fix test setup race conditions --- internal/repository/key.go | 16 ++++++++-------- internal/repository/testing.go | 19 ++++++++++++------- internal/restic/config.go | 6 +++++- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/internal/repository/key.go b/internal/repository/key.go index 4d597da4deb..0604b44dfb9 100644 --- a/internal/repository/key.go +++ b/internal/repository/key.go @@ -43,9 +43,9 @@ type Key struct { id restic.ID } -// Params tracks the parameters used for the KDF. If not set, it will be +// params tracks the parameters used for the KDF. If not set, it will be // calibrated on the first run of AddKey(). -var Params *crypto.Params +var params *crypto.Params const ( // KDFTimeout specifies the maximum runtime for the KDF. @@ -196,13 +196,13 @@ func LoadKey(ctx context.Context, s *Repository, id restic.ID) (k *Key, err erro // AddKey adds a new key to an already existing repository. 
func AddKey(ctx context.Context, s *Repository, password, username, hostname string, template *crypto.Key) (*Key, error) { // make sure we have valid KDF parameters - if Params == nil { + if params == nil { p, err := crypto.Calibrate(KDFTimeout, KDFMemory) if err != nil { return nil, errors.Wrap(err, "Calibrate") } - Params = &p + params = &p debug.Log("calibrated KDF parameters are %v", p) } @@ -213,9 +213,9 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str Hostname: hostname, KDF: "scrypt", - N: Params.N, - R: Params.R, - P: Params.P, + N: params.N, + R: params.R, + P: params.P, } if newkey.Hostname == "" { @@ -237,7 +237,7 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str } // call KDF to derive user key - newkey.user, err = crypto.KDF(*Params, newkey.Salt, password) + newkey.user, err = crypto.KDF(*params, newkey.Salt, password) if err != nil { return nil, err } diff --git a/internal/repository/testing.go b/internal/repository/testing.go index 3a566565f50..874d179cedb 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "sync" "testing" "github.com/restic/restic/internal/backend" @@ -21,14 +22,18 @@ type logger interface { Logf(format string, args ...interface{}) } +var paramsOnce sync.Once + // TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing. func TestUseLowSecurityKDFParameters(t logger) { t.Logf("using low-security KDF parameters for test") - Params = &crypto.Params{ - N: 128, - R: 1, - P: 1, - } + paramsOnce.Do(func() { + params = &crypto.Params{ + N: 128, + R: 1, + P: 1, + } + }) } // TestBackend returns a fully configured in-memory backend. @@ -36,7 +41,7 @@ func TestBackend(_ testing.TB) backend.Backend { return mem.New() } -const TestChunkerPol = chunker.Pol(0x3DA3358B4DC173) +const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test // password. If be is nil, an in-memory backend is used. A constant polynomial @@ -55,7 +60,7 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o t.Fatalf("TestRepository(): new repo failed: %v", err) } - cfg := restic.TestCreateConfig(t, TestChunkerPol, version) + cfg := restic.TestCreateConfig(t, testChunkerPol, version) err = repo.init(context.TODO(), test.TestPassword, cfg) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) diff --git a/internal/restic/config.go b/internal/restic/config.go index 67ee190bc6c..67af259ba2f 100644 --- a/internal/restic/config.go +++ b/internal/restic/config.go @@ -2,6 +2,7 @@ package restic import ( "context" + "sync" "testing" "github.com/restic/restic/internal/errors" @@ -67,12 +68,15 @@ func TestCreateConfig(t testing.TB, pol chunker.Pol, version uint) (cfg Config) } var checkPolynomial = true +var checkPolynomialOnce sync.Once // TestDisableCheckPolynomial disables the check that the polynomial used for // the chunker. func TestDisableCheckPolynomial(t testing.TB) { t.Logf("disabling check of the chunker polynomial") - checkPolynomial = false + checkPolynomialOnce.Do(func() { + checkPolynomial = false + }) } // LoadConfig returns loads, checks and returns the config for a repository. 
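Aside (editorial, not part of the patch): the race fixes in the patch above hinge on guarding one-time mutation of package-global test settings with sync.Once, so that helpers may be called concurrently from parallel tests. A minimal sketch of that pattern, with made-up names used purely for illustration:

    package example

    import "sync"

    var (
        configureOnce sync.Once
        lowSecurity   bool
    )

    // UseLowSecurity enables the test-only setting exactly once, even when
    // called concurrently from t.Parallel() tests; later calls are no-ops.
    func UseLowSecurity() {
        configureOnce.Do(func() {
            lowSecurity = true
        })
    }

This mirrors how TestUseLowSecurityKDFParameters and TestDisableCheckPolynomial above avoid racy writes to the global KDF parameters and polynomial-check flag.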
From 07eb6c315b34bac8d698be41fecd0c26d542bb49 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 23:46:58 +0100 Subject: [PATCH 091/893] add changelog for locking refactor --- changelog/unreleased/pull-4709 | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 changelog/unreleased/pull-4709 diff --git a/changelog/unreleased/pull-4709 b/changelog/unreleased/pull-4709 new file mode 100644 index 00000000000..5ffb2a6a684 --- /dev/null +++ b/changelog/unreleased/pull-4709 @@ -0,0 +1,10 @@ +Bugfix: Correct `--no-lock` handling of `ls` and `tag` command + +The `ls` command never locked the repository. This has been fixed. The old +behavior is still supported using `ls --no-lock`. The latter invocation also +works with older restic versions. + +The `tag` command erroneously accepted the `--no-lock` command. The command +now always requires an exclusive lock. + +https://github.com/restic/restic/pull/4709 From ec2b79834aadc71352a49d5c4176e724c7fd18cf Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 00:24:03 +0100 Subject: [PATCH 092/893] use consistent alias for interal/test package --- .golangci.yml | 10 +- cmd/restic/cmd_restore_integration_test.go | 4 +- internal/archiver/archiver_test.go | 128 ++++++++++----------- internal/archiver/archiver_unix_test.go | 16 +-- internal/archiver/scanner_test.go | 14 +-- internal/archiver/testing_test.go | 12 +- internal/archiver/tree_test.go | 6 +- internal/fs/stat_test.go | 4 +- internal/test/helpers.go | 5 +- 9 files changed, 103 insertions(+), 96 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 7dc6a8e7fc3..e632965bb2d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -38,6 +38,8 @@ linters: # ensure that http response bodies are closed - bodyclose + - importas + issues: # don't use the default exclude rules, this hides (among others) ignored # errors from Close() calls @@ -58,4 +60,10 @@ issues: exclude-rules: # revive: ignore unused parameters in tests - path: (_test\.go|testing\.go|backend/.*/tests\.go) - text: "unused-parameter:" \ No newline at end of file + text: "unused-parameter:" + +linters-settings: + importas: + alias: + - pkg: github.com/restic/restic/internal/test + alias: rtest diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 2c7cbe1fb67..04c13bc688c 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -4,7 +4,7 @@ import ( "context" "fmt" "io" - mrand "math/rand" + "math/rand" "os" "path/filepath" "syscall" @@ -116,7 +116,7 @@ func TestRestore(t *testing.T) { for i := 0; i < 10; i++ { p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i)) rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) - rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21)))) + rtest.OK(t, appendRandomData(p, uint(rand.Intn(2<<21)))) } opts := BackupOptions{} diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 91b26f3ddc9..841c8f2ce8f 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -23,12 +23,12 @@ import ( "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" ) func prepareTempdirRepoSrc(t testing.TB, src TestDir) (string, restic.Repository) { - tempdir := restictest.TempDir(t) + tempdir := 
rtest.TempDir(t) repo := repository.TestRepository(t) TestCreateFiles(t, tempdir, src) @@ -133,7 +133,7 @@ func TestArchiverSaveFile(t *testing.T) { var tests = []TestFile{ {Content: ""}, {Content: "foo"}, - {Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Content: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, testfile := range tests { @@ -166,7 +166,7 @@ func TestArchiverSaveFileReaderFS(t *testing.T) { Data string }{ {Data: "foo"}, - {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Data: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, test := range tests { @@ -208,7 +208,7 @@ func TestArchiverSave(t *testing.T) { var tests = []TestFile{ {Content: ""}, {Content: "foo"}, - {Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Content: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, testfile := range tests { @@ -277,7 +277,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { Data string }{ {Data: "foo"}, - {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Data: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, test := range tests { @@ -354,7 +354,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { func BenchmarkArchiverSaveFileSmall(b *testing.B) { const fileSize = 4 * 1024 d := TestDir{"file": TestFile{ - Content: string(restictest.Random(23, fileSize)), + Content: string(rtest.Random(23, fileSize)), }} b.SetBytes(fileSize) @@ -386,7 +386,7 @@ func BenchmarkArchiverSaveFileSmall(b *testing.B) { func BenchmarkArchiverSaveFileLarge(b *testing.B) { const fileSize = 40*1024*1024 + 1287898 d := TestDir{"file": TestFile{ - Content: string(restictest.Random(23, fileSize)), + Content: string(rtest.Random(23, fileSize)), }} b.SetBytes(fileSize) @@ -462,14 +462,14 @@ func appendToFile(t testing.TB, filename string, data []byte) { } func TestArchiverSaveFileIncremental(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo := &blobCountingRepo{ Repository: repository.TestRepository(t), saved: make(map[restic.BlobHandle]uint), } - data := restictest.Random(23, 512*1024+887898) + data := rtest.Random(23, 512*1024+887898) testfile := filepath.Join(tempdir, "testfile") for i := 0; i < 3; i++ { @@ -512,12 +512,12 @@ func chmodTwice(t testing.TB, name string) { // POSIX says that ctime is updated "even if the file status does not // change", but let's make sure it does change, just in case. 
err := os.Chmod(name, 0700) - restictest.OK(t, err) + rtest.OK(t, err) sleep() err = os.Chmod(name, 0600) - restictest.OK(t, err) + rtest.OK(t, err) } func lstat(t testing.TB, name string) os.FileInfo { @@ -676,7 +676,7 @@ func TestFileChanged(t *testing.T) { t.Skip("don't run test on Windows") } - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") content := defaultContent @@ -712,7 +712,7 @@ func TestFileChanged(t *testing.T) { } func TestFilChangedSpecialCases(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") content := []byte("foobar") @@ -746,12 +746,12 @@ func TestArchiverSaveDir(t *testing.T) { }{ { src: TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, target: ".", want: TestDir{ "targetdir": TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, }, }, @@ -761,8 +761,8 @@ func TestArchiverSaveDir(t *testing.T) { "foo": TestFile{Content: "foo"}, "emptyfile": TestFile{Content: ""}, "bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}, - "largefile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, - "largerfile": TestFile{Content: string(restictest.Random(234, 5*1024*1024+5000))}, + "largefile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, + "largerfile": TestFile{Content: string(rtest.Random(234, 5*1024*1024+5000))}, }, }, target: "targetdir", @@ -841,7 +841,7 @@ func TestArchiverSaveDir(t *testing.T) { chdir = filepath.Join(chdir, test.chdir) } - back := restictest.Chdir(t, chdir) + back := rtest.Chdir(t, chdir) defer back() fi, err := fs.Lstat(test.target) @@ -899,7 +899,7 @@ func TestArchiverSaveDir(t *testing.T) { } func TestArchiverSaveDirIncremental(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo := &blobCountingRepo{ Repository: repository.TestRepository(t), @@ -989,7 +989,7 @@ func TestArchiverSaveDirIncremental(t *testing.T) { func bothZeroOrNeither(tb testing.TB, exp, act uint64) { tb.Helper() if (exp == 0 && act != 0) || (exp != 0 && act == 0) { - restictest.Equals(tb, exp, act) + rtest.Equals(tb, exp, act) } } @@ -1113,7 +1113,7 @@ func TestArchiverSaveTree(t *testing.T) { arch.runWorkers(ctx, wg) arch.summary = &Summary{} - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() if test.prepare != nil { @@ -1158,9 +1158,9 @@ func TestArchiverSaveTree(t *testing.T) { bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize) bothZeroOrNeither(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo) bothZeroOrNeither(t, test.stat.TreeSizeInRepo, stat.TreeSizeInRepo) - restictest.Equals(t, test.stat.ProcessedBytes, stat.ProcessedBytes) - restictest.Equals(t, test.stat.Files, stat.Files) - restictest.Equals(t, test.stat.Dirs, stat.Dirs) + rtest.Equals(t, test.stat.ProcessedBytes, stat.ProcessedBytes) + rtest.Equals(t, test.stat.Files, stat.Files) + rtest.Equals(t, test.stat.Dirs, stat.Dirs) }) } } @@ -1408,7 +1408,7 @@ func TestArchiverSnapshot(t *testing.T) { chdir = filepath.Join(chdir, filepath.FromSlash(test.chdir)) } - back := restictest.Chdir(t, chdir) + back := rtest.Chdir(t, chdir) defer back() var targets []string @@ -1561,7 +1561,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { arch := 
New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.Select = test.selFn - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() targets := []string{"."} @@ -1639,14 +1639,14 @@ func (f MockFile) Read(p []byte) (int, error) { } func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) { - restictest.Equals(t, stat.Files.New, sn.Summary.FilesNew) - restictest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged) - restictest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified) - restictest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew) - restictest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged) - restictest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified) - restictest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed) - restictest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed) + rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew) + rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged) + rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified) + rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew) + rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged) + rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified) + rtest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed) + rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed) bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs)) bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs)) bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded)) @@ -1662,7 +1662,7 @@ func TestArchiverParent(t *testing.T) { }{ { src: TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, statInitial: Summary{ Files: ChangeStats{1, 0, 0}, @@ -1679,8 +1679,8 @@ func TestArchiverParent(t *testing.T) { { src: TestDir{ "targetDir": TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 1234))}, - "targetfile2": TestFile{Content: string(restictest.Random(888, 1235))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 1234))}, + "targetfile2": TestFile{Content: string(rtest.Random(888, 1235))}, }, }, statInitial: Summary{ @@ -1698,9 +1698,9 @@ func TestArchiverParent(t *testing.T) { { src: TestDir{ "targetDir": TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 1234))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 1234))}, }, - "targetfile2": TestFile{Content: string(restictest.Random(888, 1235))}, + "targetfile2": TestFile{Content: string(rtest.Random(888, 1235))}, }, modify: func(path string) { remove(t, filepath.Join(path, "targetDir", "targetfile")) @@ -1735,7 +1735,7 @@ func TestArchiverParent(t *testing.T) { arch := New(repo, testFS, Options{}) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() firstSnapshot, firstSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) @@ -1763,9 +1763,9 @@ func TestArchiverParent(t *testing.T) { } return nil }) - restictest.Equals(t, test.statInitial.Files, summary.Files) - restictest.Equals(t, test.statInitial.Dirs, summary.Dirs) - restictest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes) + rtest.Equals(t, test.statInitial.Files, summary.Files) + rtest.Equals(t, 
test.statInitial.Dirs, summary.Dirs) + rtest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes) checkSnapshotStats(t, firstSnapshot, test.statInitial) if test.modify != nil { @@ -1784,11 +1784,11 @@ func TestArchiverParent(t *testing.T) { if test.modify == nil { // check that no files were read this time - restictest.Equals(t, map[string]int{}, testFS.bytesRead) + rtest.Equals(t, map[string]int{}, testFS.bytesRead) } - restictest.Equals(t, test.statSecond.Files, summary.Files) - restictest.Equals(t, test.statSecond.Dirs, summary.Dirs) - restictest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes) + rtest.Equals(t, test.statSecond.Files, summary.Files) + rtest.Equals(t, test.statSecond.Dirs, summary.Dirs) + rtest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes) checkSnapshotStats(t, secondSnapshot, test.statSecond) t.Logf("second backup saved as %v", secondSnapshotID.Str()) @@ -1894,7 +1894,7 @@ func TestArchiverErrorReporting(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() if test.prepare != nil { @@ -1964,7 +1964,7 @@ func TestArchiverContextCanceled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, TestDir{ "targetfile": TestFile{Content: "foobar"}, }) @@ -1972,7 +1972,7 @@ func TestArchiverContextCanceled(t *testing.T) { // Ensure that the archiver itself reports the canceled context and not just the backend repo := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()}, 0, repository.Options{}) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) @@ -2058,16 +2058,16 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { { src: TestDir{ "dir": TestDir{ - "file0": TestFile{Content: string(restictest.Random(0, 1024))}, - "file1": TestFile{Content: string(restictest.Random(1, 1024))}, - "file2": TestFile{Content: string(restictest.Random(2, 1024))}, - "file3": TestFile{Content: string(restictest.Random(3, 1024))}, - "file4": TestFile{Content: string(restictest.Random(4, 1024))}, - "file5": TestFile{Content: string(restictest.Random(5, 1024))}, - "file6": TestFile{Content: string(restictest.Random(6, 1024))}, - "file7": TestFile{Content: string(restictest.Random(7, 1024))}, - "file8": TestFile{Content: string(restictest.Random(8, 1024))}, - "file9": TestFile{Content: string(restictest.Random(9, 1024))}, + "file0": TestFile{Content: string(rtest.Random(0, 1024))}, + "file1": TestFile{Content: string(rtest.Random(1, 1024))}, + "file2": TestFile{Content: string(rtest.Random(2, 1024))}, + "file3": TestFile{Content: string(rtest.Random(3, 1024))}, + "file4": TestFile{Content: string(rtest.Random(4, 1024))}, + "file5": TestFile{Content: string(rtest.Random(5, 1024))}, + "file6": TestFile{Content: string(rtest.Random(6, 1024))}, + "file7": TestFile{Content: string(rtest.Random(7, 1024))}, + "file8": TestFile{Content: string(rtest.Random(8, 1024))}, + "file9": TestFile{Content: string(rtest.Random(9, 1024))}, }, }, wantOpen: map[string]uint{ @@ -2092,7 +2092,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() testFS := &TrackFS{ @@ -2225,7 +2225,7 @@ func TestMetadataChanged(t 
*testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() // get metadata @@ -2300,7 +2300,7 @@ func TestRacyFileSwap(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() // get metadata of current folder diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 2552b23e190..9462420ddf3 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -11,7 +11,7 @@ import ( "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) type wrappedFileInfo struct { @@ -49,7 +49,7 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { func statAndSnapshot(t *testing.T, repo restic.Repository, name string) (*restic.Node, *restic.Node) { fi := lstat(t, name) want, err := restic.NodeFromFileInfo(name, fi) - restictest.OK(t, err) + rtest.OK(t, err) _, node := snapshot(t, repo, fs.Local{}, nil, name) return want, node @@ -73,17 +73,17 @@ func TestHardlinkMetadata(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() want, node := statAndSnapshot(t, repo, "testlink") - restictest.Assert(t, node.DeviceID == want.DeviceID, "device id mismatch expected %v got %v", want.DeviceID, node.DeviceID) - restictest.Assert(t, node.Links == want.Links, "link count mismatch expected %v got %v", want.Links, node.Links) - restictest.Assert(t, node.Inode == want.Inode, "inode mismatch expected %v got %v", want.Inode, node.Inode) + rtest.Assert(t, node.DeviceID == want.DeviceID, "device id mismatch expected %v got %v", want.DeviceID, node.DeviceID) + rtest.Assert(t, node.Links == want.Links, "link count mismatch expected %v got %v", want.Links, node.Links) + rtest.Assert(t, node.Inode == want.Inode, "inode mismatch expected %v got %v", want.Inode, node.Inode) _, node = statAndSnapshot(t, repo, "testfile") - restictest.Assert(t, node.DeviceID == 0, "device id mismatch for testfile expected %v got %v", 0, node.DeviceID) + rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testfile expected %v got %v", 0, node.DeviceID) _, node = statAndSnapshot(t, repo, "testdir") - restictest.Assert(t, node.DeviceID == 0, "device id mismatch for testdir expected %v got %v", 0, node.DeviceID) + rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testdir expected %v got %v", 0, node.DeviceID) } diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go index 1b4cd1f7f2e..b5b7057b8ea 100644 --- a/internal/archiver/scanner_test.go +++ b/internal/archiver/scanner_test.go @@ -9,7 +9,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) func TestScanner(t *testing.T) { @@ -81,10 +81,10 @@ func TestScanner(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() @@ -216,10 +216,10 @@ func TestScannerError(t 
*testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() @@ -288,10 +288,10 @@ func TestScannerCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() diff --git a/internal/archiver/testing_test.go b/internal/archiver/testing_test.go index e48b41ec796..ff3bd3668d4 100644 --- a/internal/archiver/testing_test.go +++ b/internal/archiver/testing_test.go @@ -11,7 +11,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) // MockT passes through all logging functions from T, but catches Fail(), @@ -101,7 +101,7 @@ func TestTestCreateFiles(t *testing.T) { } for i, test := range tests { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) t.Run("", func(t *testing.T) { tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i)) @@ -191,7 +191,7 @@ func TestTestWalkFiles(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) got := make(map[string]string) @@ -321,7 +321,7 @@ func TestTestEnsureFiles(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) createFilesAt(t, tempdir, test.files) subtestT := testing.TB(t) @@ -452,7 +452,7 @@ func TestTestEnsureSnapshot(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) targetDir := filepath.Join(tempdir, "target") err := fs.Mkdir(targetDir, 0700) @@ -462,7 +462,7 @@ func TestTestEnsureSnapshot(t *testing.T) { createFilesAt(t, targetDir, test.files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() repo := repository.TestRepository(t) diff --git a/internal/archiver/tree_test.go b/internal/archiver/tree_test.go index 7852a4c2ec6..a9d2d97ff23 100644 --- a/internal/archiver/tree_test.go +++ b/internal/archiver/tree_test.go @@ -8,7 +8,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) // debug.Log requires Tree.String. 
@@ -439,10 +439,10 @@ func TestTree(t *testing.T) { t.Skip("skip test on unix") } - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() tree, err := NewTree(fs.Local{}, test.targets) diff --git a/internal/fs/stat_test.go b/internal/fs/stat_test.go index a5ec77c7a17..d52415c1d54 100644 --- a/internal/fs/stat_test.go +++ b/internal/fs/stat_test.go @@ -5,11 +5,11 @@ import ( "path/filepath" "testing" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) func TestExtendedStat(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") err := os.WriteFile(filename, []byte("foobar"), 0640) if err != nil { diff --git a/internal/test/helpers.go b/internal/test/helpers.go index 242da607944..3387d36df67 100644 --- a/internal/test/helpers.go +++ b/internal/test/helpers.go @@ -5,6 +5,7 @@ import ( "compress/gzip" "fmt" "io" + "math/rand" "os" "os/exec" "path/filepath" @@ -12,8 +13,6 @@ import ( "testing" "github.com/restic/restic/internal/errors" - - mrand "math/rand" ) // Assert fails the test if the condition is false. @@ -71,7 +70,7 @@ func Equals(tb testing.TB, exp, act interface{}, msgs ...string) { func Random(seed, count int) []byte { p := make([]byte, count) - rnd := mrand.New(mrand.NewSource(int64(seed))) + rnd := rand.New(rand.NewSource(int64(seed))) for i := 0; i < len(p); i += 8 { val := rnd.Int63() From df07814ec266d641466885318b6ed7889b4c889a Mon Sep 17 00:00:00 2001 From: Stephan Paul Date: Mon, 25 Mar 2024 15:42:15 +0100 Subject: [PATCH 093/893] forget json output: added id's in snapshots within reasons object In order to evaluate the keep reasons for snapshots, there should be also the id's to compare it with snapshots within the keep object. (See also Issue #3117) In order to avoid output parameters also changed function addJSONSnapshots to asJSONSnapshots --- changelog/unreleased/pull-4737 | 5 ++++ cmd/restic/cmd_forget.go | 46 ++++++++++++++++++++++++++-------- doc/075_scripting.rst | 14 +++++------ 3 files changed, 47 insertions(+), 18 deletions(-) create mode 100644 changelog/unreleased/pull-4737 diff --git a/changelog/unreleased/pull-4737 b/changelog/unreleased/pull-4737 new file mode 100644 index 00000000000..2637c8f83e2 --- /dev/null +++ b/changelog/unreleased/pull-4737 @@ -0,0 +1,5 @@ +Enhancement: include snapshot id in reason field of forget JSON output + +The JSON output of the `forget` command now includes the `id` and `short_id` of a snapshot in the `reason` field. 
+ +https://github.com/restic/restic/pull/4737 diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index f2fc1da8c58..d634576c0e6 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -245,16 +245,16 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact) Printf("\n") } - addJSONSnapshots(&fg.Keep, keep) + fg.Keep = asJSONSnapshots(keep) if len(remove) != 0 && !gopts.Quiet && !gopts.JSON { Printf("remove %d snapshots:\n", len(remove)) PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact) Printf("\n") } - addJSONSnapshots(&fg.Remove, remove) + fg.Remove = asJSONSnapshots(remove) - fg.Reasons = reasons + fg.Reasons = asJSONKeeps(reasons) jsonGroups = append(jsonGroups, &fg) @@ -302,23 +302,47 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption // ForgetGroup helps to print what is forgotten in JSON. type ForgetGroup struct { - Tags []string `json:"tags"` - Host string `json:"host"` - Paths []string `json:"paths"` - Keep []Snapshot `json:"keep"` - Remove []Snapshot `json:"remove"` - Reasons []restic.KeepReason `json:"reasons"` + Tags []string `json:"tags"` + Host string `json:"host"` + Paths []string `json:"paths"` + Keep []Snapshot `json:"keep"` + Remove []Snapshot `json:"remove"` + Reasons []KeepReason `json:"reasons"` } -func addJSONSnapshots(js *[]Snapshot, list restic.Snapshots) { +func asJSONSnapshots(list restic.Snapshots) []Snapshot { + var resultList []Snapshot for _, sn := range list { k := Snapshot{ Snapshot: sn, ID: sn.ID(), ShortID: sn.ID().Str(), } - *js = append(*js, k) + resultList = append(resultList, k) } + return resultList +} + +// KeepReason helps to print KeepReasons as JSON with Snapshots with their ID included. 
+type KeepReason struct { + Snapshot Snapshot `json:"snapshot"` + Matches []string `json:"matches"` +} + +func asJSONKeeps(list []restic.KeepReason) []KeepReason { + var resultList []KeepReason + for _, keep := range list { + k := KeepReason{ + Snapshot: Snapshot{ + Snapshot: keep.Snapshot, + ID: keep.Snapshot.ID(), + ShortID: keep.Snapshot.ID().Str(), + }, + Matches: keep.Matches, + } + resultList = append(resultList, k) + } + return resultList } func printJSONForget(stdout io.Writer, forgets []*ForgetGroup) error { diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index d51516cbe71..28419c29273 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -367,13 +367,13 @@ Snapshot object Reason object -+----------------+---------------------------------------------------------+ -| ``snapshot`` | Snapshot object, without ``id`` and ``short_id`` fields | -+----------------+---------------------------------------------------------+ -| ``matches`` | Array containing descriptions of the matching criteria | -+----------------+---------------------------------------------------------+ -| ``counters`` | Object containing counters used by the policies | -+----------------+---------------------------------------------------------+ ++----------------+-----------------------------------------------------------+ +| ``snapshot`` | Snapshot object, including ``id`` and ``short_id`` fields | ++----------------+-----------------------------------------------------------+ +| ``matches`` | Array containing descriptions of the matching criteria | ++----------------+-----------------------------------------------------------+ +| ``counters`` | Object containing counters used by the policies | ++----------------+-----------------------------------------------------------+ init From 5145c8f9c092d8815e516be231c20f2234764e45 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 31 Mar 2024 12:25:20 +0200 Subject: [PATCH 094/893] key list: include full key id in JSON output --- changelog/unreleased/issue-4744 | 9 +++++++++ cmd/restic/cmd_key_list.go | 6 ++++-- 2 files changed, 13 insertions(+), 2 deletions(-) create mode 100644 changelog/unreleased/issue-4744 diff --git a/changelog/unreleased/issue-4744 b/changelog/unreleased/issue-4744 new file mode 100644 index 00000000000..b0ede1c5c01 --- /dev/null +++ b/changelog/unreleased/issue-4744 @@ -0,0 +1,9 @@ +Change: Include full key ID in JSON output of `key list` + +We have changed the JSON output of the `key list` command to include the full +key ID instead of just a shortened version, as the latter can be ambiguous +in some rare cases. To derive the short ID, please truncate the full ID down to +eight characters. 
+ +https://github.com/restic/restic/issues/4744 +https://github.com/restic/restic/pull/4745 diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index 9bddb5ed33e..fcca6055a2b 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -53,6 +53,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions type keyInfo struct { Current bool `json:"current"` ID string `json:"id"` + ShortID string `json:"-"` UserName string `json:"userName"` HostName string `json:"hostName"` Created string `json:"created"` @@ -70,7 +71,8 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions key := keyInfo{ Current: id == s.KeyID(), - ID: id.Str(), + ID: id.String(), + ShortID: id.Str(), UserName: k.Username, HostName: k.Hostname, Created: k.Created.Local().Format(TimeFormat), @@ -91,7 +93,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions } tab := table.New() - tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ID }}") + tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ShortID }}") tab.AddColumn("User", "{{ .UserName }}") tab.AddColumn("Host", "{{ .HostName }}") tab.AddColumn("Created", "{{ .Created }}") From f8a72ac2a397f8d0a6109e8e5ebae85f079b23fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 02:32:19 +0000 Subject: [PATCH 095/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore Bumps [github.com/Azure/azure-sdk-for-go/sdk/azcore](https://github.com/Azure/azure-sdk-for-go) from 1.9.2 to 1.10.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.9.2...sdk/azcore/v1.10.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azcore dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6e546974e5c..3ef0e89b6ec 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.39.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 github.com/Backblaze/blazer v0.6.1 diff --git a/go.sum b/go.sum index 668d6a339de..f4877ed35b1 100644 --- a/go.sum +++ b/go.sum @@ -9,8 +9,8 @@ cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= cloud.google.com/go/storage v1.39.0 h1:brbjUa4hbDHhpQf48tjqMaXEV+f1OGoaTmQau9tmCsA= cloud.google.com/go/storage v1.39.0/go.mod h1:OAEj/WZwUYjA3YHQ10/YcN9ttGuEpLwvaoyBXIPikEk= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= From 96c602a6de89ab1adc67bc97a2ace9e541989afa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Apr 2024 19:41:38 +0000 Subject: [PATCH 096/893] build(deps): bump golang.org/x/net from 0.21.0 to 0.23.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.21.0 to 0.23.0. - [Commits](https://github.com/golang/net/compare/v0.21.0...v0.23.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 7121cdac499..2aa48ebb7fc 100644 --- a/go.mod +++ b/go.mod @@ -26,12 +26,12 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.19.0 - golang.org/x/net v0.21.0 + golang.org/x/crypto v0.21.0 + golang.org/x/net v0.23.0 golang.org/x/oauth2 v0.17.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.17.0 - golang.org/x/term v0.17.0 + golang.org/x/sys v0.18.0 + golang.org/x/term v0.18.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 google.golang.org/api v0.166.0 diff --git a/go.sum b/go.sum index 1cd51cbacc9..895bd23c91b 100644 --- a/go.sum +++ b/go.sum @@ -208,8 +208,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -229,8 +229,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= @@ -257,14 +257,14 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From ba136b31b86eef304363fd5caec7935ef7343f1f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Apr 2024 20:03:22 +0000 Subject: [PATCH 097/893] build(deps): bump cloud.google.com/go/storage from 1.39.0 to 1.40.0 Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.39.0 to 1.40.0. - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.39.0...spanner/v1.40.0) --- updated-dependencies: - dependency-name: cloud.google.com/go/storage dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 32 +++++++++++++-------------- go.sum | 68 ++++++++++++++++++++++++++++------------------------------ 2 files changed, 49 insertions(+), 51 deletions(-) diff --git a/go.mod b/go.mod index 1637f769aed..c928b4a97b2 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,7 @@ module github.com/restic/restic require ( - cloud.google.com/go/storage v1.39.0 + cloud.google.com/go/storage v1.40.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 @@ -28,20 +28,20 @@ require ( go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.21.0 golang.org/x/net v0.23.0 - golang.org/x/oauth2 v0.17.0 + golang.org/x/oauth2 v0.18.0 golang.org/x/sync v0.6.0 golang.org/x/sys v0.18.0 golang.org/x/term v0.18.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.166.0 + google.golang.org/api v0.170.0 ) require ( - cloud.google.com/go v0.112.0 // indirect + cloud.google.com/go v0.112.1 // indirect cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.6 // indirect + cloud.google.com/go/iam v1.1.7 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect @@ -52,12 +52,12 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect 
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.1 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect @@ -72,17 +72,17 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect - go.opentelemetry.io/otel v1.23.0 // indirect - go.opentelemetry.io/otel/metric v1.23.0 // indirect - go.opentelemetry.io/otel/trace v1.23.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/grpc v1.61.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 // indirect + google.golang.org/grpc v1.62.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 14951005ac4..1a7dc1a7967 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,14 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= -cloud.google.com/go/storage v1.39.0 h1:brbjUa4hbDHhpQf48tjqMaXEV+f1OGoaTmQau9tmCsA= -cloud.google.com/go/storage v1.39.0/go.mod h1:OAEj/WZwUYjA3YHQ10/YcN9ttGuEpLwvaoyBXIPikEk= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 
h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= @@ -36,7 +36,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -54,7 +53,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -84,8 +82,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -107,8 +105,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= -github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod 
h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -190,17 +188,17 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= -go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= -go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= -go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= -go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= -go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -232,8 +230,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -287,8 +285,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -google.golang.org/api v0.166.0 h1:6m4NUwrZYhAaVIHZWxaKjw1L1vNAjtMwORmKRyEEo24= -google.golang.org/api v0.166.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= +google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= +google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -298,17 +296,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= -google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o= -google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 h1:9IZDv+/GcI6u+a4jRFRLxQs0RUCfavGfoOgEW6jpkI0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= -google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -320,8 +318,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= From 09d2183351ce016fb2a5639014569f0cc1eb1926 Mon Sep 17 00:00:00 2001 From: Martin Geisler Date: Sun, 7 Apr 2024 18:05:53 +0200 Subject: [PATCH 098/893] doc: fix typo in 047_tuning_backup_parameters.rst --- doc/047_tuning_backup_parameters.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/047_tuning_backup_parameters.rst b/doc/047_tuning_backup_parameters.rst index 8456693e73e..a6b998cfeda 100644 --- a/doc/047_tuning_backup_parameters.rst +++ b/doc/047_tuning_backup_parameters.rst @@ -121,7 +121,7 @@ Feature flags allow disabling or enabling certain experimental restic features. can be specified via the ``RESTIC_FEATURES`` environment variable. The variable expects a comma-separated list of ``key[=value],key2[=value2]`` pairs. The key is the name of a feature flag. The value is optional and can contain either the value ``true`` (default if omitted) -or ``false``. The list of currently available feautre flags is shown by the ``features`` +or ``false``. The list of currently available feature flags is shown by the ``features`` command. Restic will return an error if an invalid feature flag is specified. No longer relevant From 0747cf5319775b3deca0ad0db1657e3179f10e06 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 10 Apr 2024 19:17:25 +0200 Subject: [PATCH 099/893] cache: ignore ErrNotExist during cleanup of old files Two restic processes running concurrently can try to remove the same files from the cache. This could cause one process to fail with an error if the other one has already remove a file that the current process also tries to delete. 
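
A minimal, self-contained sketch of the pattern this patch adopts (hypothetical helper name, not restic's actual code): treat a file that is already gone as successfully removed, since a concurrent process may have deleted it first.

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    // removeIgnoringNotExist deletes path but treats a missing file as success,
    // because another process may have removed it in the meantime.
    func removeIgnoringNotExist(path string) error {
        if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) {
            return err
        }
        return nil
    }

    func main() {
        // Simulate two processes cleaning up the same stale cache entry:
        // the second removal is a no-op instead of an error.
        _ = os.WriteFile("stale-cache-entry", []byte("x"), 0o600)
        fmt.Println(removeIgnoringNotExist("stale-cache-entry")) // <nil>
        fmt.Println(removeIgnoringNotExist("stale-cache-entry")) // still <nil>
    }
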
--- changelog/unreleased/issue-4760 | 8 ++++++++ internal/cache/file.go | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-4760 diff --git a/changelog/unreleased/issue-4760 b/changelog/unreleased/issue-4760 new file mode 100644 index 00000000000..bb2d9c5b4e0 --- /dev/null +++ b/changelog/unreleased/issue-4760 @@ -0,0 +1,8 @@ +Bugfix: Fix possible error on concurrent cache cleanup + +If multiple restic processes concurrently cleaned up no longer existing files +from the cache, this could cause some of the processes to fail with an `no such +file or directory` error. This has been fixed. + +https://github.com/restic/restic/issues/4760 +https://github.com/restic/restic/pull/4761 diff --git a/internal/cache/file.go b/internal/cache/file.go index 48a38c1d387..1bfe922d24d 100644 --- a/internal/cache/file.go +++ b/internal/cache/file.go @@ -165,7 +165,8 @@ func (c *Cache) Clear(t restic.FileType, valid restic.IDSet) error { continue } - if err = fs.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil { + // ignore ErrNotExist to gracefully handle multiple processes running Clear() concurrently + if err = fs.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil && !errors.Is(err, os.ErrNotExist) { return err } } From bf054c09d21a2f83d8974e34614eb700954057e5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 31 Jan 2024 20:48:03 +0100 Subject: [PATCH 100/893] backup: Ignore xattr.list permission error for parent directories On FreeBSD, limited users may not be able to even list xattrs for the parent directories above the snapshot source paths. As this can cause the backup to fail, just ignore those errors. --- changelog/unreleased/issue-3600 | 11 ++++++++++ internal/archiver/archiver.go | 14 +++++++------ internal/archiver/archiver_test.go | 4 ++-- internal/archiver/archiver_unix_test.go | 2 +- internal/archiver/file_saver.go | 4 ++-- internal/archiver/file_saver_test.go | 4 ++-- internal/restic/node.go | 13 +++++++----- internal/restic/node_aix.go | 4 ++++ internal/restic/node_netbsd.go | 4 ++++ internal/restic/node_openbsd.go | 4 ++++ internal/restic/node_test.go | 10 ++++++--- internal/restic/node_unix_test.go | 2 +- internal/restic/node_windows.go | 4 ++++ internal/restic/node_windows_test.go | 2 +- internal/restic/node_xattr.go | 8 +++++++ internal/restic/node_xattr_test.go | 28 +++++++++++++++++++++++++ internal/restic/tree_test.go | 4 ++-- 17 files changed, 97 insertions(+), 25 deletions(-) create mode 100644 changelog/unreleased/issue-3600 create mode 100644 internal/restic/node_xattr_test.go diff --git a/changelog/unreleased/issue-3600 b/changelog/unreleased/issue-3600 new file mode 100644 index 00000000000..0da66d3821e --- /dev/null +++ b/changelog/unreleased/issue-3600 @@ -0,0 +1,11 @@ +Bugfix: `backup` works if xattrs above the backup target cannot be read + +When backup targets are specified using absolute paths, then `backup` also +includes information about the parent folders of the backup targets in the +snapshot. If the extended attributes for some of these folders could not be +read due to missing permissions, this caused the backup to fail. This has been +fixed. 
+ +https://github.com/restic/restic/issues/3600 +https://github.com/restic/restic/pull/4668 +https://forum.restic.net/t/parent-directories-above-the-snapshot-source-path-fatal-error-permission-denied/7216 diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 050a0e2c7b3..146ff3a7ccb 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -237,8 +237,8 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I } // nodeFromFileInfo returns the restic node from an os.FileInfo. -func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) (*restic.Node, error) { - node, err := restic.NodeFromFileInfo(filename, fi) +func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { + node, err := restic.NodeFromFileInfo(filename, fi, ignoreXattrListError) if !arch.WithAtime { node.AccessTime = node.ModTime } @@ -289,7 +289,7 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) { debug.Log("%v %v", snPath, dir) - treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi) + treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi, false) if err != nil { return FutureNode{}, err } @@ -444,7 +444,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous debug.Log("%v hasn't changed, using old list of blobs", target) arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start)) arch.CompleteBlob(previous.Size) - node, err := arch.nodeFromFileInfo(snPath, target, fi) + node, err := arch.nodeFromFileInfo(snPath, target, fi, false) if err != nil { return FutureNode{}, false, err } @@ -540,7 +540,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous default: debug.Log(" %v other", target) - node, err := arch.nodeFromFileInfo(snPath, target, fi) + node, err := arch.nodeFromFileInfo(snPath, target, fi, false) if err != nil { return FutureNode{}, false, err } @@ -623,7 +623,9 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, } debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath) - node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi) + // in some cases reading xattrs for directories above the backup target is not allowed + // thus ignore errors for such folders. 
+ node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi, true) if err != nil { return FutureNode{}, 0, err } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 841c8f2ce8f..b1ea4b6b659 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -556,7 +556,7 @@ func rename(t testing.TB, oldname, newname string) { } func nodeFromFI(t testing.TB, filename string, fi os.FileInfo) *restic.Node { - node, err := restic.NodeFromFileInfo(filename, fi) + node, err := restic.NodeFromFileInfo(filename, fi, false) if err != nil { t.Fatal(err) } @@ -2230,7 +2230,7 @@ func TestMetadataChanged(t *testing.T) { // get metadata fi := lstat(t, "testfile") - want, err := restic.NodeFromFileInfo("testfile", fi) + want, err := restic.NodeFromFileInfo("testfile", fi, false) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 9462420ddf3..a6b1aad2e15 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -48,7 +48,7 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { func statAndSnapshot(t *testing.T, repo restic.Repository, name string) (*restic.Node, *restic.Node) { fi := lstat(t, name) - want, err := restic.NodeFromFileInfo(name, fi) + want, err := restic.NodeFromFileInfo(name, fi, false) rtest.OK(t, err) _, node := snapshot(t, repo, fs.Local{}, nil, name) diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index 7f11bff8a6b..d10334301ac 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -29,7 +29,7 @@ type FileSaver struct { CompleteBlob func(bytes uint64) - NodeFromFileInfo func(snPath, filename string, fi os.FileInfo) (*restic.Node, error) + NodeFromFileInfo func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) } // NewFileSaver returns a new file saver. A worker pool with fileWorkers is @@ -156,7 +156,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat debug.Log("%v", snPath) - node, err := s.NodeFromFileInfo(snPath, f.Name(), fi) + node, err := s.NodeFromFileInfo(snPath, f.Name(), fi, false) if err != nil { _ = f.Close() completeError(err) diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index ced9d796e05..409bdedd037 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -49,8 +49,8 @@ func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Cont } s := NewFileSaver(ctx, wg, saveBlob, pol, workers, workers) - s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo) (*restic.Node, error) { - return restic.NodeFromFileInfo(filename, fi) + s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { + return restic.NodeFromFileInfo(filename, fi, ignoreXattrListError) } return s, ctx, wg diff --git a/internal/restic/node.go b/internal/restic/node.go index e7688aada0b..9613cf3c292 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -134,7 +134,7 @@ func (node Node) String() string { // NodeFromFileInfo returns a new node from the given path and FileInfo. It // returns the first error that is encountered, together with a node. 
-func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { +func NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*Node, error) { mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky node := &Node{ Path: path, @@ -148,7 +148,7 @@ func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { node.Size = uint64(fi.Size()) } - err := node.fillExtra(path, fi) + err := node.fillExtra(path, fi, ignoreXattrListError) return node, err } @@ -675,7 +675,7 @@ func lookupGroup(gid uint32) string { return group } -func (node *Node) fillExtra(path string, fi os.FileInfo) error { +func (node *Node) fillExtra(path string, fi os.FileInfo, ignoreXattrListError bool) error { stat, ok := toStatT(fi.Sys()) if !ok { // fill minimal info with current values for uid, gid @@ -719,7 +719,7 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { allowExtended, err := node.fillGenericAttributes(path, fi, stat) if allowExtended { // Skip processing ExtendedAttributes if allowExtended is false. - errEx := node.fillExtendedAttributes(path) + errEx := node.fillExtendedAttributes(path, ignoreXattrListError) if err == nil { err = errEx } else { @@ -729,10 +729,13 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { return err } -func (node *Node) fillExtendedAttributes(path string) error { +func (node *Node) fillExtendedAttributes(path string, ignoreListError bool) error { xattrs, err := Listxattr(path) debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err) if err != nil { + if ignoreListError && IsListxattrPermissionError(err) { + return nil + } return err } diff --git a/internal/restic/node_aix.go b/internal/restic/node_aix.go index def46bd6055..8ee9022c92c 100644 --- a/internal/restic/node_aix.go +++ b/internal/restic/node_aix.go @@ -33,6 +33,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr is a no-op on AIX. func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_netbsd.go b/internal/restic/node_netbsd.go index 1a47299be93..cf1fa36bd5f 100644 --- a/internal/restic/node_netbsd.go +++ b/internal/restic/node_netbsd.go @@ -23,6 +23,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr is a no-op on netbsd. func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_openbsd.go b/internal/restic/node_openbsd.go index e60eb9dc831..4f1c0dacb7a 100644 --- a/internal/restic/node_openbsd.go +++ b/internal/restic/node_openbsd.go @@ -23,6 +23,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr is a no-op on openbsd. 
func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index d9fa02ac8eb..ea271faab8a 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" ) @@ -31,7 +32,7 @@ func BenchmarkNodeFillUser(t *testing.B) { t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := NodeFromFileInfo(path, fi) + _, err := NodeFromFileInfo(path, fi, false) rtest.OK(t, err) } @@ -55,7 +56,7 @@ func BenchmarkNodeFromFileInfo(t *testing.B) { t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := NodeFromFileInfo(path, fi) + _, err := NodeFromFileInfo(path, fi, false) if err != nil { t.Fatal(err) } @@ -227,8 +228,11 @@ func TestNodeRestoreAt(t *testing.T) { fi, err := os.Lstat(nodePath) rtest.OK(t, err) - n2, err := NodeFromFileInfo(nodePath, fi) + n2, err := NodeFromFileInfo(nodePath, fi, false) rtest.OK(t, err) + n3, err := NodeFromFileInfo(nodePath, fi, true) + rtest.OK(t, err) + rtest.Assert(t, n2.Equals(*n3), "unexpected node info mismatch %v", cmp.Diff(n2, n3)) rtest.Assert(t, test.Name == n2.Name, "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name) diff --git a/internal/restic/node_unix_test.go b/internal/restic/node_unix_test.go index 374326bf7b8..9ea7b172595 100644 --- a/internal/restic/node_unix_test.go +++ b/internal/restic/node_unix_test.go @@ -128,7 +128,7 @@ func TestNodeFromFileInfo(t *testing.T) { return } - node, err := NodeFromFileInfo(test.filename, fi) + node, err := NodeFromFileInfo(test.filename, fi, false) if err != nil { t.Fatal(err) } diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 5875c3ccd89..7766a1ddf4c 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -78,6 +78,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr associates name and data together as an attribute of path. func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go index 501d5a98a26..5a5a0a61c3b 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/restic/node_windows_test.go @@ -165,7 +165,7 @@ func restoreAndGetNode(t *testing.T, tempDir string, testNode Node, warningExpec fi, err := os.Lstat(testPath) test.OK(t, errors.Wrapf(err, "Could not Lstat for path: %s", testPath)) - nodeFromFileInfo, err := NodeFromFileInfo(testPath, fi) + nodeFromFileInfo, err := NodeFromFileInfo(testPath, fi, false) test.OK(t, errors.Wrapf(err, "Could not get NodeFromFileInfo for path: %s", testPath)) return testPath, nodeFromFileInfo diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go index 0b2d5d5523b..8b080e74f8f 100644 --- a/internal/restic/node_xattr.go +++ b/internal/restic/node_xattr.go @@ -25,6 +25,14 @@ func Listxattr(path string) ([]string, error) { return l, handleXattrErr(err) } +func IsListxattrPermissionError(err error) bool { + var xerr *xattr.Error + if errors.As(err, &xerr) { + return xerr.Op == "xattr.list" && errors.Is(xerr.Err, os.ErrPermission) + } + return false +} + // Setxattr associates name and data together as an attribute of path. 
func Setxattr(path, name string, data []byte) error { return handleXattrErr(xattr.LSet(path, name, data)) diff --git a/internal/restic/node_xattr_test.go b/internal/restic/node_xattr_test.go new file mode 100644 index 00000000000..5ce77bd2805 --- /dev/null +++ b/internal/restic/node_xattr_test.go @@ -0,0 +1,28 @@ +//go:build darwin || freebsd || linux || solaris +// +build darwin freebsd linux solaris + +package restic + +import ( + "os" + "testing" + + "github.com/pkg/xattr" + rtest "github.com/restic/restic/internal/test" +) + +func TestIsListxattrPermissionError(t *testing.T) { + xerr := &xattr.Error{ + Op: "xattr.list", + Name: "test", + Err: os.ErrPermission, + } + err := handleXattrErr(xerr) + rtest.Assert(t, err != nil, "missing error") + rtest.Assert(t, IsListxattrPermissionError(err), "expected IsListxattrPermissionError to return true for %v", err) + + xerr.Err = os.ErrNotExist + err = handleXattrErr(xerr) + rtest.Assert(t, err != nil, "missing error") + rtest.Assert(t, !IsListxattrPermissionError(err), "expected IsListxattrPermissionError to return false for %v", err) +} diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go index da674eb1c0c..67ecec897ed 100644 --- a/internal/restic/tree_test.go +++ b/internal/restic/tree_test.go @@ -86,7 +86,7 @@ func TestNodeComparison(t *testing.T) { fi, err := os.Lstat("tree_test.go") rtest.OK(t, err) - node, err := restic.NodeFromFileInfo("tree_test.go", fi) + node, err := restic.NodeFromFileInfo("tree_test.go", fi, false) rtest.OK(t, err) n2 := *node @@ -127,7 +127,7 @@ func TestTreeEqualSerialization(t *testing.T) { for _, fn := range files[:i] { fi, err := os.Lstat(fn) rtest.OK(t, err) - node, err := restic.NodeFromFileInfo(fn, fi) + node, err := restic.NodeFromFileInfo(fn, fi, false) rtest.OK(t, err) rtest.OK(t, tree.Insert(node)) From 591b421c4a7ef2862273c56eadd61246b37b3e85 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 13:51:59 +0100 Subject: [PATCH 101/893] Deprecate s3legacy layout --- changelog/unreleased/issue-4602 | 14 ++++++++++++-- cmd/restic/cmd_restore_integration_test.go | 2 ++ internal/backend/layout/layout.go | 11 +++++++++++ internal/backend/layout/layout_test.go | 3 +++ internal/backend/local/config.go | 2 +- internal/backend/local/layout_test.go | 2 ++ internal/backend/s3/config.go | 2 +- internal/backend/sftp/config.go | 2 +- internal/backend/sftp/layout_test.go | 2 ++ internal/feature/registry.go | 10 ++++++---- 10 files changed, 41 insertions(+), 9 deletions(-) diff --git a/changelog/unreleased/issue-4602 b/changelog/unreleased/issue-4602 index 3cba63876f5..7532bcb1e60 100644 --- a/changelog/unreleased/issue-4602 +++ b/changelog/unreleased/issue-4602 @@ -1,7 +1,7 @@ -Change: Deprecate legacy index format +Change: Deprecate legacy index format and s3legacy layout Support for the legacy index format used by restic before version 0.2.0 has -been depreacted and will be removed in the next minor restic version. You can +been deprecated and will be removed in the next minor restic version. You can use `restic repair index` to update the index to the current format. It is possible to temporarily reenable support for the legacy index format by @@ -9,5 +9,15 @@ setting the environment variable `RESTIC_FEATURES=deprecate-legacy-index=false`. Note that this feature flag will be removed in the next minor restic version. +Support for the s3legacy layout used for the S3 backend before restic 0.7.0 +has been deprecated and will be removed in the next minor restic version. 
You +can migrate your S3 repository using `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout`. + +It is possible to temporarily reenable support for the legacy s3layout by +setting the environment variable +`RESTIC_FEATURES=deprecate-s3-legacy-layout=false`. Note that this feature flag +will be removed in the next minor restic version. + https://github.com/restic/restic/issues/4602 https://github.com/restic/restic/pull/4724 +https://github.com/restic/restic/pull/4743 diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 2c7cbe1fb67..806c7584b05 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -274,6 +275,7 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) { } func TestRestoreLocalLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() env, cleanup := withTestEnvironment(t) defer cleanup() diff --git a/internal/backend/layout/layout.go b/internal/backend/layout/layout.go index b600566a47f..052fd66ca24 100644 --- a/internal/backend/layout/layout.go +++ b/internal/backend/layout/layout.go @@ -10,6 +10,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" ) @@ -93,6 +94,8 @@ func hasBackendFile(ctx context.Context, fs Filesystem, dir string) (bool, error // cannot be detected automatically. var ErrLayoutDetectionFailed = errors.New("auto-detecting the filesystem layout failed") +var ErrLegacyLayoutFound = errors.New("detected legacy S3 layout. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your repository") + // DetectLayout tries to find out which layout is used in a local (or sftp) // filesystem at the given path. If repo is nil, an instance of LocalFilesystem // is used. 
@@ -123,6 +126,10 @@ func DetectLayout(ctx context.Context, repo Filesystem, dir string) (Layout, err } if foundKeyFile && !foundKeysFile { + if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) { + return nil, ErrLegacyLayoutFound + } + debug.Log("found s3 layout at %v", dir) return &S3LegacyLayout{ Path: dir, @@ -145,6 +152,10 @@ func ParseLayout(ctx context.Context, repo Filesystem, layout, defaultLayout, pa Join: repo.Join, } case "s3legacy": + if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) { + return nil, ErrLegacyLayoutFound + } + l = &S3LegacyLayout{ Path: path, Join: repo.Join, diff --git a/internal/backend/layout/layout_test.go b/internal/backend/layout/layout_test.go index 998f5aeb694..55a0749c9e0 100644 --- a/internal/backend/layout/layout_test.go +++ b/internal/backend/layout/layout_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) @@ -352,6 +353,7 @@ func TestS3LegacyLayout(t *testing.T) { } func TestDetectLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { @@ -389,6 +391,7 @@ func TestDetectLayout(t *testing.T) { } func TestParseLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { diff --git a/internal/backend/local/config.go b/internal/backend/local/config.go index dc5e7948cfa..e08f05550bb 100644 --- a/internal/backend/local/config.go +++ b/internal/backend/local/config.go @@ -10,7 +10,7 @@ import ( // Config holds all information needed to open a local repository. type Config struct { Path string - Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"` + Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"` Connections uint `option:"connections" help:"set a limit for the number of concurrent operations (default: 2)"` } diff --git a/internal/backend/local/layout_test.go b/internal/backend/local/layout_test.go index 46f3996bb72..00c91376af8 100644 --- a/internal/backend/local/layout_test.go +++ b/internal/backend/local/layout_test.go @@ -6,10 +6,12 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) func TestLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { diff --git a/internal/backend/s3/config.go b/internal/backend/s3/config.go index b4d44399fea..4aea4c3d11b 100644 --- a/internal/backend/s3/config.go +++ b/internal/backend/s3/config.go @@ -20,7 +20,7 @@ type Config struct { Secret options.SecretString Bucket string Prefix string - Layout string `option:"layout" help:"use this backend layout (default: auto-detect)"` + Layout string `option:"layout" help:"use this backend layout (default: auto-detect) (deprecated)"` StorageClass string `option:"storage-class" help:"set S3 storage class (STANDARD, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or REDUCED_REDUNDANCY)"` Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` diff --git a/internal/backend/sftp/config.go b/internal/backend/sftp/config.go index 65af50d196a..aa8ac7bff5e 100644 --- 
a/internal/backend/sftp/config.go +++ b/internal/backend/sftp/config.go @@ -13,7 +13,7 @@ import ( type Config struct { User, Host, Port, Path string - Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"` + Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"` Command string `option:"command" help:"specify command to create sftp connection"` Args string `option:"args" help:"specify arguments for ssh"` diff --git a/internal/backend/sftp/layout_test.go b/internal/backend/sftp/layout_test.go index 9cf24a75365..8bb7eac0199 100644 --- a/internal/backend/sftp/layout_test.go +++ b/internal/backend/sftp/layout_test.go @@ -8,6 +8,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/sftp" + "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) @@ -16,6 +17,7 @@ func TestLayout(t *testing.T) { t.Skip("sftp server binary not available") } + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 4693b890976..2d2e45edf76 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -5,13 +5,15 @@ var Flag = New() // flag names are written in kebab-case const ( - DeprecateLegacyIndex FlagName = "deprecate-legacy-index" - DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" + DeprecateLegacyIndex FlagName = "deprecate-legacy-index" + DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout" + DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" ) func init() { Flag.SetFlags(map[FlagName]FlagDesc{ - DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, - DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, + DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, + DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."}, + DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, }) } From 739d11c2eba4f85e66254e0daadde9c9300a22c7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 00:11:27 +0200 Subject: [PATCH 102/893] forget: replace usage of DeleteFilesChecked This simplifies refactoring prune into the repository package. 
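
The diff below switches snapshot deletion to a parallel removal helper with a per-file callback. A rough, self-contained sketch of that pattern (hypothetical names, built on golang.org/x/sync/errgroup rather than restic's internal ParallelRemove API):

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    // parallelRemove deletes ids with a bounded number of workers and reports
    // each result through report, which decides whether an error is fatal.
    func parallelRemove(ctx context.Context, ids []string, workers int,
        remove func(ctx context.Context, id string) error,
        report func(id string, err error) error) error {

        wg, ctx := errgroup.WithContext(ctx)
        wg.SetLimit(workers)
        for _, id := range ids {
            id := id // capture loop variable (needed before Go 1.22)
            wg.Go(func() error {
                return report(id, remove(ctx, id))
            })
        }
        return wg.Wait()
    }

    func main() {
        ids := []string{"snap-a", "snap-b", "snap-c"}
        err := parallelRemove(context.Background(), ids, 2,
            func(_ context.Context, id string) error { return nil }, // pretend deletion
            func(id string, err error) error {
                if err != nil {
                    fmt.Printf("unable to remove %v\n", id) // warn but keep going
                    return nil
                }
                fmt.Printf("removed %v\n", id)
                return nil
            })
        fmt.Println("done:", err)
    }
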
--- cmd/restic/cmd_forget.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index d634576c0e6..080b1747290 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -267,7 +267,17 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption if len(removeSnIDs) > 0 { if !opts.DryRun { - err := DeleteFilesChecked(ctx, gopts, repo, removeSnIDs, restic.SnapshotFile) + bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted") + err := restic.ParallelRemove(ctx, repo, removeSnIDs, restic.SnapshotFile, func(id restic.ID, err error) error { + if err != nil { + Warnf("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) + } + if !gopts.JSON && gopts.verbosity > 2 { + Verbosef("removed %v/%v\n", restic.SnapshotFile, id) + } + return nil + }, bar) + bar.Done() if err != nil { return err } From 32a234b67ede617e7e6b83496dbbe3aeedacb25b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 00:51:20 +0200 Subject: [PATCH 103/893] prune/forget/repair index: convert output to use progress.Printer --- cmd/restic/cmd_forget.go | 53 +++--- cmd/restic/cmd_forget_integration_test.go | 5 +- cmd/restic/cmd_prune.go | 164 +++++++++--------- cmd/restic/cmd_prune_integration_test.go | 17 +- cmd/restic/cmd_repair_index.go | 37 ++-- .../cmd_repair_index_integration_test.go | 14 +- cmd/restic/cmd_repair_packs.go | 6 +- cmd/restic/delete.go | 21 +-- cmd/restic/integration_test.go | 9 +- 9 files changed, 177 insertions(+), 149 deletions(-) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 080b1747290..f6fc5379c21 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -8,6 +8,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" ) @@ -33,7 +34,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, args) + term, cancel := setupTermstatus() + defer cancel() + return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, term, args) }, } @@ -152,7 +155,7 @@ func verifyForgetOptions(opts *ForgetOptions) error { return nil } -func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, args []string) error { +func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { err := verifyForgetOptions(&opts) if err != nil { return err @@ -173,6 +176,12 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } defer unlock() + verbosity := gopts.verbosity + if gopts.JSON { + verbosity = 0 + } + printer := newTerminalProgressPrinter(verbosity, term) + var snapshots restic.Snapshots removeSnIDs := restic.NewIDSet() @@ -210,15 +219,11 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } if policy.Empty() && len(args) == 0 { - if !gopts.JSON { - Verbosef("no policy was specified, no snapshots will be removed\n") - } + printer.P("no policy was specified, no snapshots will be removed\n") } if !policy.Empty() { - if !gopts.JSON { - Verbosef("Applying Policy: %v\n", policy) - } + printer.P("Applying Policy: 
%v\n", policy) for k, snapshotGroup := range snapshotGroups { if gopts.Verbose >= 1 && !gopts.JSON { @@ -241,16 +246,16 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy) if len(keep) != 0 && !gopts.Quiet && !gopts.JSON { - Printf("keep %d snapshots:\n", len(keep)) + printer.P("keep %d snapshots:\n", len(keep)) PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact) - Printf("\n") + printer.P("\n") } fg.Keep = asJSONSnapshots(keep) if len(remove) != 0 && !gopts.Quiet && !gopts.JSON { - Printf("remove %d snapshots:\n", len(remove)) + printer.P("remove %d snapshots:\n", len(remove)) PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact) - Printf("\n") + printer.P("\n") } fg.Remove = asJSONSnapshots(remove) @@ -267,14 +272,12 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption if len(removeSnIDs) > 0 { if !opts.DryRun { - bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted") + bar := printer.NewCounter("files deleted") err := restic.ParallelRemove(ctx, repo, removeSnIDs, restic.SnapshotFile, func(id restic.ID, err error) error { if err != nil { - Warnf("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) - } - if !gopts.JSON && gopts.verbosity > 2 { - Verbosef("removed %v/%v\n", restic.SnapshotFile, id) + printer.E("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) } + printer.VV("removed %v/%v\n", restic.SnapshotFile, id) return nil }, bar) bar.Done() @@ -282,9 +285,7 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption return err } } else { - if !gopts.JSON { - Printf("Would have removed the following snapshots:\n%v\n\n", removeSnIDs) - } + printer.P("Would have removed the following snapshots:\n%v\n\n", removeSnIDs) } } @@ -296,15 +297,13 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } if len(removeSnIDs) > 0 && opts.Prune { - if !gopts.JSON { - if opts.DryRun { - Verbosef("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs)) - } else { - Verbosef("%d snapshots have been removed, running prune\n", len(removeSnIDs)) - } + if opts.DryRun { + printer.P("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs)) + } else { + printer.P("%d snapshots have been removed, running prune\n", len(removeSnIDs)) } pruneOptions.DryRun = opts.DryRun - return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs) + return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs, term) } return nil diff --git a/cmd/restic/cmd_forget_integration_test.go b/cmd/restic/cmd_forget_integration_test.go index 1c027a2408e..e4cdb744e81 100644 --- a/cmd/restic/cmd_forget_integration_test.go +++ b/cmd/restic/cmd_forget_integration_test.go @@ -5,6 +5,7 @@ import ( "testing" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { @@ -12,5 +13,7 @@ func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { pruneOpts := PruneOptions{ MaxUnused: "5%", } - rtest.OK(t, runForget(context.TODO(), opts, pruneOpts, gopts, args)) + rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runForget(context.TODO(), opts, pruneOpts, gopts, term, args) + })) } diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 
3a9a8c33cea..833e72ae7ee 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -16,6 +16,7 @@ import ( "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/progress" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" ) @@ -38,7 +39,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return runPrune(cmd.Context(), pruneOptions, globalOptions) + term, cancel := setupTermstatus() + defer cancel() + return runPrune(cmd.Context(), pruneOptions, globalOptions, term) }, } @@ -138,7 +141,7 @@ func verifyPruneOptions(opts *PruneOptions) error { return nil } -func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error { +func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term *termstatus.Terminal) error { err := verifyPruneOptions(&opts) if err != nil { return err @@ -170,10 +173,10 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error opts.unsafeRecovery = true } - return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet()) + return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet(), term) } -func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet) error { +func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet, term *termstatus.Terminal) error { // we do not need index updates while pruning! repo.DisableAutoIndexUpdate() @@ -181,24 +184,26 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption Print("warning: running prune without a cache, this may be very slow!\n") } - Verbosef("loading indexes...\n") + printer := newTerminalProgressPrinter(gopts.verbosity, term) + + printer.P("loading indexes...\n") // loading the index before the snapshots is ok, as we use an exclusive lock here - bar := newIndexProgress(gopts.Quiet, gopts.JSON) + bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) err := repo.LoadIndex(ctx, bar) if err != nil { return err } - plan, stats, err := planPrune(ctx, opts, repo, ignoreSnapshots, gopts.Quiet) + plan, stats, err := planPrune(ctx, opts, repo, ignoreSnapshots, printer) if err != nil { return err } if opts.DryRun { - Verbosef("\nWould have made the following changes:") + printer.P("\nWould have made the following changes:") } - err = printPruneStats(stats) + err = printPruneStats(printer, stats) if err != nil { return err } @@ -206,7 +211,7 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption // Trigger GC to reset garbage collection threshold runtime.GC() - return doPrune(ctx, opts, gopts, repo, plan) + return doPrune(ctx, opts, repo, plan, printer) } type pruneStats struct { @@ -264,22 +269,22 @@ type packInfoWithID struct { // planPrune selects which files to rewrite and which to delete and which blobs to keep. // Also some summary statistics are returned. 
-func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (prunePlan, pruneStats, error) { +func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (prunePlan, pruneStats, error) { var stats pruneStats - usedBlobs, err := getUsedBlobs(ctx, repo, ignoreSnapshots, quiet) + usedBlobs, err := getUsedBlobs(ctx, repo, ignoreSnapshots, printer) if err != nil { return prunePlan{}, stats, err } - Verbosef("searching used packs...\n") - keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats) + printer.P("searching used packs...\n") + keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) if err != nil { return prunePlan{}, stats, err } - Verbosef("collecting packs for deletion and repacking\n") - plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, quiet) + printer.P("collecting packs for deletion and repacking\n") + plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) if err != nil { return prunePlan{}, stats, err } @@ -308,7 +313,7 @@ func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, i return plan, stats, nil } -func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *pruneStats) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { +func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *pruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { // iterate over all blobs in index to find out which blobs are duplicates // The counter in usedBlobs describes how many instances of the blob exist in the repository index // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist @@ -337,7 +342,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re } if len(missingBlobs) != 0 { - Warnf("%v not found in the index\n\n"+ + printer.E("%v not found in the index\n\n"+ "Integrity check failed: Data seems to be missing.\n"+ "Will not start prune to prevent (additional) data loss!\n"+ "Please report this error (along with the output of the 'prune' run) at\n"+ @@ -458,7 +463,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re return usedBlobs, indexPack, nil } -func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *pruneStats, quiet bool) (prunePlan, error) { +func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *pruneStats, printer progress.Printer) (prunePlan, error) { removePacksFirst := restic.NewIDSet() removePacks := restic.NewIDSet() repackPacks := restic.NewIDSet() @@ -474,12 +479,13 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi } // loop over all packs and decide what to do - bar := newProgressMax(!quiet, uint64(len(indexPack)), "packs processed") + bar := printer.NewCounter("packs processed") + bar.SetMax(uint64(len(indexPack))) err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { p, ok := indexPack[id] if !ok { // Pack was not referenced in index and is not used => immediately remove! 
- Verboseff("will remove pack %v as it is unused and not indexed\n", id.Str()) + printer.V("will remove pack %v as it is unused and not indexed\n", id.Str()) removePacksFirst.Insert(id) stats.size.unref += uint64(packSize) return nil @@ -489,7 +495,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi // Pack size does not fit and pack is needed => error // If the pack is not needed, this is no error, the pack can // and will be simply removed, see below. - Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", + printer.E("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", id.Str(), p.unusedSize+p.usedSize, packSize) return errorSizeNotMatching } @@ -562,16 +568,16 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi } if len(indexPack) != 0 { - Warnf("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) + printer.E("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) for id := range indexPack { - Warnf(" %v\n", id) + printer.E(" %v\n", id) } return prunePlan{}, errorPacksMissing } if len(ignorePacks) != 0 { - Warnf("Missing but unneeded pack files are referenced in the index, will be repaired\n") + printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n") for id := range ignorePacks { - Warnf("will forget missing pack file %v\n", id) + printer.E("will forget missing pack file %v\n", id) } } @@ -657,43 +663,43 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi } // printPruneStats prints out the statistics -func printPruneStats(stats pruneStats) error { - Verboseff("\nused: %10d blobs / %s\n", stats.blobs.used, ui.FormatBytes(stats.size.used)) +func printPruneStats(printer progress.Printer, stats pruneStats) error { + printer.V("\nused: %10d blobs / %s\n", stats.blobs.used, ui.FormatBytes(stats.size.used)) if stats.blobs.duplicate > 0 { - Verboseff("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, ui.FormatBytes(stats.size.duplicate)) + printer.V("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, ui.FormatBytes(stats.size.duplicate)) } - Verboseff("unused: %10d blobs / %s\n", stats.blobs.unused, ui.FormatBytes(stats.size.unused)) + printer.V("unused: %10d blobs / %s\n", stats.blobs.unused, ui.FormatBytes(stats.size.unused)) if stats.size.unref > 0 { - Verboseff("unreferenced: %s\n", ui.FormatBytes(stats.size.unref)) + printer.V("unreferenced: %s\n", ui.FormatBytes(stats.size.unref)) } totalBlobs := stats.blobs.used + stats.blobs.unused + stats.blobs.duplicate totalSize := stats.size.used + stats.size.duplicate + stats.size.unused + stats.size.unref unusedSize := stats.size.duplicate + stats.size.unused - Verboseff("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize)) - Verboseff("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize)) + printer.V("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize)) + printer.V("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize)) - Verbosef("\nto repack: %10d blobs / %s\n", stats.blobs.repack, ui.FormatBytes(stats.size.repack)) - Verbosef("this removes: %10d blobs / %s\n", stats.blobs.repackrm, ui.FormatBytes(stats.size.repackrm)) - Verbosef("to delete: %10d blobs / %s\n", stats.blobs.remove, ui.FormatBytes(stats.size.remove+stats.size.unref)) + printer.P("\nto repack: %10d 
blobs / %s\n", stats.blobs.repack, ui.FormatBytes(stats.size.repack)) + printer.P("this removes: %10d blobs / %s\n", stats.blobs.repackrm, ui.FormatBytes(stats.size.repackrm)) + printer.P("to delete: %10d blobs / %s\n", stats.blobs.remove, ui.FormatBytes(stats.size.remove+stats.size.unref)) totalPruneSize := stats.size.remove + stats.size.repackrm + stats.size.unref - Verbosef("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, ui.FormatBytes(totalPruneSize)) + printer.P("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, ui.FormatBytes(totalPruneSize)) if stats.size.uncompressed > 0 { - Verbosef("not yet compressed: %s\n", ui.FormatBytes(stats.size.uncompressed)) + printer.P("not yet compressed: %s\n", ui.FormatBytes(stats.size.uncompressed)) } - Verbosef("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), ui.FormatBytes(totalSize-totalPruneSize)) + printer.P("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), ui.FormatBytes(totalSize-totalPruneSize)) unusedAfter := unusedSize - stats.size.remove - stats.size.repackrm - Verbosef("unused size after prune: %s (%s of remaining size)\n", + printer.P("unused size after prune: %s (%s of remaining size)\n", ui.FormatBytes(unusedAfter), ui.FormatPercent(unusedAfter, totalSize-totalPruneSize)) - Verbosef("\n") - Verboseff("totally used packs: %10d\n", stats.packs.used) - Verboseff("partly used packs: %10d\n", stats.packs.partlyUsed) - Verboseff("unused packs: %10d\n\n", stats.packs.unused) - - Verboseff("to keep: %10d packs\n", stats.packs.keep) - Verboseff("to repack: %10d packs\n", stats.packs.repack) - Verboseff("to delete: %10d packs\n", stats.packs.remove) + printer.P("\n") + printer.V("totally used packs: %10d\n", stats.packs.used) + printer.V("partly used packs: %10d\n", stats.packs.partlyUsed) + printer.V("unused packs: %10d\n\n", stats.packs.unused) + + printer.V("to keep: %10d packs\n", stats.packs.keep) + printer.V("to repack: %10d packs\n", stats.packs.repack) + printer.V("to delete: %10d packs\n", stats.packs.remove) if stats.packs.unref > 0 { - Verboseff("to delete: %10d unreferenced packs\n\n", stats.packs.unref) + printer.V("to delete: %10d unreferenced packs\n\n", stats.packs.unref) } return nil } @@ -704,29 +710,28 @@ func printPruneStats(stats pruneStats) error { // - rebuild the index while ignoring all files that will be deleted // - delete the files // plan.removePacks and plan.ignorePacks are modified in this function. -func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo restic.Repository, plan prunePlan) (err error) { +func doPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan prunePlan, printer progress.Printer) (err error) { if opts.DryRun { - if !gopts.JSON && gopts.verbosity >= 2 { - Printf("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") - if len(plan.removePacksFirst) > 0 { - Printf("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) - } - Printf("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) - Printf("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) + printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. 
This is expected behavior.\n\n") + if len(plan.removePacksFirst) > 0 { + printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) } + printer.V("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) + printer.V("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) // Always quit here if DryRun was set! return nil } // unreferenced packs can be safely deleted first if len(plan.removePacksFirst) != 0 { - Verbosef("deleting unreferenced packs\n") - DeleteFiles(ctx, gopts, repo, plan.removePacksFirst, restic.PackFile) + printer.P("deleting unreferenced packs\n") + DeleteFiles(ctx, repo, plan.removePacksFirst, restic.PackFile, printer) } if len(plan.repackPacks) != 0 { - Verbosef("repacking packs\n") - bar := newProgressMax(!gopts.Quiet, uint64(len(plan.repackPacks)), "packs repacked") + printer.P("repacking packs\n") + bar := printer.NewCounter("packs repacked") + bar.SetMax(uint64(len(plan.repackPacks))) _, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) bar.Done() if err != nil { @@ -737,7 +742,7 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r plan.removePacks.Merge(plan.repackPacks) if len(plan.keepBlobs) != 0 { - Warnf("%v was not repacked\n\n"+ + printer.E("%v was not repacked\n\n"+ "Integrity check failed.\n"+ "Please report this error (along with the output of the 'prune' run) at\n"+ "https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs) @@ -755,56 +760,54 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r } if opts.unsafeRecovery { - Verbosef("deleting index files\n") + printer.P("deleting index files\n") indexFiles := repo.Index().(*index.MasterIndex).IDs() - err = DeleteFilesChecked(ctx, gopts, repo, indexFiles, restic.IndexFile) + err = DeleteFilesChecked(ctx, repo, indexFiles, restic.IndexFile, printer) if err != nil { return errors.Fatalf("%s", err) } } else if len(plan.ignorePacks) != 0 { - err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, false) + err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, false, printer) if err != nil { return errors.Fatalf("%s", err) } } if len(plan.removePacks) != 0 { - Verbosef("removing %d old packs\n", len(plan.removePacks)) - DeleteFiles(ctx, gopts, repo, plan.removePacks, restic.PackFile) + printer.P("removing %d old packs\n", len(plan.removePacks)) + DeleteFiles(ctx, repo, plan.removePacks, restic.PackFile, printer) } if opts.unsafeRecovery { - err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, true) + err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) if err != nil { return errors.Fatalf("%s", err) } } - Verbosef("done\n") + printer.P("done\n") return nil } -func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool) error { - Verbosef("rebuilding index\n") +func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { + printer.P("rebuilding index\n") - bar := newProgressMax(!gopts.Quiet, 0, "packs processed") + bar := printer.NewCounter("packs processed") return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ SaveProgress: bar, DeleteProgress: func() *progress.Counter { - return newProgressMax(!gopts.Quiet, 0, "old indexes 
deleted") + return printer.NewCounter("old indexes deleted") }, DeleteReport: func(id restic.ID, _ error) { - if gopts.verbosity > 2 { - Verbosef("removed index %v\n", id.String()) - } + printer.VV("removed index %v\n", id.String()) }, SkipDeletion: skipDeletion, }) } -func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (usedBlobs restic.CountedBlobSet, err error) { +func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs restic.CountedBlobSet, err error) { var snapshotTrees restic.IDs - Verbosef("loading all snapshots...\n") + printer.P("loading all snapshots...\n") err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots, func(id restic.ID, sn *restic.Snapshot, err error) error { if err != nil { @@ -819,11 +822,12 @@ func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots r return nil, errors.Fatalf("failed loading snapshot: %v", err) } - Verbosef("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) + printer.P("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) usedBlobs = restic.NewCountedBlobSet() - bar := newProgressMax(!quiet, uint64(len(snapshotTrees)), "snapshots") + bar := printer.NewCounter("snapshots") + bar.SetMax(uint64(len(snapshotTrees))) defer bar.Done() err = restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar) diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index ebfa7ae4e30..4c21940c4d6 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -8,6 +8,7 @@ import ( "github.com/restic/restic/internal/backend" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) { @@ -16,7 +17,9 @@ func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) { defer func() { gopts.backendTestHook = oldHook }() - rtest.OK(t, runPrune(context.TODO(), opts, gopts)) + rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runPrune(context.TODO(), opts, gopts, term) + })) } func TestPrune(t *testing.T) { @@ -84,7 +87,9 @@ func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) { pruneOpts := PruneOptions{ MaxUnused: "5%", } - return runForget(context.TODO(), opts, pruneOpts, gopts, args) + return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runForget(context.TODO(), opts, pruneOpts, gopts, term, args) + }) }) rtest.OK(t, err) @@ -138,7 +143,9 @@ func TestPruneWithDamagedRepository(t *testing.T) { env.gopts.backendTestHook = oldHook }() // prune should fail - rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing, + rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runPrune(context.TODO(), pruneDefaultOptions, env.gopts, term) + }) == errorPacksMissing, "prune should have reported index not complete error") } @@ -218,7 +225,9 @@ func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, o testRunPrune(t, env.gopts, optionsPrune) testRunCheck(t, env.gopts) } else { - rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil, + rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) 
error { + return runPrune(context.TODO(), optionsPrune, env.gopts, term) + }) != nil, "prune should have reported an error") } } diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index 1ac743348ed..19db1d03f41 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -7,6 +7,8 @@ import ( "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -25,7 +27,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions) + term, cancel := setupTermstatus() + defer cancel() + return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions, term) }, } @@ -55,17 +59,19 @@ func init() { } } -func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error { +func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, term *termstatus.Terminal) error { ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } defer unlock() - return rebuildIndex(ctx, opts, gopts, repo) + printer := newTerminalProgressPrinter(gopts.verbosity, term) + + return rebuildIndex(ctx, opts, repo, printer) } -func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository) error { +func rebuildIndex(ctx context.Context, opts RepairIndexOptions, repo *repository.Repository, printer progress.Printer) error { var obsoleteIndexes restic.IDs packSizeFromList := make(map[restic.ID]int64) packSizeFromIndex := make(map[restic.ID]int64) @@ -81,11 +87,11 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti return err } } else { - Verbosef("loading indexes...\n") + printer.P("loading indexes...\n") mi := index.NewMasterIndex() err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error { if err != nil { - Warnf("removing invalid index %v: %v\n", id, err) + printer.E("removing invalid index %v: %v\n", id, err) obsoleteIndexes = append(obsoleteIndexes, id) return nil } @@ -109,7 +115,7 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti packSizeFromIndex = pack.Size(ctx, repo.Index(), false) } - Verbosef("getting pack files to read...\n") + printer.P("getting pack files to read...\n") err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { size, ok := packSizeFromIndex[id] if !ok || size != packSize { @@ -118,9 +124,9 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti removePacks.Insert(id) } if !ok { - Warnf("adding pack file to index %v\n", id) + printer.E("adding pack file to index %v\n", id) } else if size != packSize { - Warnf("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) + printer.E("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) } delete(packSizeFromIndex, id) return nil @@ -132,12 +138,13 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti // forget pack files that are referenced in the index but do not exist // when rebuilding the index 
removePacks.Insert(id) - Warnf("removing not found pack file %v\n", id) + printer.E("removing not found pack file %v\n", id) } if len(packSizeFromList) > 0 { - Verbosef("reading pack files\n") - bar := newProgressMax(!gopts.Quiet, uint64(len(packSizeFromList)), "packs") + printer.P("reading pack files\n") + bar := printer.NewCounter("packs") + bar.SetMax(uint64(len(packSizeFromList))) invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) bar.Done() if err != nil { @@ -145,15 +152,15 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti } for _, id := range invalidFiles { - Verboseff("skipped incomplete pack file: %v\n", id) + printer.V("skipped incomplete pack file: %v\n", id) } } - err = rebuildIndexFiles(ctx, gopts, repo, removePacks, obsoleteIndexes, false) + err = rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) if err != nil { return err } - Verbosef("done\n") + printer.P("done\n") return nil } diff --git a/cmd/restic/cmd_repair_index_integration_test.go b/cmd/restic/cmd_repair_index_integration_test.go index e3271361ae0..e1a3dfe0303 100644 --- a/cmd/restic/cmd_repair_index_integration_test.go +++ b/cmd/restic/cmd_repair_index_integration_test.go @@ -13,12 +13,15 @@ import ( "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) { rtest.OK(t, withRestoreGlobalOptions(func() error { - globalOptions.stdout = io.Discard - return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts) + return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + globalOptions.stdout = io.Discard + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts, term) + }) })) } @@ -126,12 +129,13 @@ func TestRebuildIndexFailsOnAppendOnly(t *testing.T) { rtest.SetupTarTestFixture(t, env.base, datafile) err := withRestoreGlobalOptions(func() error { - globalOptions.stdout = io.Discard - env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { return &appendOnlyBackend{r}, nil } - return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts) + return withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + globalOptions.stdout = io.Discard + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term) + }) }) if err == nil { diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 00dee076b33..7489e1b3c48 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -58,14 +58,14 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T } defer unlock() - bar := newIndexProgress(gopts.Quiet, gopts.JSON) + printer := newTerminalProgressPrinter(gopts.verbosity, term) + + bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) err = repo.LoadIndex(ctx, bar) if err != nil { return errors.Fatalf("%s", err) } - printer := newTerminalProgressPrinter(gopts.verbosity, term) - printer.P("saving backup copies of pack files to current folder") for id := range ids { f, err := os.OpenFile("pack-"+id.String(), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666) diff --git a/cmd/restic/delete.go b/cmd/restic/delete.go index c3a7e039dd7..34f71d91aed 100644 --- a/cmd/restic/delete.go +++ b/cmd/restic/delete.go @@ -4,38 +4,35 @@ import ( "context" 
"github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" ) // DeleteFiles deletes the given fileList of fileType in parallel // it will print a warning if there is an error, but continue deleting the remaining files -func DeleteFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) { - _ = deleteFiles(ctx, gopts, true, repo, fileList, fileType) +func DeleteFiles(ctx context.Context, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) { + _ = deleteFiles(ctx, true, repo, fileList, fileType, printer) } // DeleteFilesChecked deletes the given fileList of fileType in parallel // if an error occurs, it will cancel and return this error -func DeleteFilesChecked(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error { - return deleteFiles(ctx, gopts, false, repo, fileList, fileType) +func DeleteFilesChecked(ctx context.Context, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { + return deleteFiles(ctx, false, repo, fileList, fileType, printer) } // deleteFiles deletes the given fileList of fileType in parallel // if ignoreError=true, it will print a warning if there was an error, else it will abort. -func deleteFiles(ctx context.Context, gopts GlobalOptions, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error { - bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted") +func deleteFiles(ctx context.Context, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { + bar := printer.NewCounter("files deleted") defer bar.Done() return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error { if err != nil { - if !gopts.JSON { - Warnf("unable to remove %v/%v from the repository\n", fileType, id) - } + printer.E("unable to remove %v/%v from the repository\n", fileType, id) if !ignoreError { return err } } - if !gopts.JSON && gopts.verbosity > 2 { - Verbosef("removed %v/%v\n", fileType, id) - } + printer.VV("removed %v/%v\n", fileType, id) return nil }, bar) } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 21be571e23d..a7b66add895 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -12,6 +12,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func TestCheckRestoreNoLock(t *testing.T) { @@ -88,8 +89,12 @@ func TestListOnce(t *testing.T) { testRunPrune(t, env.gopts, pruneOpts) rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil)) - rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts)) - rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts)) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term) + })) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts, term) + })) } type writeToOnly struct { From 
866ddf5698503c3a4159c43c95494f434bfc0f1f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 19:12:04 +0200 Subject: [PATCH 104/893] repair index: refactor code into repository package --- cmd/restic/cmd_repair_index.go | 96 +--------------------- internal/repository/repair_index.go | 118 ++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+), 93 deletions(-) create mode 100644 internal/repository/repair_index.go diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index 19db1d03f41..50ba16e33b9 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -3,11 +3,7 @@ package main import ( "context" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/ui/progress" "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -68,99 +64,13 @@ func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalO printer := newTerminalProgressPrinter(gopts.verbosity, term) - return rebuildIndex(ctx, opts, repo, printer) -} - -func rebuildIndex(ctx context.Context, opts RepairIndexOptions, repo *repository.Repository, printer progress.Printer) error { - var obsoleteIndexes restic.IDs - packSizeFromList := make(map[restic.ID]int64) - packSizeFromIndex := make(map[restic.ID]int64) - removePacks := restic.NewIDSet() - - if opts.ReadAllPacks { - // get list of old index files but start with empty index - err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error { - obsoleteIndexes = append(obsoleteIndexes, id) - return nil - }) - if err != nil { - return err - } - } else { - printer.P("loading indexes...\n") - mi := index.NewMasterIndex() - err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error { - if err != nil { - printer.E("removing invalid index %v: %v\n", id, err) - obsoleteIndexes = append(obsoleteIndexes, id) - return nil - } - - mi.Insert(idx) - return nil - }) - if err != nil { - return err - } - - err = mi.MergeFinalIndexes() - if err != nil { - return err - } - - err = repo.SetIndex(mi) - if err != nil { - return err - } - packSizeFromIndex = pack.Size(ctx, repo.Index(), false) - } - - printer.P("getting pack files to read...\n") - err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { - size, ok := packSizeFromIndex[id] - if !ok || size != packSize { - // Pack was not referenced in index or size does not match - packSizeFromList[id] = packSize - removePacks.Insert(id) - } - if !ok { - printer.E("adding pack file to index %v\n", id) - } else if size != packSize { - printer.E("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) - } - delete(packSizeFromIndex, id) - return nil - }) + err = repository.RepairIndex(ctx, repo, repository.RepairIndexOptions{ + ReadAllPacks: opts.ReadAllPacks, + }, printer) if err != nil { return err } - for id := range packSizeFromIndex { - // forget pack files that are referenced in the index but do not exist - // when rebuilding the index - removePacks.Insert(id) - printer.E("removing not found pack file %v\n", id) - } - - if len(packSizeFromList) > 0 { - printer.P("reading pack files\n") - bar := printer.NewCounter("packs") - bar.SetMax(uint64(len(packSizeFromList))) - invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) - 
bar.Done() - if err != nil { - return err - } - - for _, id := range invalidFiles { - printer.V("skipped incomplete pack file: %v\n", id) - } - } - err = rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) - if err != nil { - return err - } printer.P("done\n") - return nil } diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go new file mode 100644 index 00000000000..7cf598fa5c6 --- /dev/null +++ b/internal/repository/repair_index.go @@ -0,0 +1,118 @@ +package repository + +import ( + "context" + + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" +) + +type RepairIndexOptions struct { + ReadAllPacks bool +} + +func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, printer progress.Printer) error { + var obsoleteIndexes restic.IDs + packSizeFromList := make(map[restic.ID]int64) + packSizeFromIndex := make(map[restic.ID]int64) + removePacks := restic.NewIDSet() + + if opts.ReadAllPacks { + // get list of old index files but start with empty index + err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error { + obsoleteIndexes = append(obsoleteIndexes, id) + return nil + }) + if err != nil { + return err + } + } else { + printer.P("loading indexes...\n") + mi := index.NewMasterIndex() + err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error { + if err != nil { + printer.E("removing invalid index %v: %v\n", id, err) + obsoleteIndexes = append(obsoleteIndexes, id) + return nil + } + + mi.Insert(idx) + return nil + }) + if err != nil { + return err + } + + err = mi.MergeFinalIndexes() + if err != nil { + return err + } + + err = repo.SetIndex(mi) + if err != nil { + return err + } + packSizeFromIndex = pack.Size(ctx, repo.Index(), false) + } + + printer.P("getting pack files to read...\n") + err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { + size, ok := packSizeFromIndex[id] + if !ok || size != packSize { + // Pack was not referenced in index or size does not match + packSizeFromList[id] = packSize + removePacks.Insert(id) + } + if !ok { + printer.E("adding pack file to index %v\n", id) + } else if size != packSize { + printer.E("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) + } + delete(packSizeFromIndex, id) + return nil + }) + if err != nil { + return err + } + for id := range packSizeFromIndex { + // forget pack files that are referenced in the index but do not exist + // when rebuilding the index + removePacks.Insert(id) + printer.E("removing not found pack file %v\n", id) + } + + if len(packSizeFromList) > 0 { + printer.P("reading pack files\n") + bar := printer.NewCounter("packs") + bar.SetMax(uint64(len(packSizeFromList))) + invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) + bar.Done() + if err != nil { + return err + } + + for _, id := range invalidFiles { + printer.V("skipped incomplete pack file: %v\n", id) + } + } + + return rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) +} + +func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { + printer.P("rebuilding index\n") + + bar := printer.NewCounter("packs processed") + return repo.Index().Save(ctx, repo, removePacks, 
extraObsolete, restic.MasterIndexSaveOpts{ + SaveProgress: bar, + DeleteProgress: func() *progress.Counter { + return printer.NewCounter("old indexes deleted") + }, + DeleteReport: func(id restic.ID, _ error) { + printer.VV("removed index %v\n", id.String()) + }, + SkipDeletion: skipDeletion, + }) +} From df9d4b455d0154589b2d317d7aa3eac4a350dd34 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 19:17:28 +0200 Subject: [PATCH 105/893] prune: prepare for moving code to repository package --- cmd/restic/cmd_prune.go | 220 ++++++++++++++++++++-------------------- 1 file changed, 111 insertions(+), 109 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 833e72ae7ee..eef66cc8eae 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -194,7 +194,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption return err } - plan, stats, err := planPrune(ctx, opts, repo, ignoreSnapshots, printer) + plan, stats, err := PlanPrune(ctx, opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + return getUsedBlobs(ctx, repo, ignoreSnapshots, printer) + }, printer) if err != nil { return err } @@ -211,40 +213,40 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption // Trigger GC to reset garbage collection threshold runtime.GC() - return doPrune(ctx, opts, repo, plan, printer) + return DoPrune(ctx, opts, repo, plan, printer) } -type pruneStats struct { - blobs struct { - used uint - duplicate uint - unused uint - remove uint - repack uint - repackrm uint - } - size struct { - used uint64 - duplicate uint64 - unused uint64 - remove uint64 - repack uint64 - repackrm uint64 - unref uint64 - uncompressed uint64 - } - packs struct { - used uint - unused uint - partlyUsed uint - unref uint - keep uint - repack uint - remove uint +type PruneStats struct { + Blobs struct { + Used uint + Duplicate uint + Unused uint + Remove uint + Repack uint + Repackrm uint + } + Size struct { + Used uint64 + Duplicate uint64 + Unused uint64 + Remove uint64 + Repack uint64 + Repackrm uint64 + Unref uint64 + Uncompressed uint64 + } + Packs struct { + Used uint + Unused uint + PartlyUsed uint + Unref uint + Keep uint + Repack uint + Remove uint } } -type prunePlan struct { +type PrunePlan struct { removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) repackPacks restic.IDSet // packs to repack keepBlobs restic.CountedBlobSet // blobs to keep during repacking @@ -267,26 +269,26 @@ type packInfoWithID struct { mustCompress bool } -// planPrune selects which files to rewrite and which to delete and which blobs to keep. +// PlanPrune selects which files to rewrite and which to delete and which blobs to keep. // Also some summary statistics are returned. 
-func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (prunePlan, pruneStats, error) { - var stats pruneStats +func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (PrunePlan, PruneStats, error) { + var stats PruneStats - usedBlobs, err := getUsedBlobs(ctx, repo, ignoreSnapshots, printer) + usedBlobs, err := getUsedBlobs(ctx, repo) if err != nil { - return prunePlan{}, stats, err + return PrunePlan{}, stats, err } printer.P("searching used packs...\n") keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) if err != nil { - return prunePlan{}, stats, err + return PrunePlan{}, stats, err } printer.P("collecting packs for deletion and repacking\n") plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) if err != nil { - return prunePlan{}, stats, err + return PrunePlan{}, stats, err } if len(plan.repackPacks) != 0 { @@ -313,7 +315,7 @@ func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, i return plan, stats, nil } -func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *pruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { +func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { // iterate over all blobs in index to find out which blobs are duplicates // The counter in usedBlobs describes how many instances of the blob exist in the repository index // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist @@ -384,20 +386,20 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re ip.unusedBlobs++ // count as duplicate, will later on change one copy to be counted as used - stats.size.duplicate += size - stats.blobs.duplicate++ + stats.Size.Duplicate += size + stats.Blobs.Duplicate++ case dupCount == 1: // used blob, not duplicate ip.usedSize += size ip.usedBlobs++ - stats.size.used += size - stats.blobs.used++ + stats.Size.Used += size + stats.Blobs.Used++ default: // unused blob ip.unusedSize += size ip.unusedBlobs++ - stats.size.unused += size - stats.blobs.unused++ + stats.Size.Unused += size + stats.Blobs.Unused++ } if !blob.IsCompressed() { ip.uncompressed = true @@ -431,10 +433,10 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re ip.unusedSize -= size ip.unusedBlobs-- // same for the global statistics - stats.size.used += size - stats.blobs.used++ - stats.size.duplicate -= size - stats.blobs.duplicate-- + stats.Size.Used += size + stats.Blobs.Used++ + stats.Size.Duplicate -= size + stats.Blobs.Duplicate-- // let other occurrences remain marked as unused usedBlobs[bh] = 1 default: @@ -463,7 +465,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re return usedBlobs, indexPack, nil } -func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *pruneStats, printer progress.Printer) (prunePlan, error) { +func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer 
progress.Printer) (PrunePlan, error) { removePacksFirst := restic.NewIDSet() removePacks := restic.NewIDSet() repackPacks := restic.NewIDSet() @@ -487,7 +489,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi // Pack was not referenced in index and is not used => immediately remove! printer.V("will remove pack %v as it is unused and not indexed\n", id.Str()) removePacksFirst.Insert(id) - stats.size.unref += uint64(packSize) + stats.Size.Unref += uint64(packSize) return nil } @@ -503,15 +505,15 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi // statistics switch { case p.usedBlobs == 0: - stats.packs.unused++ + stats.Packs.Unused++ case p.unusedBlobs == 0: - stats.packs.used++ + stats.Packs.Used++ default: - stats.packs.partlyUsed++ + stats.Packs.PartlyUsed++ } if p.uncompressed { - stats.size.uncompressed += p.unusedSize + p.usedSize + stats.Size.Uncompressed += p.unusedSize + p.usedSize } mustCompress := false if repoVersion >= 2 { @@ -525,17 +527,17 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi case p.usedBlobs == 0: // All blobs in pack are no longer used => remove pack! removePacks.Insert(id) - stats.blobs.remove += p.unusedBlobs - stats.size.remove += p.unusedSize + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize case opts.RepackCachableOnly && p.tpe == restic.DataBlob: // if this is a data pack and --repack-cacheable-only is set => keep pack! - stats.packs.keep++ + stats.Packs.Keep++ case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: if packSize >= int64(targetPackSize) { // All blobs in pack are used and not mixed => keep pack! - stats.packs.keep++ + stats.Packs.Keep++ } else { repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) } @@ -551,7 +553,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi }) bar.Done() if err != nil { - return prunePlan{}, err + return PrunePlan{}, err } // At this point indexPacks contains only missing packs! @@ -561,8 +563,8 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi for id, p := range indexPack { if p.usedBlobs == 0 { ignorePacks.Insert(id) - stats.blobs.remove += p.unusedBlobs - stats.size.remove += p.unusedSize + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize delete(indexPack, id) } } @@ -572,7 +574,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi for id := range indexPack { printer.E(" %v\n", id) } - return prunePlan{}, errorPacksMissing + return PrunePlan{}, errorPacksMissing } if len(ignorePacks) != 0 { printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n") @@ -584,7 +586,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi if len(repackSmallCandidates) < 10 { // too few small files to be worth the trouble, this also prevents endlessly repacking // if there is just a single pack file below the target size - stats.packs.keep += uint(len(repackSmallCandidates)) + stats.Packs.Keep += uint(len(repackSmallCandidates)) } else { repackCandidates = append(repackCandidates, repackSmallCandidates...) 
} @@ -612,26 +614,26 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi repack := func(id restic.ID, p packInfo) { repackPacks.Insert(id) - stats.blobs.repack += p.unusedBlobs + p.usedBlobs - stats.size.repack += p.unusedSize + p.usedSize - stats.blobs.repackrm += p.unusedBlobs - stats.size.repackrm += p.unusedSize + stats.Blobs.Repack += p.unusedBlobs + p.usedBlobs + stats.Size.Repack += p.unusedSize + p.usedSize + stats.Blobs.Repackrm += p.unusedBlobs + stats.Size.Repackrm += p.unusedSize if p.uncompressed { - stats.size.uncompressed -= p.unusedSize + p.usedSize + stats.Size.Uncompressed -= p.unusedSize + p.usedSize } } // calculate limit for number of unused bytes in the repo after repacking - maxUnusedSizeAfter := opts.maxUnusedBytes(stats.size.used) + maxUnusedSizeAfter := opts.maxUnusedBytes(stats.Size.Used) for _, p := range repackCandidates { - reachedUnusedSizeAfter := (stats.size.unused-stats.size.remove-stats.size.repackrm < maxUnusedSizeAfter) - reachedRepackSize := stats.size.repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes + reachedUnusedSizeAfter := (stats.Size.Unused-stats.Size.Remove-stats.Size.Repackrm < maxUnusedSizeAfter) + reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) switch { case reachedRepackSize: - stats.packs.keep++ + stats.Packs.Keep++ case p.tpe != restic.DataBlob, p.mustCompress: // repacking non-data packs / uncompressed-trees is only limited by repackSize @@ -639,23 +641,23 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi case reachedUnusedSizeAfter && packIsLargeEnough: // for all other packs stop repacking if tolerated unused size is reached. 
- stats.packs.keep++ + stats.Packs.Keep++ default: repack(p.ID, p.packInfo) } } - stats.packs.unref = uint(len(removePacksFirst)) - stats.packs.repack = uint(len(repackPacks)) - stats.packs.remove = uint(len(removePacks)) + stats.Packs.Unref = uint(len(removePacksFirst)) + stats.Packs.Repack = uint(len(repackPacks)) + stats.Packs.Remove = uint(len(removePacks)) if repo.Config().Version < 2 { // compression not supported for repository format version 1 - stats.size.uncompressed = 0 + stats.Size.Uncompressed = 0 } - return prunePlan{removePacksFirst: removePacksFirst, + return PrunePlan{removePacksFirst: removePacksFirst, removePacks: removePacks, repackPacks: repackPacks, ignorePacks: ignorePacks, @@ -663,54 +665,54 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi } // printPruneStats prints out the statistics -func printPruneStats(printer progress.Printer, stats pruneStats) error { - printer.V("\nused: %10d blobs / %s\n", stats.blobs.used, ui.FormatBytes(stats.size.used)) - if stats.blobs.duplicate > 0 { - printer.V("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, ui.FormatBytes(stats.size.duplicate)) - } - printer.V("unused: %10d blobs / %s\n", stats.blobs.unused, ui.FormatBytes(stats.size.unused)) - if stats.size.unref > 0 { - printer.V("unreferenced: %s\n", ui.FormatBytes(stats.size.unref)) - } - totalBlobs := stats.blobs.used + stats.blobs.unused + stats.blobs.duplicate - totalSize := stats.size.used + stats.size.duplicate + stats.size.unused + stats.size.unref - unusedSize := stats.size.duplicate + stats.size.unused +func printPruneStats(printer progress.Printer, stats PruneStats) error { + printer.V("\nused: %10d blobs / %s\n", stats.Blobs.Used, ui.FormatBytes(stats.Size.Used)) + if stats.Blobs.Duplicate > 0 { + printer.V("duplicates: %10d blobs / %s\n", stats.Blobs.Duplicate, ui.FormatBytes(stats.Size.Duplicate)) + } + printer.V("unused: %10d blobs / %s\n", stats.Blobs.Unused, ui.FormatBytes(stats.Size.Unused)) + if stats.Size.Unref > 0 { + printer.V("unreferenced: %s\n", ui.FormatBytes(stats.Size.Unref)) + } + totalBlobs := stats.Blobs.Used + stats.Blobs.Unused + stats.Blobs.Duplicate + totalSize := stats.Size.Used + stats.Size.Duplicate + stats.Size.Unused + stats.Size.Unref + unusedSize := stats.Size.Duplicate + stats.Size.Unused printer.V("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize)) printer.V("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize)) - printer.P("\nto repack: %10d blobs / %s\n", stats.blobs.repack, ui.FormatBytes(stats.size.repack)) - printer.P("this removes: %10d blobs / %s\n", stats.blobs.repackrm, ui.FormatBytes(stats.size.repackrm)) - printer.P("to delete: %10d blobs / %s\n", stats.blobs.remove, ui.FormatBytes(stats.size.remove+stats.size.unref)) - totalPruneSize := stats.size.remove + stats.size.repackrm + stats.size.unref - printer.P("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, ui.FormatBytes(totalPruneSize)) - if stats.size.uncompressed > 0 { - printer.P("not yet compressed: %s\n", ui.FormatBytes(stats.size.uncompressed)) + printer.P("\nto repack: %10d blobs / %s\n", stats.Blobs.Repack, ui.FormatBytes(stats.Size.Repack)) + printer.P("this removes: %10d blobs / %s\n", stats.Blobs.Repackrm, ui.FormatBytes(stats.Size.Repackrm)) + printer.P("to delete: %10d blobs / %s\n", stats.Blobs.Remove, ui.FormatBytes(stats.Size.Remove+stats.Size.Unref)) + totalPruneSize := stats.Size.Remove + stats.Size.Repackrm + stats.Size.Unref + printer.P("total prune: %10d 
blobs / %s\n", stats.Blobs.Remove+stats.Blobs.Repackrm, ui.FormatBytes(totalPruneSize)) + if stats.Size.Uncompressed > 0 { + printer.P("not yet compressed: %s\n", ui.FormatBytes(stats.Size.Uncompressed)) } - printer.P("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), ui.FormatBytes(totalSize-totalPruneSize)) - unusedAfter := unusedSize - stats.size.remove - stats.size.repackrm + printer.P("remaining: %10d blobs / %s\n", totalBlobs-(stats.Blobs.Remove+stats.Blobs.Repackrm), ui.FormatBytes(totalSize-totalPruneSize)) + unusedAfter := unusedSize - stats.Size.Remove - stats.Size.Repackrm printer.P("unused size after prune: %s (%s of remaining size)\n", ui.FormatBytes(unusedAfter), ui.FormatPercent(unusedAfter, totalSize-totalPruneSize)) printer.P("\n") - printer.V("totally used packs: %10d\n", stats.packs.used) - printer.V("partly used packs: %10d\n", stats.packs.partlyUsed) - printer.V("unused packs: %10d\n\n", stats.packs.unused) + printer.V("totally used packs: %10d\n", stats.Packs.Used) + printer.V("partly used packs: %10d\n", stats.Packs.PartlyUsed) + printer.V("unused packs: %10d\n\n", stats.Packs.Unused) - printer.V("to keep: %10d packs\n", stats.packs.keep) - printer.V("to repack: %10d packs\n", stats.packs.repack) - printer.V("to delete: %10d packs\n", stats.packs.remove) - if stats.packs.unref > 0 { - printer.V("to delete: %10d unreferenced packs\n\n", stats.packs.unref) + printer.V("to keep: %10d packs\n", stats.Packs.Keep) + printer.V("to repack: %10d packs\n", stats.Packs.Repack) + printer.V("to delete: %10d packs\n", stats.Packs.Remove) + if stats.Packs.Unref > 0 { + printer.V("to delete: %10d unreferenced packs\n\n", stats.Packs.Unref) } return nil } -// doPrune does the actual pruning: +// DoPrune does the actual pruning: // - remove unreferenced packs first // - repack given pack files while keeping the given blobs // - rebuild the index while ignoring all files that will be deleted // - delete the files // plan.removePacks and plan.ignorePacks are modified in this function. -func doPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan prunePlan, printer progress.Printer) (err error) { +func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan PrunePlan, printer progress.Printer) (err error) { if opts.DryRun { printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. 
This is expected behavior.\n\n") if len(plan.removePacksFirst) > 0 { From fc3b548625793579cdb05bd38587744bd3f21b00 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 19:21:21 +0200 Subject: [PATCH 106/893] prune: move logic into repository package --- cmd/restic/cmd_prune.go | 576 +--------------------- cmd/restic/cmd_prune_integration_test.go | 3 +- cmd/restic/delete.go | 38 -- internal/repository/prune.go | 581 +++++++++++++++++++++++ 4 files changed, 599 insertions(+), 599 deletions(-) delete mode 100644 cmd/restic/delete.go create mode 100644 internal/repository/prune.go diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index eef66cc8eae..578414f527f 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -4,14 +4,11 @@ import ( "context" "math" "runtime" - "sort" "strconv" "strings" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" @@ -21,10 +18,6 @@ import ( "github.com/spf13/cobra" ) -var errorIndexIncomplete = errors.Fatal("index is not complete") -var errorPacksMissing = errors.Fatal("packs from index missing in repo") -var errorSizeNotMatching = errors.Fatal("pack size does not match calculated size from index") - var cmdPrune = &cobra.Command{ Use: "prune [flags]", Short: "Remove unneeded data from the repository", @@ -194,14 +187,26 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption return err } - plan, stats, err := PlanPrune(ctx, opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + popts := repository.PruneOptions{ + DryRun: opts.DryRun, + UnsafeRecovery: opts.unsafeRecovery, + + MaxUnusedBytes: opts.maxUnusedBytes, + MaxRepackBytes: opts.MaxRepackBytes, + + RepackCachableOnly: opts.RepackCachableOnly, + RepackSmall: opts.RepackSmall, + RepackUncompressed: opts.RepackUncompressed, + } + + plan, stats, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { return getUsedBlobs(ctx, repo, ignoreSnapshots, printer) }, printer) if err != nil { return err } - if opts.DryRun { + if popts.DryRun { printer.P("\nWould have made the following changes:") } @@ -213,459 +218,11 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption // Trigger GC to reset garbage collection threshold runtime.GC() - return DoPrune(ctx, opts, repo, plan, printer) -} - -type PruneStats struct { - Blobs struct { - Used uint - Duplicate uint - Unused uint - Remove uint - Repack uint - Repackrm uint - } - Size struct { - Used uint64 - Duplicate uint64 - Unused uint64 - Remove uint64 - Repack uint64 - Repackrm uint64 - Unref uint64 - Uncompressed uint64 - } - Packs struct { - Used uint - Unused uint - PartlyUsed uint - Unref uint - Keep uint - Repack uint - Remove uint - } -} - -type PrunePlan struct { - removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) - repackPacks restic.IDSet // packs to repack - keepBlobs restic.CountedBlobSet // blobs to keep during repacking - removePacks restic.IDSet // packs to remove - ignorePacks restic.IDSet // packs to ignore when rebuilding the index -} - -type packInfo struct { - usedBlobs uint - unusedBlobs uint - usedSize uint64 - unusedSize uint64 - tpe 
restic.BlobType - uncompressed bool -} - -type packInfoWithID struct { - ID restic.ID - packInfo - mustCompress bool -} - -// PlanPrune selects which files to rewrite and which to delete and which blobs to keep. -// Also some summary statistics are returned. -func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (PrunePlan, PruneStats, error) { - var stats PruneStats - - usedBlobs, err := getUsedBlobs(ctx, repo) - if err != nil { - return PrunePlan{}, stats, err - } - - printer.P("searching used packs...\n") - keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) - if err != nil { - return PrunePlan{}, stats, err - } - - printer.P("collecting packs for deletion and repacking\n") - plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) - if err != nil { - return PrunePlan{}, stats, err - } - - if len(plan.repackPacks) != 0 { - blobCount := keepBlobs.Len() - // when repacking, we do not want to keep blobs which are - // already contained in kept packs, so delete them from keepBlobs - repo.Index().Each(ctx, func(blob restic.PackedBlob) { - if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { - return - } - keepBlobs.Delete(blob.BlobHandle) - }) - - if keepBlobs.Len() < blobCount/2 { - // replace with copy to shrink map to necessary size if there's a chance to benefit - keepBlobs = keepBlobs.Copy() - } - } else { - // keepBlobs is only needed if packs are repacked - keepBlobs = nil - } - plan.keepBlobs = keepBlobs - - return plan, stats, nil -} - -func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { - // iterate over all blobs in index to find out which blobs are duplicates - // The counter in usedBlobs describes how many instances of the blob exist in the repository index - // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist - idx.Each(ctx, func(blob restic.PackedBlob) { - bh := blob.BlobHandle - count, ok := usedBlobs[bh] - if ok { - if count < math.MaxUint8 { - // don't overflow, but saturate count at 255 - // this can lead to a non-optimal pack selection, but won't cause - // problems otherwise - count++ - } - - usedBlobs[bh] = count - } - }) - - // Check if all used blobs have been found in index - missingBlobs := restic.NewBlobSet() - for bh, count := range usedBlobs { - if count == 0 { - // blob does not exist in any pack files - missingBlobs.Insert(bh) - } - } - - if len(missingBlobs) != 0 { - printer.E("%v not found in the index\n\n"+ - "Integrity check failed: Data seems to be missing.\n"+ - "Will not start prune to prevent (additional) data loss!\n"+ - "Please report this error (along with the output of the 'prune' run) at\n"+ - "https://github.com/restic/restic/issues/new/choose\n", missingBlobs) - return nil, nil, errorIndexIncomplete - } - - indexPack := make(map[restic.ID]packInfo) - - // save computed pack header size - for pid, hdrSize := range pack.Size(ctx, idx, true) { - // initialize tpe with NumBlobTypes to indicate it's not set - indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)} - } - - hasDuplicates := false - // iterate over all blobs in index to generate packInfo - idx.Each(ctx, func(blob restic.PackedBlob) { - ip := 
indexPack[blob.PackID] - - // Set blob type if not yet set - if ip.tpe == restic.NumBlobTypes { - ip.tpe = blob.Type - } - - // mark mixed packs with "Invalid blob type" - if ip.tpe != blob.Type { - ip.tpe = restic.InvalidBlob - } - - bh := blob.BlobHandle - size := uint64(blob.Length) - dupCount := usedBlobs[bh] - switch { - case dupCount >= 2: - hasDuplicates = true - // mark as unused for now, we will later on select one copy - ip.unusedSize += size - ip.unusedBlobs++ - - // count as duplicate, will later on change one copy to be counted as used - stats.Size.Duplicate += size - stats.Blobs.Duplicate++ - case dupCount == 1: // used blob, not duplicate - ip.usedSize += size - ip.usedBlobs++ - - stats.Size.Used += size - stats.Blobs.Used++ - default: // unused blob - ip.unusedSize += size - ip.unusedBlobs++ - - stats.Size.Unused += size - stats.Blobs.Unused++ - } - if !blob.IsCompressed() { - ip.uncompressed = true - } - // update indexPack - indexPack[blob.PackID] = ip - }) - - // if duplicate blobs exist, those will be set to either "used" or "unused": - // - mark only one occurrence of duplicate blobs as used - // - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used" - // - if there are no used blobs in a pack, possibly mark duplicates as "unused" - if hasDuplicates { - // iterate again over all blobs in index (this is pretty cheap, all in-mem) - idx.Each(ctx, func(blob restic.PackedBlob) { - bh := blob.BlobHandle - count, ok := usedBlobs[bh] - // skip non-duplicate, aka. normal blobs - // count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining - if !ok || count == 1 { - return - } - - ip := indexPack[blob.PackID] - size := uint64(blob.Length) - switch { - case ip.usedBlobs > 0, count == 0: - // other used blobs in pack or "last" occurrence -> transition to used - ip.usedSize += size - ip.usedBlobs++ - ip.unusedSize -= size - ip.unusedBlobs-- - // same for the global statistics - stats.Size.Used += size - stats.Blobs.Used++ - stats.Size.Duplicate -= size - stats.Blobs.Duplicate-- - // let other occurrences remain marked as unused - usedBlobs[bh] = 1 - default: - // remain unused and decrease counter - count-- - if count == 1 { - // setting count to 1 would lead to forgetting that this blob had duplicates - // thus use the special value zero. This will select the last instance of the blob for keeping. - count = 0 - } - usedBlobs[bh] = count - } - // update indexPack - indexPack[blob.PackID] = ip - }) - } - - // Sanity check. If no duplicates exist, all blobs have value 1. After handling - // duplicates, this also applies to duplicates. 
- for _, count := range usedBlobs { - if count != 1 { - panic("internal error during blob selection") - } - } - - return usedBlobs, indexPack, nil -} - -func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer progress.Printer) (PrunePlan, error) { - removePacksFirst := restic.NewIDSet() - removePacks := restic.NewIDSet() - repackPacks := restic.NewIDSet() - - var repackCandidates []packInfoWithID - var repackSmallCandidates []packInfoWithID - repoVersion := repo.Config().Version - // only repack very small files by default - targetPackSize := repo.PackSize() / 25 - if opts.RepackSmall { - // consider files with at least 80% of the target size as large enough - targetPackSize = repo.PackSize() / 5 * 4 - } - - // loop over all packs and decide what to do - bar := printer.NewCounter("packs processed") - bar.SetMax(uint64(len(indexPack))) - err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { - p, ok := indexPack[id] - if !ok { - // Pack was not referenced in index and is not used => immediately remove! - printer.V("will remove pack %v as it is unused and not indexed\n", id.Str()) - removePacksFirst.Insert(id) - stats.Size.Unref += uint64(packSize) - return nil - } - - if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 { - // Pack size does not fit and pack is needed => error - // If the pack is not needed, this is no error, the pack can - // and will be simply removed, see below. - printer.E("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", - id.Str(), p.unusedSize+p.usedSize, packSize) - return errorSizeNotMatching - } - - // statistics - switch { - case p.usedBlobs == 0: - stats.Packs.Unused++ - case p.unusedBlobs == 0: - stats.Packs.Used++ - default: - stats.Packs.PartlyUsed++ - } - - if p.uncompressed { - stats.Size.Uncompressed += p.unusedSize + p.usedSize - } - mustCompress := false - if repoVersion >= 2 { - // repo v2: always repack tree blobs if uncompressed - // compress data blobs if requested - mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed - } - - // decide what to do - switch { - case p.usedBlobs == 0: - // All blobs in pack are no longer used => remove pack! - removePacks.Insert(id) - stats.Blobs.Remove += p.unusedBlobs - stats.Size.Remove += p.unusedSize - - case opts.RepackCachableOnly && p.tpe == restic.DataBlob: - // if this is a data pack and --repack-cacheable-only is set => keep pack! - stats.Packs.Keep++ - - case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: - if packSize >= int64(targetPackSize) { - // All blobs in pack are used and not mixed => keep pack! - stats.Packs.Keep++ - } else { - repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) - } - - default: - // all other packs are candidates for repacking - repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) - } - - delete(indexPack, id) - bar.Add(1) - return nil - }) - bar.Done() - if err != nil { - return PrunePlan{}, err - } - - // At this point indexPacks contains only missing packs! 
- - // missing packs that are not needed can be ignored - ignorePacks := restic.NewIDSet() - for id, p := range indexPack { - if p.usedBlobs == 0 { - ignorePacks.Insert(id) - stats.Blobs.Remove += p.unusedBlobs - stats.Size.Remove += p.unusedSize - delete(indexPack, id) - } - } - - if len(indexPack) != 0 { - printer.E("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) - for id := range indexPack { - printer.E(" %v\n", id) - } - return PrunePlan{}, errorPacksMissing - } - if len(ignorePacks) != 0 { - printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n") - for id := range ignorePacks { - printer.E("will forget missing pack file %v\n", id) - } - } - - if len(repackSmallCandidates) < 10 { - // too few small files to be worth the trouble, this also prevents endlessly repacking - // if there is just a single pack file below the target size - stats.Packs.Keep += uint(len(repackSmallCandidates)) - } else { - repackCandidates = append(repackCandidates, repackSmallCandidates...) - } - - // Sort repackCandidates such that packs with highest ratio unused/used space are picked first. - // This is equivalent to sorting by unused / total space. - // Instead of unused[i] / used[i] > unused[j] / used[j] we use - // unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64 - // Moreover packs containing trees and too small packs are sorted to the beginning - sort.Slice(repackCandidates, func(i, j int) bool { - pi := repackCandidates[i].packInfo - pj := repackCandidates[j].packInfo - switch { - case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob: - return true - case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob: - return false - case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize): - return true - case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize): - return false - } - return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize - }) - - repack := func(id restic.ID, p packInfo) { - repackPacks.Insert(id) - stats.Blobs.Repack += p.unusedBlobs + p.usedBlobs - stats.Size.Repack += p.unusedSize + p.usedSize - stats.Blobs.Repackrm += p.unusedBlobs - stats.Size.Repackrm += p.unusedSize - if p.uncompressed { - stats.Size.Uncompressed -= p.unusedSize + p.usedSize - } - } - - // calculate limit for number of unused bytes in the repo after repacking - maxUnusedSizeAfter := opts.maxUnusedBytes(stats.Size.Used) - - for _, p := range repackCandidates { - reachedUnusedSizeAfter := (stats.Size.Unused-stats.Size.Remove-stats.Size.Repackrm < maxUnusedSizeAfter) - reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes - packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) - - switch { - case reachedRepackSize: - stats.Packs.Keep++ - - case p.tpe != restic.DataBlob, p.mustCompress: - // repacking non-data packs / uncompressed-trees is only limited by repackSize - repack(p.ID, p.packInfo) - - case reachedUnusedSizeAfter && packIsLargeEnough: - // for all other packs stop repacking if tolerated unused size is reached. 
- stats.Packs.Keep++ - - default: - repack(p.ID, p.packInfo) - } - } - - stats.Packs.Unref = uint(len(removePacksFirst)) - stats.Packs.Repack = uint(len(repackPacks)) - stats.Packs.Remove = uint(len(removePacks)) - - if repo.Config().Version < 2 { - // compression not supported for repository format version 1 - stats.Size.Uncompressed = 0 - } - - return PrunePlan{removePacksFirst: removePacksFirst, - removePacks: removePacks, - repackPacks: repackPacks, - ignorePacks: ignorePacks, - }, nil + return repository.DoPrune(ctx, popts, repo, plan, printer) } // printPruneStats prints out the statistics -func printPruneStats(printer progress.Printer, stats PruneStats) error { +func printPruneStats(printer progress.Printer, stats repository.PruneStats) error { printer.V("\nused: %10d blobs / %s\n", stats.Blobs.Used, ui.FormatBytes(stats.Size.Used)) if stats.Blobs.Duplicate > 0 { printer.V("duplicates: %10d blobs / %s\n", stats.Blobs.Duplicate, ui.FormatBytes(stats.Size.Duplicate)) @@ -706,107 +263,6 @@ func printPruneStats(printer progress.Printer, stats PruneStats) error { return nil } -// DoPrune does the actual pruning: -// - remove unreferenced packs first -// - repack given pack files while keeping the given blobs -// - rebuild the index while ignoring all files that will be deleted -// - delete the files -// plan.removePacks and plan.ignorePacks are modified in this function. -func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan PrunePlan, printer progress.Printer) (err error) { - if opts.DryRun { - printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") - if len(plan.removePacksFirst) > 0 { - printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) - } - printer.V("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) - printer.V("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) - // Always quit here if DryRun was set! 
- return nil - } - - // unreferenced packs can be safely deleted first - if len(plan.removePacksFirst) != 0 { - printer.P("deleting unreferenced packs\n") - DeleteFiles(ctx, repo, plan.removePacksFirst, restic.PackFile, printer) - } - - if len(plan.repackPacks) != 0 { - printer.P("repacking packs\n") - bar := printer.NewCounter("packs repacked") - bar.SetMax(uint64(len(plan.repackPacks))) - _, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) - bar.Done() - if err != nil { - return errors.Fatal(err.Error()) - } - - // Also remove repacked packs - plan.removePacks.Merge(plan.repackPacks) - - if len(plan.keepBlobs) != 0 { - printer.E("%v was not repacked\n\n"+ - "Integrity check failed.\n"+ - "Please report this error (along with the output of the 'prune' run) at\n"+ - "https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs) - return errors.Fatal("internal error: blobs were not repacked") - } - - // allow GC of the blob set - plan.keepBlobs = nil - } - - if len(plan.ignorePacks) == 0 { - plan.ignorePacks = plan.removePacks - } else { - plan.ignorePacks.Merge(plan.removePacks) - } - - if opts.unsafeRecovery { - printer.P("deleting index files\n") - indexFiles := repo.Index().(*index.MasterIndex).IDs() - err = DeleteFilesChecked(ctx, repo, indexFiles, restic.IndexFile, printer) - if err != nil { - return errors.Fatalf("%s", err) - } - } else if len(plan.ignorePacks) != 0 { - err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, false, printer) - if err != nil { - return errors.Fatalf("%s", err) - } - } - - if len(plan.removePacks) != 0 { - printer.P("removing %d old packs\n", len(plan.removePacks)) - DeleteFiles(ctx, repo, plan.removePacks, restic.PackFile, printer) - } - - if opts.unsafeRecovery { - err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) - if err != nil { - return errors.Fatalf("%s", err) - } - } - - printer.P("done\n") - return nil -} - -func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { - printer.P("rebuilding index\n") - - bar := printer.NewCounter("packs processed") - return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ - SaveProgress: bar, - DeleteProgress: func() *progress.Counter { - return printer.NewCounter("old indexes deleted") - }, - DeleteReport: func(id restic.ID, _ error) { - printer.VV("removed index %v\n", id.String()) - }, - SkipDeletion: skipDeletion, - }) -} - func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs restic.CountedBlobSet, err error) { var snapshotTrees restic.IDs printer.P("loading all snapshots...\n") diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index 4c21940c4d6..f5d2e1f6b16 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/repository" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/termstatus" ) @@ -145,7 +146,7 @@ func TestPruneWithDamagedRepository(t *testing.T) { // prune should fail rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { return runPrune(context.TODO(), pruneDefaultOptions, env.gopts, term) - }) == errorPacksMissing, + }) == 
repository.ErrPacksMissing, "prune should have reported index not complete error") } diff --git a/cmd/restic/delete.go b/cmd/restic/delete.go deleted file mode 100644 index 34f71d91aed..00000000000 --- a/cmd/restic/delete.go +++ /dev/null @@ -1,38 +0,0 @@ -package main - -import ( - "context" - - "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/ui/progress" -) - -// DeleteFiles deletes the given fileList of fileType in parallel -// it will print a warning if there is an error, but continue deleting the remaining files -func DeleteFiles(ctx context.Context, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) { - _ = deleteFiles(ctx, true, repo, fileList, fileType, printer) -} - -// DeleteFilesChecked deletes the given fileList of fileType in parallel -// if an error occurs, it will cancel and return this error -func DeleteFilesChecked(ctx context.Context, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { - return deleteFiles(ctx, false, repo, fileList, fileType, printer) -} - -// deleteFiles deletes the given fileList of fileType in parallel -// if ignoreError=true, it will print a warning if there was an error, else it will abort. -func deleteFiles(ctx context.Context, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { - bar := printer.NewCounter("files deleted") - defer bar.Done() - - return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error { - if err != nil { - printer.E("unable to remove %v/%v from the repository\n", fileType, id) - if !ignoreError { - return err - } - } - printer.VV("removed %v/%v\n", fileType, id) - return nil - }, bar) -} diff --git a/internal/repository/prune.go b/internal/repository/prune.go new file mode 100644 index 00000000000..653705bf41f --- /dev/null +++ b/internal/repository/prune.go @@ -0,0 +1,581 @@ +package repository + +import ( + "context" + "math" + "sort" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" +) + +var ErrIndexIncomplete = errors.Fatal("index is not complete") +var ErrPacksMissing = errors.Fatal("packs from index missing in repo") +var ErrSizeNotMatching = errors.Fatal("pack size does not match calculated size from index") + +// PruneOptions collects all options for the cleanup command. 
+type PruneOptions struct { + DryRun bool + UnsafeRecovery bool + + MaxUnusedBytes func(used uint64) (unused uint64) // calculates the number of unused bytes after repacking, according to MaxUnused + MaxRepackBytes uint64 + + RepackCachableOnly bool + RepackSmall bool + RepackUncompressed bool +} + +type PruneStats struct { + Blobs struct { + Used uint + Duplicate uint + Unused uint + Remove uint + Repack uint + Repackrm uint + } + Size struct { + Used uint64 + Duplicate uint64 + Unused uint64 + Remove uint64 + Repack uint64 + Repackrm uint64 + Unref uint64 + Uncompressed uint64 + } + Packs struct { + Used uint + Unused uint + PartlyUsed uint + Unref uint + Keep uint + Repack uint + Remove uint + } +} + +type PrunePlan struct { + removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) + repackPacks restic.IDSet // packs to repack + keepBlobs restic.CountedBlobSet // blobs to keep during repacking + removePacks restic.IDSet // packs to remove + ignorePacks restic.IDSet // packs to ignore when rebuilding the index +} + +type packInfo struct { + usedBlobs uint + unusedBlobs uint + usedSize uint64 + unusedSize uint64 + tpe restic.BlobType + uncompressed bool +} + +type packInfoWithID struct { + ID restic.ID + packInfo + mustCompress bool +} + +// PlanPrune selects which files to rewrite and which to delete and which blobs to keep. +// Also some summary statistics are returned. +func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (PrunePlan, PruneStats, error) { + var stats PruneStats + + usedBlobs, err := getUsedBlobs(ctx, repo) + if err != nil { + return PrunePlan{}, stats, err + } + + printer.P("searching used packs...\n") + keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) + if err != nil { + return PrunePlan{}, stats, err + } + + printer.P("collecting packs for deletion and repacking\n") + plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) + if err != nil { + return PrunePlan{}, stats, err + } + + if len(plan.repackPacks) != 0 { + blobCount := keepBlobs.Len() + // when repacking, we do not want to keep blobs which are + // already contained in kept packs, so delete them from keepBlobs + repo.Index().Each(ctx, func(blob restic.PackedBlob) { + if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { + return + } + keepBlobs.Delete(blob.BlobHandle) + }) + + if keepBlobs.Len() < blobCount/2 { + // replace with copy to shrink map to necessary size if there's a chance to benefit + keepBlobs = keepBlobs.Copy() + } + } else { + // keepBlobs is only needed if packs are repacked + keepBlobs = nil + } + plan.keepBlobs = keepBlobs + + return plan, stats, nil +} + +func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { + // iterate over all blobs in index to find out which blobs are duplicates + // The counter in usedBlobs describes how many instances of the blob exist in the repository index + // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist + idx.Each(ctx, func(blob restic.PackedBlob) { + bh := blob.BlobHandle + count, ok := usedBlobs[bh] + if ok { + if count < math.MaxUint8 { + // don't overflow, but saturate count at 255 + // this can lead to a non-optimal pack 
selection, but won't cause + // problems otherwise + count++ + } + + usedBlobs[bh] = count + } + }) + + // Check if all used blobs have been found in index + missingBlobs := restic.NewBlobSet() + for bh, count := range usedBlobs { + if count == 0 { + // blob does not exist in any pack files + missingBlobs.Insert(bh) + } + } + + if len(missingBlobs) != 0 { + printer.E("%v not found in the index\n\n"+ + "Integrity check failed: Data seems to be missing.\n"+ + "Will not start prune to prevent (additional) data loss!\n"+ + "Please report this error (along with the output of the 'prune' run) at\n"+ + "https://github.com/restic/restic/issues/new/choose\n", missingBlobs) + return nil, nil, ErrIndexIncomplete + } + + indexPack := make(map[restic.ID]packInfo) + + // save computed pack header size + for pid, hdrSize := range pack.Size(ctx, idx, true) { + // initialize tpe with NumBlobTypes to indicate it's not set + indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)} + } + + hasDuplicates := false + // iterate over all blobs in index to generate packInfo + idx.Each(ctx, func(blob restic.PackedBlob) { + ip := indexPack[blob.PackID] + + // Set blob type if not yet set + if ip.tpe == restic.NumBlobTypes { + ip.tpe = blob.Type + } + + // mark mixed packs with "Invalid blob type" + if ip.tpe != blob.Type { + ip.tpe = restic.InvalidBlob + } + + bh := blob.BlobHandle + size := uint64(blob.Length) + dupCount := usedBlobs[bh] + switch { + case dupCount >= 2: + hasDuplicates = true + // mark as unused for now, we will later on select one copy + ip.unusedSize += size + ip.unusedBlobs++ + + // count as duplicate, will later on change one copy to be counted as used + stats.Size.Duplicate += size + stats.Blobs.Duplicate++ + case dupCount == 1: // used blob, not duplicate + ip.usedSize += size + ip.usedBlobs++ + + stats.Size.Used += size + stats.Blobs.Used++ + default: // unused blob + ip.unusedSize += size + ip.unusedBlobs++ + + stats.Size.Unused += size + stats.Blobs.Unused++ + } + if !blob.IsCompressed() { + ip.uncompressed = true + } + // update indexPack + indexPack[blob.PackID] = ip + }) + + // if duplicate blobs exist, those will be set to either "used" or "unused": + // - mark only one occurrence of duplicate blobs as used + // - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used" + // - if there are no used blobs in a pack, possibly mark duplicates as "unused" + if hasDuplicates { + // iterate again over all blobs in index (this is pretty cheap, all in-mem) + idx.Each(ctx, func(blob restic.PackedBlob) { + bh := blob.BlobHandle + count, ok := usedBlobs[bh] + // skip non-duplicate, aka. normal blobs + // count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining + if !ok || count == 1 { + return + } + + ip := indexPack[blob.PackID] + size := uint64(blob.Length) + switch { + case ip.usedBlobs > 0, count == 0: + // other used blobs in pack or "last" occurrence -> transition to used + ip.usedSize += size + ip.usedBlobs++ + ip.unusedSize -= size + ip.unusedBlobs-- + // same for the global statistics + stats.Size.Used += size + stats.Blobs.Used++ + stats.Size.Duplicate -= size + stats.Blobs.Duplicate-- + // let other occurrences remain marked as unused + usedBlobs[bh] = 1 + default: + // remain unused and decrease counter + count-- + if count == 1 { + // setting count to 1 would lead to forgetting that this blob had duplicates + // thus use the special value zero. 
This will select the last instance of the blob for keeping. + count = 0 + } + usedBlobs[bh] = count + } + // update indexPack + indexPack[blob.PackID] = ip + }) + } + + // Sanity check. If no duplicates exist, all blobs have value 1. After handling + // duplicates, this also applies to duplicates. + for _, count := range usedBlobs { + if count != 1 { + panic("internal error during blob selection") + } + } + + return usedBlobs, indexPack, nil +} + +func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer progress.Printer) (PrunePlan, error) { + removePacksFirst := restic.NewIDSet() + removePacks := restic.NewIDSet() + repackPacks := restic.NewIDSet() + + var repackCandidates []packInfoWithID + var repackSmallCandidates []packInfoWithID + repoVersion := repo.Config().Version + // only repack very small files by default + targetPackSize := repo.PackSize() / 25 + if opts.RepackSmall { + // consider files with at least 80% of the target size as large enough + targetPackSize = repo.PackSize() / 5 * 4 + } + + // loop over all packs and decide what to do + bar := printer.NewCounter("packs processed") + bar.SetMax(uint64(len(indexPack))) + err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { + p, ok := indexPack[id] + if !ok { + // Pack was not referenced in index and is not used => immediately remove! + printer.V("will remove pack %v as it is unused and not indexed\n", id.Str()) + removePacksFirst.Insert(id) + stats.Size.Unref += uint64(packSize) + return nil + } + + if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 { + // Pack size does not fit and pack is needed => error + // If the pack is not needed, this is no error, the pack can + // and will be simply removed, see below. + printer.E("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", + id.Str(), p.unusedSize+p.usedSize, packSize) + return ErrSizeNotMatching + } + + // statistics + switch { + case p.usedBlobs == 0: + stats.Packs.Unused++ + case p.unusedBlobs == 0: + stats.Packs.Used++ + default: + stats.Packs.PartlyUsed++ + } + + if p.uncompressed { + stats.Size.Uncompressed += p.unusedSize + p.usedSize + } + mustCompress := false + if repoVersion >= 2 { + // repo v2: always repack tree blobs if uncompressed + // compress data blobs if requested + mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed + } + + // decide what to do + switch { + case p.usedBlobs == 0: + // All blobs in pack are no longer used => remove pack! + removePacks.Insert(id) + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize + + case opts.RepackCachableOnly && p.tpe == restic.DataBlob: + // if this is a data pack and --repack-cacheable-only is set => keep pack! + stats.Packs.Keep++ + + case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: + if packSize >= int64(targetPackSize) { + // All blobs in pack are used and not mixed => keep pack! 
+ stats.Packs.Keep++ + } else { + repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) + } + + default: + // all other packs are candidates for repacking + repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) + } + + delete(indexPack, id) + bar.Add(1) + return nil + }) + bar.Done() + if err != nil { + return PrunePlan{}, err + } + + // At this point indexPacks contains only missing packs! + + // missing packs that are not needed can be ignored + ignorePacks := restic.NewIDSet() + for id, p := range indexPack { + if p.usedBlobs == 0 { + ignorePacks.Insert(id) + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize + delete(indexPack, id) + } + } + + if len(indexPack) != 0 { + printer.E("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) + for id := range indexPack { + printer.E(" %v\n", id) + } + return PrunePlan{}, ErrPacksMissing + } + if len(ignorePacks) != 0 { + printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n") + for id := range ignorePacks { + printer.E("will forget missing pack file %v\n", id) + } + } + + if len(repackSmallCandidates) < 10 { + // too few small files to be worth the trouble, this also prevents endlessly repacking + // if there is just a single pack file below the target size + stats.Packs.Keep += uint(len(repackSmallCandidates)) + } else { + repackCandidates = append(repackCandidates, repackSmallCandidates...) + } + + // Sort repackCandidates such that packs with highest ratio unused/used space are picked first. + // This is equivalent to sorting by unused / total space. + // Instead of unused[i] / used[i] > unused[j] / used[j] we use + // unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64 + // Moreover packs containing trees and too small packs are sorted to the beginning + sort.Slice(repackCandidates, func(i, j int) bool { + pi := repackCandidates[i].packInfo + pj := repackCandidates[j].packInfo + switch { + case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob: + return true + case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob: + return false + case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize): + return true + case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize): + return false + } + return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize + }) + + repack := func(id restic.ID, p packInfo) { + repackPacks.Insert(id) + stats.Blobs.Repack += p.unusedBlobs + p.usedBlobs + stats.Size.Repack += p.unusedSize + p.usedSize + stats.Blobs.Repackrm += p.unusedBlobs + stats.Size.Repackrm += p.unusedSize + if p.uncompressed { + stats.Size.Uncompressed -= p.unusedSize + p.usedSize + } + } + + // calculate limit for number of unused bytes in the repo after repacking + maxUnusedSizeAfter := opts.MaxUnusedBytes(stats.Size.Used) + + for _, p := range repackCandidates { + reachedUnusedSizeAfter := (stats.Size.Unused-stats.Size.Remove-stats.Size.Repackrm < maxUnusedSizeAfter) + reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes + packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) + + switch { + case reachedRepackSize: + stats.Packs.Keep++ + + case p.tpe != restic.DataBlob, p.mustCompress: + // repacking non-data packs / uncompressed-trees is only 
limited by repackSize + repack(p.ID, p.packInfo) + + case reachedUnusedSizeAfter && packIsLargeEnough: + // for all other packs stop repacking if tolerated unused size is reached. + stats.Packs.Keep++ + + default: + repack(p.ID, p.packInfo) + } + } + + stats.Packs.Unref = uint(len(removePacksFirst)) + stats.Packs.Repack = uint(len(repackPacks)) + stats.Packs.Remove = uint(len(removePacks)) + + if repo.Config().Version < 2 { + // compression not supported for repository format version 1 + stats.Size.Uncompressed = 0 + } + + return PrunePlan{removePacksFirst: removePacksFirst, + removePacks: removePacks, + repackPacks: repackPacks, + ignorePacks: ignorePacks, + }, nil +} + +// DoPrune does the actual pruning: +// - remove unreferenced packs first +// - repack given pack files while keeping the given blobs +// - rebuild the index while ignoring all files that will be deleted +// - delete the files +// plan.removePacks and plan.ignorePacks are modified in this function. +func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan PrunePlan, printer progress.Printer) (err error) { + if opts.DryRun { + printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") + if len(plan.removePacksFirst) > 0 { + printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) + } + printer.V("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) + printer.V("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) + // Always quit here if DryRun was set! + return nil + } + + // unreferenced packs can be safely deleted first + if len(plan.removePacksFirst) != 0 { + printer.P("deleting unreferenced packs\n") + _ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer) + } + + if len(plan.repackPacks) != 0 { + printer.P("repacking packs\n") + bar := printer.NewCounter("packs repacked") + bar.SetMax(uint64(len(plan.repackPacks))) + _, err := Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) + bar.Done() + if err != nil { + return errors.Fatal(err.Error()) + } + + // Also remove repacked packs + plan.removePacks.Merge(plan.repackPacks) + + if len(plan.keepBlobs) != 0 { + printer.E("%v was not repacked\n\n"+ + "Integrity check failed.\n"+ + "Please report this error (along with the output of the 'prune' run) at\n"+ + "https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs) + return errors.Fatal("internal error: blobs were not repacked") + } + + // allow GC of the blob set + plan.keepBlobs = nil + } + + if len(plan.ignorePacks) == 0 { + plan.ignorePacks = plan.removePacks + } else { + plan.ignorePacks.Merge(plan.removePacks) + } + + if opts.UnsafeRecovery { + printer.P("deleting index files\n") + indexFiles := repo.Index().(*index.MasterIndex).IDs() + err = deleteFiles(ctx, false, repo, indexFiles, restic.IndexFile, printer) + if err != nil { + return errors.Fatalf("%s", err) + } + } else if len(plan.ignorePacks) != 0 { + err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, false, printer) + if err != nil { + return errors.Fatalf("%s", err) + } + } + + if len(plan.removePacks) != 0 { + printer.P("removing %d old packs\n", len(plan.removePacks)) + _ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer) + } + + if opts.UnsafeRecovery { + err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) + if err != nil { + return errors.Fatalf("%s", err) + } 
+ } + + printer.P("done\n") + return nil +} + +// deleteFiles deletes the given fileList of fileType in parallel +// if ignoreError=true, it will print a warning if there was an error, else it will abort. +func deleteFiles(ctx context.Context, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { + bar := printer.NewCounter("files deleted") + defer bar.Done() + + return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error { + if err != nil { + printer.E("unable to remove %v/%v from the repository\n", fileType, id) + if !ignoreError { + return err + } + } + printer.VV("removed %v/%v\n", fileType, id) + return nil + }, bar) +} From 85e4021619c58b73fd017758b0565e12b459028f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 19:49:03 +0200 Subject: [PATCH 107/893] prune: move additional option checks to repository --- cmd/restic/cmd_prune.go | 8 -------- cmd/restic/cmd_prune_integration_test.go | 2 +- internal/repository/prune.go | 12 ++++++++++++ 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 578414f527f..0ade0b39d26 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -150,14 +150,6 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term } defer unlock() - if repo.Connections() < 2 { - return errors.Fatal("prune requires a backend connection limit of at least two") - } - - if repo.Config().Version < 2 && opts.RepackUncompressed { - return errors.Fatal("compression requires at least repository format version 2") - } - if opts.UnsafeNoSpaceRecovery != "" { repoID := repo.Config().ID if opts.UnsafeNoSpaceRecovery != repoID { diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index f5d2e1f6b16..715adea9a6f 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -35,7 +35,7 @@ func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) { } t.Run("0"+suffix, func(t *testing.T) { opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery} - checkOpts := CheckOptions{ReadData: true, CheckUnused: true} + checkOpts := CheckOptions{ReadData: true, CheckUnused: !unsafeNoSpaceRecovery} testPrune(t, opts, checkOpts) }) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 653705bf41f..5ebe91f035a 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -2,6 +2,7 @@ package repository import ( "context" + "fmt" "math" "sort" @@ -87,6 +88,17 @@ type packInfoWithID struct { func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (PrunePlan, PruneStats, error) { var stats PruneStats + if opts.UnsafeRecovery { + // prevent repacking data to make sure users cannot get stuck. 
+ opts.MaxRepackBytes = 0 + } + if repo.Connections() < 2 { + return PrunePlan{}, stats, fmt.Errorf("prune requires a backend connection limit of at least two") + } + if repo.Config().Version < 2 && opts.RepackUncompressed { + return PrunePlan{}, stats, fmt.Errorf("compression requires at least repository format version 2") + } + usedBlobs, err := getUsedBlobs(ctx, repo) if err != nil { return PrunePlan{}, stats, err From 4c9a10ca3736ff862daa7dfcd4da6cfed57f116d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 10 Apr 2024 21:31:53 +0200 Subject: [PATCH 108/893] repair packs: deduplicate index rebuild --- internal/repository/repair_pack.go | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/internal/repository/repair_pack.go b/internal/repository/repair_pack.go index 2e036889953..a4261517ac4 100644 --- a/internal/repository/repair_pack.go +++ b/internal/repository/repair_pack.go @@ -60,19 +60,7 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, } // remove salvaged packs from index - printer.P("rebuilding index") - - bar = printer.NewCounter("packs processed") - err = repo.Index().Save(ctx, repo, ids, nil, restic.MasterIndexSaveOpts{ - SaveProgress: bar, - DeleteProgress: func() *progress.Counter { - return printer.NewCounter("old indexes deleted") - }, - DeleteReport: func(id restic.ID, _ error) { - printer.VV("removed index %v", id.String()) - }, - }) - + err = rebuildIndexFiles(ctx, repo, ids, nil, false, printer) if err != nil { return err } From 7ba5e95a82adc59c91c5cf5bc6970cc04cd456d5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:42:26 +0200 Subject: [PATCH 109/893] check: allow tests to only verify pack&index integrity --- internal/archiver/archiver_test.go | 10 +++++----- internal/checker/testing.go | 24 +++++++++++++----------- internal/restic/testing_test.go | 2 +- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 841c8f2ce8f..71fae003f96 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1430,7 +1430,7 @@ func TestArchiverSnapshot(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) // check that the snapshot contains the targets with absolute paths for i, target := range sn.Paths { @@ -1590,7 +1590,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -1794,7 +1794,7 @@ func TestArchiverParent(t *testing.T) { t.Logf("second backup saved as %v", secondSnapshotID.Str()) t.Logf("testfs: %v", testFS) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -1927,7 +1927,7 @@ func TestArchiverErrorReporting(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -2288,7 +2288,7 @@ func TestMetadataChanged(t *testing.T) { // make sure the content matches TestEnsureFileContent(context.Background(), t, repo, "testfile", node3, files["testfile"].(TestFile)) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) } func TestRacyFileSwap(t *testing.T) { diff --git a/internal/checker/testing.go b/internal/checker/testing.go index fe1679393ba..9e949af026f 100644 --- a/internal/checker/testing.go +++ 
b/internal/checker/testing.go @@ -8,7 +8,7 @@ import ( ) // TestCheckRepo runs the checker on repo. -func TestCheckRepo(t testing.TB, repo restic.Repository) { +func TestCheckRepo(t testing.TB, repo restic.Repository, skipStructure bool) { chkr := New(repo, true) hints, errs := chkr.LoadIndex(context.TODO(), nil) @@ -33,18 +33,20 @@ func TestCheckRepo(t testing.TB, repo restic.Repository) { t.Error(err) } - // structure - errChan = make(chan error) - go chkr.Structure(context.TODO(), nil, errChan) + if !skipStructure { + // structure + errChan = make(chan error) + go chkr.Structure(context.TODO(), nil, errChan) - for err := range errChan { - t.Error(err) - } + for err := range errChan { + t.Error(err) + } - // unused blobs - blobs := chkr.UnusedBlobs(context.TODO()) - if len(blobs) > 0 { - t.Errorf("unused blobs found: %v", blobs) + // unused blobs + blobs := chkr.UnusedBlobs(context.TODO()) + if len(blobs) > 0 { + t.Errorf("unused blobs found: %v", blobs) + } } // read data diff --git a/internal/restic/testing_test.go b/internal/restic/testing_test.go index ae8f8dd3483..0a0c43892e2 100644 --- a/internal/restic/testing_test.go +++ b/internal/restic/testing_test.go @@ -45,7 +45,7 @@ func TestCreateSnapshot(t *testing.T) { t.Fatalf("snapshot has zero tree ID") } - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) } func BenchmarkTestCreateSnapshot(t *testing.B) { From 35277b7797d60aed97a85703c919b8f7ca7c8dcc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:43:18 +0200 Subject: [PATCH 110/893] backend/mem: cleanup not found error message --- internal/backend/mem/mem_backend.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go index eea5b060e4c..8b115b18729 100644 --- a/internal/backend/mem/mem_backend.go +++ b/internal/backend/mem/mem_backend.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/base64" + "fmt" "hash" "io" "net/http" @@ -41,7 +42,7 @@ func NewFactory() location.Factory { ) } -var errNotFound = errors.New("not found") +var errNotFound = fmt.Errorf("not found") const connectionCount = 2 From eda9f7beb45931b68f1844cdf2516ba4f5c6fe4b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:44:13 +0200 Subject: [PATCH 111/893] ui/progress: add helper to print messages during tests --- internal/ui/progress/printer.go | 35 +++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/internal/ui/progress/printer.go b/internal/ui/progress/printer.go index a671621e98b..a2bc4c4b547 100644 --- a/internal/ui/progress/printer.go +++ b/internal/ui/progress/printer.go @@ -1,5 +1,7 @@ package progress +import "testing" + // A Printer can can return a new counter or print messages // at different log levels. // It must be safe to call its methods from concurrent goroutines. @@ -28,3 +30,36 @@ func (*NoopPrinter) P(_ string, _ ...interface{}) {} func (*NoopPrinter) V(_ string, _ ...interface{}) {} func (*NoopPrinter) VV(_ string, _ ...interface{}) {} + +// TestPrinter prints messages during testing +type TestPrinter struct { + t testing.TB +} + +func NewTestPrinter(t testing.TB) *TestPrinter { + return &TestPrinter{ + t: t, + } +} + +var _ Printer = (*TestPrinter)(nil) + +func (p *TestPrinter) NewCounter(_ string) *Counter { + return nil +} + +func (p *TestPrinter) E(msg string, args ...interface{}) { + p.t.Logf("error: "+msg, args...) 
+} + +func (p *TestPrinter) P(msg string, args ...interface{}) { + p.t.Logf("print: "+msg, args...) +} + +func (p *TestPrinter) V(msg string, args ...interface{}) { + p.t.Logf("verbose: "+msg, args...) +} + +func (p *TestPrinter) VV(msg string, args ...interface{}) { + p.t.Logf("verbose2: "+msg, args...) +} From c65459cd8a1ca1044e20a923fc94be0beb62859f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:48:40 +0200 Subject: [PATCH 112/893] repository: speed up tests --- internal/repository/repack_test.go | 18 +++++++++++++----- internal/repository/repair_pack_test.go | 2 +- internal/repository/repository_test.go | 5 ++--- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index e5e46ac2af8..e3b4a599607 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -18,7 +18,7 @@ func randomSize(min, max int) int { return rand.Intn(max-min) + min } -func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) { +func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32, smallBlobs bool) { var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) @@ -30,7 +30,11 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl if rand.Float32() < pData { tpe = restic.DataBlob - length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + if smallBlobs { + length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB of data + } else { + length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + } } else { tpe = restic.TreeBlob length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB @@ -219,7 +223,9 @@ func testRepack(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 100, 0.7) + // add a small amount of blobs twice to create multiple pack files + createRandomBlobs(t, repo, 10, 0.7, false) + createRandomBlobs(t, repo, 10, 0.7, false) packsBefore := listPacks(t, repo) @@ -302,7 +308,9 @@ func testRepackCopy(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 100, 0.7) + // add a small amount of blobs twice to create multiple pack files + createRandomBlobs(t, repo, 10, 0.7, false) + createRandomBlobs(t, repo, 10, 0.7, false) flush(t, repo) _, keepBlobs := selectBlobs(t, repo, 0.2) @@ -343,7 +351,7 @@ func testRepackWrongBlob(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 5, 0.7) + createRandomBlobs(t, repo, 5, 0.7, false) createRandomWrongBlob(t, repo) // just keep all blobs, but also rewrite every pack diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index b950245aae0..c5cdf5ed52e 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -109,7 +109,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 5, 0.7) + createRandomBlobs(t, repo, 5, 0.7, true) packsBefore := listPacks(t, repo) blobsBefore := listBlobs(repo) diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 98ff560fe08..b013c482362 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -242,8 +242,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (* } func 
TestRepositoryLoadUnpackedBroken(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, repoFixture) - defer cleanup() + repo := repository.TestRepository(t) data := rtest.Random(23, 12345) id := restic.Hash(data) @@ -252,7 +251,7 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { data[0] ^= 0xff // store broken file - err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, nil)) + err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, repo.Backend().Hasher())) rtest.OK(t, err) // without a retry backend this will just return an error that the file is broken From b25fc2c89d2eb36710fd48749c0584883c85aca5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:49:04 +0200 Subject: [PATCH 113/893] repository: remove redundant flushes from tests --- internal/repository/repack_test.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index e3b4a599607..cd40c31743a 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -170,12 +170,6 @@ func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs rest } } -func flush(t *testing.T, repo restic.Repository) { - if err := repo.Flush(context.TODO()); err != nil { - t.Fatalf("repo.SaveIndex() %v", err) - } -} - func rebuildIndex(t *testing.T, repo restic.Repository) { err := repo.SetIndex(index.NewMasterIndex()) rtest.OK(t, err) @@ -239,8 +233,6 @@ func testRepack(t *testing.T, version uint) { packsBefore, packsAfter) } - flush(t, repo) - removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2) removePacks := findPacksForBlobs(t, repo, removeBlobs) @@ -311,7 +303,6 @@ func testRepackCopy(t *testing.T, version uint) { // add a small amount of blobs twice to create multiple pack files createRandomBlobs(t, repo, 10, 0.7, false) createRandomBlobs(t, repo, 10, 0.7, false) - flush(t, repo) _, keepBlobs := selectBlobs(t, repo, 0.2) copyPacks := findPacksForBlobs(t, repo, keepBlobs) From 7d1b9cde3438c71aa11e37fa3fa5279aa15d7ae4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:49:33 +0200 Subject: [PATCH 114/893] repository: use normal Init method in tests --- internal/repository/testing.go | 7 +++++-- internal/restic/config.go | 16 ---------------- 2 files changed, 5 insertions(+), 18 deletions(-) diff --git a/internal/repository/testing.go b/internal/repository/testing.go index 874d179cedb..9fb643a46e8 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -60,8 +60,11 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o t.Fatalf("TestRepository(): new repo failed: %v", err) } - cfg := restic.TestCreateConfig(t, testChunkerPol, version) - err = repo.init(context.TODO(), test.TestPassword, cfg) + if version == 0 { + version = restic.StableRepoVersion + } + pol := testChunkerPol + err = repo.Init(context.TODO(), version, test.TestPassword, &pol) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) } diff --git a/internal/restic/config.go b/internal/restic/config.go index 67af259ba2f..3fb61cc137f 100644 --- a/internal/restic/config.go +++ b/internal/restic/config.go @@ -51,22 +51,6 @@ func CreateConfig(version uint) (Config, error) { return cfg, nil } -// TestCreateConfig creates a config for use within tests. 
-func TestCreateConfig(t testing.TB, pol chunker.Pol, version uint) (cfg Config) { - cfg.ChunkerPolynomial = pol - - cfg.ID = NewRandomID().String() - if version == 0 { - version = StableRepoVersion - } - if version < MinRepoVersion || version > MaxRepoVersion { - t.Fatalf("version %d is out of range", version) - } - cfg.Version = version - - return cfg -} - var checkPolynomial = true var checkPolynomialOnce sync.Once From 310db03c0e90c1a448599daf32036166be906345 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:51:27 +0200 Subject: [PATCH 115/893] repair index: improve log output if index cannot be deleted The operation will always fail with an error if an index cannot be deleted. Thus, this change is purely cosmetic. --- internal/repository/repair_index.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index 7cf598fa5c6..ccf8bcdb032 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -110,8 +110,12 @@ func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks DeleteProgress: func() *progress.Counter { return printer.NewCounter("old indexes deleted") }, - DeleteReport: func(id restic.ID, _ error) { - printer.VV("removed index %v\n", id.String()) + DeleteReport: func(id restic.ID, err error) { + if err != nil { + printer.VV("failed to remove index %v: %v\n", id.String(), err) + } else { + printer.VV("removed index %v\n", id.String()) + } }, SkipDeletion: skipDeletion, }) From 8d507c1372aed2285bcc2231782204d516d5b1ed Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:53:08 +0200 Subject: [PATCH 116/893] repository: add basic test for RepairIndex --- internal/repository/repack_test.go | 6 +- internal/repository/repair_index_test.go | 79 ++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 internal/repository/repair_index_test.go diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index cd40c31743a..2f786710135 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -125,8 +125,12 @@ func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 } func listPacks(t *testing.T, repo restic.Lister) restic.IDSet { + return listFiles(t, repo, restic.PackFile) +} + +func listFiles(t *testing.T, repo restic.Lister, tpe backend.FileType) restic.IDSet { list := restic.NewIDSet() - err := repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + err := repo.List(context.TODO(), tpe, func(id restic.ID, size int64) error { list.Insert(id) return nil }) diff --git a/internal/repository/repair_index_test.go b/internal/repository/repair_index_test.go new file mode 100644 index 00000000000..adaee3832fa --- /dev/null +++ b/internal/repository/repair_index_test.go @@ -0,0 +1,79 @@ +package repository_test + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" +) + +func listIndex(t *testing.T, repo restic.Lister) restic.IDSet { + return listFiles(t, repo, restic.IndexFile) +} + +func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, repo *repository.Repository)) { + 
repo := repository.TestRepository(t).(*repository.Repository) + createRandomBlobs(t, repo, 4, 0.5, true) + createRandomBlobs(t, repo, 5, 0.5, true) + indexes := listIndex(t, repo) + t.Logf("old indexes %v", indexes) + + damage(t, repo) + + repo = repository.TestOpenBackend(t, repo.Backend()).(*repository.Repository) + rtest.OK(t, repository.RepairIndex(context.TODO(), repo, repository.RepairIndexOptions{ + ReadAllPacks: readAllPacks, + }, &progress.NoopPrinter{})) + + newIndexes := listIndex(t, repo) + old := indexes.Intersect(newIndexes) + rtest.Assert(t, len(old) == 0, "expected old indexes to be removed, found %v", old) + + checker.TestCheckRepo(t, repo, true) +} + +func TestRebuildIndex(t *testing.T) { + for _, test := range []struct { + name string + damage func(t *testing.T, repo *repository.Repository) + }{ + { + "valid index", + func(t *testing.T, repo *repository.Repository) {}, + }, + { + "damaged index", + func(t *testing.T, repo *repository.Repository) { + index := listIndex(t, repo).List()[0] + replaceFile(t, repo, backend.Handle{Type: restic.IndexFile, Name: index.String()}, func(b []byte) []byte { + b[0] ^= 0xff + return b + }) + }, + }, + { + "missing index", + func(t *testing.T, repo *repository.Repository) { + index := listIndex(t, repo).List()[0] + rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: index.String()})) + }, + }, + { + "missing pack", + func(t *testing.T, repo *repository.Repository) { + pack := listPacks(t, repo).List()[0] + rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: pack.String()})) + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + testRebuildIndex(t, false, test.damage) + testRebuildIndex(t, true, test.damage) + }) + } +} From d8622c86eb40c4c3751a4fb818a36ecbe62cd291 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 12:32:29 +0200 Subject: [PATCH 117/893] prune: clean up internal interface --- cmd/restic/cmd_prune.go | 6 +++--- internal/repository/prune.go | 40 +++++++++++++++++++++++++----------- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 0ade0b39d26..ea5acddf337 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -191,7 +191,7 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption RepackUncompressed: opts.RepackUncompressed, } - plan, stats, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { return getUsedBlobs(ctx, repo, ignoreSnapshots, printer) }, printer) if err != nil { @@ -202,7 +202,7 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption printer.P("\nWould have made the following changes:") } - err = printPruneStats(printer, stats) + err = printPruneStats(printer, plan.Stats()) if err != nil { return err } @@ -210,7 +210,7 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption // Trigger GC to reset garbage collection threshold runtime.GC() - return repository.DoPrune(ctx, popts, repo, plan, printer) + return plan.Execute(ctx, printer) } // printPruneStats prints out the statistics diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 5ebe91f035a..d34f3c88f9b 100644 --- 
a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -66,6 +66,10 @@ type PrunePlan struct { keepBlobs restic.CountedBlobSet // blobs to keep during repacking removePacks restic.IDSet // packs to remove ignorePacks restic.IDSet // packs to ignore when rebuilding the index + + repo restic.Repository + stats PruneStats + opts PruneOptions } type packInfo struct { @@ -85,7 +89,7 @@ type packInfoWithID struct { // PlanPrune selects which files to rewrite and which to delete and which blobs to keep. // Also some summary statistics are returned. -func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (PrunePlan, PruneStats, error) { +func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (*PrunePlan, error) { var stats PruneStats if opts.UnsafeRecovery { @@ -93,27 +97,27 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g opts.MaxRepackBytes = 0 } if repo.Connections() < 2 { - return PrunePlan{}, stats, fmt.Errorf("prune requires a backend connection limit of at least two") + return nil, fmt.Errorf("prune requires a backend connection limit of at least two") } if repo.Config().Version < 2 && opts.RepackUncompressed { - return PrunePlan{}, stats, fmt.Errorf("compression requires at least repository format version 2") + return nil, fmt.Errorf("compression requires at least repository format version 2") } usedBlobs, err := getUsedBlobs(ctx, repo) if err != nil { - return PrunePlan{}, stats, err + return nil, err } printer.P("searching used packs...\n") keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) if err != nil { - return PrunePlan{}, stats, err + return nil, err } printer.P("collecting packs for deletion and repacking\n") plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) if err != nil { - return PrunePlan{}, stats, err + return nil, err } if len(plan.repackPacks) != 0 { @@ -137,7 +141,11 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g } plan.keepBlobs = keepBlobs - return plan, stats, nil + plan.repo = repo + plan.stats = stats + plan.opts = opts + + return &plan, nil } func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { @@ -489,14 +497,18 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi }, nil } -// DoPrune does the actual pruning: +func (plan *PrunePlan) Stats() PruneStats { + return plan.stats +} + +// Execute does the actual pruning: // - remove unreferenced packs first // - repack given pack files while keeping the given blobs // - rebuild the index while ignoring all files that will be deleted // - delete the files // plan.removePacks and plan.ignorePacks are modified in this function. 
-func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan PrunePlan, printer progress.Printer) (err error) { - if opts.DryRun { +func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (err error) { + if plan.opts.DryRun { printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") if len(plan.removePacksFirst) > 0 { printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) @@ -507,6 +519,10 @@ func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, pla return nil } + repo := plan.repo + // make sure the plan can only be used once + plan.repo = nil + // unreferenced packs can be safely deleted first if len(plan.removePacksFirst) != 0 { printer.P("deleting unreferenced packs\n") @@ -544,7 +560,7 @@ func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, pla plan.ignorePacks.Merge(plan.removePacks) } - if opts.UnsafeRecovery { + if plan.opts.UnsafeRecovery { printer.P("deleting index files\n") indexFiles := repo.Index().(*index.MasterIndex).IDs() err = deleteFiles(ctx, false, repo, indexFiles, restic.IndexFile, printer) @@ -563,7 +579,7 @@ func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, pla _ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer) } - if opts.UnsafeRecovery { + if plan.opts.UnsafeRecovery { err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) if err != nil { return errors.Fatalf("%s", err) From 038586dc9d5908f363a617a748b266e241df2540 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 12:32:48 +0200 Subject: [PATCH 118/893] repository: add minimal test for prune --- internal/repository/prune_test.go | 93 +++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 internal/repository/prune_test.go diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go new file mode 100644 index 00000000000..13bf58adc58 --- /dev/null +++ b/internal/repository/prune_test.go @@ -0,0 +1,93 @@ +package repository_test + +import ( + "context" + "math" + "testing" + + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" +) + +func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { + repo := repository.TestRepository(t).(*repository.Repository) + createRandomBlobs(t, repo, 4, 0.5, true) + createRandomBlobs(t, repo, 5, 0.5, true) + keep, _ := selectBlobs(t, repo, 0.5) + + plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + return restic.NewCountedBlobSet(keep.List()...), nil + }, &progress.NoopPrinter{}) + rtest.OK(t, err) + + rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{})) + + repo = repository.TestOpenBackend(t, repo.Backend()).(*repository.Repository) + checker.TestCheckRepo(t, repo, true) + + if errOnUnused { + existing := listBlobs(repo) + rtest.Assert(t, existing.Equals(keep), "unexpected blobs, wanted %v got %v", keep, existing) + } +} + +func TestPrune(t *testing.T) { + for _, test := range []struct { + name string + opts repository.PruneOptions + errOnUnused bool + }{ + { + name: "0", + opts: repository.PruneOptions{ + 
MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return 0 }, + }, + errOnUnused: true, + }, + { + name: "50", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 2 }, + }, + }, + { + name: "unlimited", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 }, + }, + }, + { + name: "cachableonly", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 20 }, + RepackCachableOnly: true, + }, + }, + { + name: "small", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 }, + RepackSmall: true, + }, + errOnUnused: true, + }, + } { + t.Run(test.name, func(t *testing.T) { + testPrune(t, test.opts, test.errOnUnused) + }) + t.Run(test.name+"-recovery", func(t *testing.T) { + opts := test.opts + opts.UnsafeRecovery = true + // unsafeNoSpaceRecovery does not repack partially used pack files + testPrune(t, opts, false) + }) + } +} From defd7ae729f3832e566ec618868112d615207b4a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 13:46:21 +0200 Subject: [PATCH 119/893] prune/repair index: reset in-memory index after command The current in-memory index becomes stale after prune or repair index have run. Thus, just drop the in-memory index altogether once these commands have finished. --- internal/repository/prune.go | 7 +++++++ internal/repository/repair_index.go | 9 ++++++++- internal/repository/repository.go | 18 +++++++++++++++--- internal/restic/repository.go | 1 + 4 files changed, 31 insertions(+), 4 deletions(-) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index d34f3c88f9b..8900fffaa4f 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -586,6 +586,13 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e } } + if err != nil { + return err + } + + // drop outdated in-memory index + repo.ClearIndex() + printer.P("done\n") return nil } diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index ccf8bcdb032..63e10413278 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -98,7 +98,14 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, } } - return rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) + err = rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) + if err != nil { + return err + } + + // drop outdated in-memory index + repo.ClearIndex() + return nil } func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 8e34c712559..f163c6a19a4 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -142,9 +142,6 @@ func (r *Repository) DisableAutoIndexUpdate() { // setConfig assigns the given config and updates the repository parameters accordingly func (r *Repository) setConfig(cfg restic.Config) { r.cfg = cfg - if r.cfg.Version >= 2 { - r.idx.MarkCompressed() - } } // Config returns the repository configuration. 
@@ -637,9 +634,21 @@ func (r *Repository) Index() restic.MasterIndex { // SetIndex instructs the repository to use the given index. func (r *Repository) SetIndex(i restic.MasterIndex) error { r.idx = i.(*index.MasterIndex) + r.configureIndex() return r.prepareCache() } +func (r *Repository) ClearIndex() { + r.idx = index.NewMasterIndex() + r.configureIndex() +} + +func (r *Repository) configureIndex() { + if r.cfg.Version >= 2 { + r.idx.MarkCompressed() + } +} + // LoadIndex loads all index files from the backend in parallel and stores them func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { debug.Log("Loading index") @@ -662,6 +671,9 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { defer p.Done() } + // reset in-memory index before loading it from the repository + r.ClearIndex() + err = index.ForAllIndexes(ctx, indexList, r, func(_ restic.ID, idx *index.Index, _ bool, err error) error { if err != nil { return err diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 66cc22ea95b..89c54ffbb1b 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -26,6 +26,7 @@ type Repository interface { Index() MasterIndex LoadIndex(context.Context, *progress.Counter) error + ClearIndex() SetIndex(MasterIndex) error LookupBlobSize(ID, BlobType) (uint, bool) From 09587e6c08a84fff1687715d03d3b27b8dd6c9ed Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 13:57:19 +0200 Subject: [PATCH 120/893] repository: duplicate a few blobs in prune tests --- internal/repository/prune_test.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index 13bf58adc58..bff221f492c 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -10,6 +10,7 @@ import ( "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/progress" + "golang.org/x/sync/errgroup" ) func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { @@ -18,6 +19,17 @@ func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { createRandomBlobs(t, repo, 5, 0.5, true) keep, _ := selectBlobs(t, repo, 0.5) + var wg errgroup.Group + repo.StartPackUploader(context.TODO(), &wg) + // duplicate a few blobs to exercise those code paths + for blob := range keep { + buf, err := repo.LoadBlob(context.TODO(), blob.Type, blob.ID, nil) + rtest.OK(t, err) + _, _, _, err = repo.SaveBlob(context.TODO(), blob.Type, buf, blob.ID, true) + rtest.OK(t, err) + } + rtest.OK(t, repo.Flush(context.TODO())) + plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { return restic.NewCountedBlobSet(keep.List()...), nil }, &progress.NoopPrinter{}) From c9191ea72c569dfcc647d6349137447625e19195 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 14:17:40 +0200 Subject: [PATCH 121/893] forget: cleanup verbose output on snapshot deletion error --- cmd/restic/cmd_forget.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index f6fc5379c21..9018da21173 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -276,8 +276,9 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption err := restic.ParallelRemove(ctx, 
repo, removeSnIDs, restic.SnapshotFile, func(id restic.ID, err error) error { if err != nil { printer.E("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) + } else { + printer.VV("removed %v/%v\n", restic.SnapshotFile, id) } - printer.VV("removed %v/%v\n", restic.SnapshotFile, id) return nil }, bar) bar.Done() From 001bb716764c8ec62674b7e610a003f00a626fd3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 17 Apr 2024 18:32:30 +0200 Subject: [PATCH 122/893] repair packs: Properly close backup files --- cmd/restic/cmd_repair_packs.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 00dee076b33..a7b448d85a6 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -82,6 +82,10 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T return err }) if err != nil { + _ = f.Close() + return err + } + if err := f.Close(); err != nil { return err } } From a82ed71de7a35b64a1925799c12812a0163dfea3 Mon Sep 17 00:00:00 2001 From: coderwander <770732124@qq.com> Date: Tue, 16 Apr 2024 11:22:26 +0800 Subject: [PATCH 123/893] Fix struct names Signed-off-by: coderwander <770732124@qq.com> --- internal/fs/vss_windows.go | 2 +- internal/walker/walker_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 8c9b8942b47..d75567d25ac 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -190,7 +190,7 @@ func (e *vssError) Error() string { return fmt.Sprintf("VSS error: %s: %s (%#x)", e.text, e.hresult.Str(), e.hresult) } -// VssError encapsulates errors returned from calling VSS api. +// vssTextError encapsulates errors returned from calling VSS api. type vssTextError struct { text string } diff --git a/internal/walker/walker_test.go b/internal/walker/walker_test.go index 0f0009107c5..75f80e57f37 100644 --- a/internal/walker/walker_test.go +++ b/internal/walker/walker_test.go @@ -13,7 +13,7 @@ import ( // TestTree is used to construct a list of trees for testing the walker. type TestTree map[string]interface{} -// TestNode is used to test the walker. +// TestFile is used to test the walker. type TestFile struct { Size uint64 } From 6aced61c72b3efafc4da065a035684bfcd1e514b Mon Sep 17 00:00:00 2001 From: will-ca <37680486+will-ca@users.noreply.github.com> Date: Thu, 18 Apr 2024 07:29:55 +0000 Subject: [PATCH 124/893] Tiny docs wording clarification. --- cmd/restic/cmd_stats.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 20d7a485c88..6bf0dbf1935 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -38,7 +38,7 @@ depending on what you are trying to calculate. The modes are: * restore-size: (default) Counts the size of the restored files. -* files-by-contents: Counts total size of files, where a file is +* files-by-contents: Counts total size of unique files, where a file is considered unique if it has unique contents. * raw-data: Counts the size of blobs in the repository, regardless of how many files reference them. 
From 10355c3fb69db2e5740525c261fad49e0f9a451e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 19 Apr 2024 20:48:43 +0200 Subject: [PATCH 125/893] repository: Better error message if blob is larger than 4GB --- internal/index/index.go | 5 ++--- internal/repository/repository.go | 5 +++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/internal/index/index.go b/internal/index/index.go index 1fb2c155edd..73128f7bb21 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "math" "sync" "time" @@ -69,11 +70,9 @@ func (idx *Index) addToPacks(id restic.ID) int { return len(idx.packs) - 1 } -const maxuint32 = 1<<32 - 1 - func (idx *Index) store(packIndex int, blob restic.Blob) { // assert that offset and length fit into uint32! - if blob.Offset > maxuint32 || blob.Length > maxuint32 || blob.UncompressedLength > maxuint32 { + if blob.Offset > math.MaxUint32 || blob.Length > math.MaxUint32 || blob.UncompressedLength > math.MaxUint32 { panic("offset or length does not fit in uint32. You have packs > 4GB!") } diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 8e34c712559..4198e574f84 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "io" + "math" "os" "runtime" "sort" @@ -917,6 +918,10 @@ func (r *Repository) Close() error { // occupies in the repo (compressed or not, including encryption overhead). func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) { + if int64(len(buf)) > math.MaxUint32 { + return restic.ID{}, false, 0, fmt.Errorf("blob is larger than 4GB") + } + // compute plaintext hash if not already set if id.IsNull() { // Special case the hash calculation for all zero chunks. This is especially From 6c6dceade37a2072e2c728136e0b3e4a6ea94202 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 19 Apr 2024 22:26:14 +0200 Subject: [PATCH 126/893] global: unify backend open and create --- cmd/restic/global.go | 52 +++++++++++++++++--------------------------- 1 file changed, 20 insertions(+), 32 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index cc47496f36c..c93fb4bce29 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -570,16 +570,13 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro return cfg, nil } -// Open the backend specified by a location config. 
-func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { +func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options.Options, create bool) (backend.Backend, error) { debug.Log("parsing location %v", location.StripPassword(gopts.backends, s)) loc, err := location.Parse(gopts.backends, s) if err != nil { return nil, errors.Fatalf("parsing repository location failed: %v", err) } - var be backend.Backend - cfg, err := parseConfig(loc, opts) if err != nil { return nil, err @@ -599,7 +596,13 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio return nil, errors.Fatalf("invalid backend: %q", loc.Scheme) } - be, err = factory.Open(ctx, cfg, rt, lim) + var be backend.Backend + if create { + be, err = factory.Create(ctx, cfg, rt, nil) + } else { + be, err = factory.Open(ctx, cfg, rt, lim) + } + if err != nil { return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err) } @@ -615,6 +618,17 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio } } + return be, nil +} + +// Open the backend specified by a location config. +func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { + + be, err := innerOpen(ctx, s, gopts, opts, false) + if err != nil { + return nil, err + } + // check if config is there fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile}) if err != nil { @@ -630,31 +644,5 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio // Create the backend specified by URI. func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { - debug.Log("parsing location %v", location.StripPassword(gopts.backends, s)) - loc, err := location.Parse(gopts.backends, s) - if err != nil { - return nil, err - } - - cfg, err := parseConfig(loc, opts) - if err != nil { - return nil, err - } - - rt, err := backend.Transport(globalOptions.TransportOptions) - if err != nil { - return nil, errors.Fatal(err.Error()) - } - - factory := gopts.backends.Lookup(loc.Scheme) - if factory == nil { - return nil, errors.Fatalf("invalid backend: %q", loc.Scheme) - } - - be, err := factory.Create(ctx, cfg, rt, nil) - if err != nil { - return nil, err - } - - return logger.New(sema.NewBackend(be)), nil + return innerOpen(ctx, s, gopts, opts, true) } From 621012dac08f88b844349d5a976a3f336a883be1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 31 Dec 2023 15:27:36 +0100 Subject: [PATCH 127/893] repository: Add blob loading fallback to LoadBlobsFromPack Try to retrieve individual blobs via LoadBlob if streaming did not work. 
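The heart of the fallback, reduced to a sketch (simplified; the complete version is in the streamPack hunk below): when decoding a blob from the streamed pack data fails, the blob is looked up elsewhere in the repository before the result is handed to the callback.

    // inside streamPack, for every blob decoded from the pack stream (sketch only):
    if val.Err != nil && loadBlobFn != nil {
        // the blob could not be read from this pack file, check whether
        // a valid copy exists in some other pack before reporting the error
        if buf, ierr := loadBlobFn(ctx, val.Handle.Type, val.Handle.ID, nil); ierr == nil {
            val.Plaintext = buf
            val.Err = nil
        }
    }
    err = handleBlobFn(val.Handle, val.Plaintext, val.Err)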
--- internal/repository/repack.go | 9 +- internal/repository/repository.go | 35 ++++++-- .../repository/repository_internal_test.go | 86 +++++++++++++++++-- 3 files changed, 109 insertions(+), 21 deletions(-) diff --git a/internal/repository/repack.go b/internal/repository/repack.go index 5588984f6f9..e839a9c0fee 100644 --- a/internal/repository/repack.go +++ b/internal/repository/repack.go @@ -79,13 +79,8 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito for t := range downloadQueue { err := repo.LoadBlobsFromPack(wgCtx, t.PackID, t.Blobs, func(blob restic.BlobHandle, buf []byte, err error) error { if err != nil { - var ierr error - // check whether we can get a valid copy somewhere else - buf, ierr = repo.LoadBlob(wgCtx, blob.Type, blob.ID, nil) - if ierr != nil { - // no luck, return the original error - return err - } + // a required blob couldn't be retrieved + return err } keepMutex.Lock() diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 8e34c712559..f2cde014afd 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -943,6 +943,7 @@ func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte } type backendLoadFn func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error +type loadBlobFn func(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) // Skip sections with more than 4MB unused blobs const maxUnusedRange = 4 * 1024 * 1024 @@ -952,10 +953,10 @@ const maxUnusedRange = 4 * 1024 * 1024 // handleBlobFn is called at most once for each blob. If the callback returns an error, // then LoadBlobsFromPack will abort and not retry it. func (r *Repository) LoadBlobsFromPack(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { - return streamPack(ctx, r.Backend().Load, r.key, packID, blobs, handleBlobFn) + return streamPack(ctx, r.Backend().Load, r.LoadBlob, r.key, packID, blobs, handleBlobFn) } -func streamPack(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { +func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { if len(blobs) == 0 { // nothing to do return nil @@ -974,7 +975,7 @@ func streamPack(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, pack } if blobs[i].Offset-lastPos > maxUnusedRange { // load everything up to the skipped file section - err := streamPackPart(ctx, beLoad, key, packID, blobs[lowerIdx:i], handleBlobFn) + err := streamPackPart(ctx, beLoad, loadBlobFn, key, packID, blobs[lowerIdx:i], handleBlobFn) if err != nil { return err } @@ -983,10 +984,10 @@ func streamPack(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, pack lastPos = blobs[i].Offset + blobs[i].Length } // load remainder - return streamPackPart(ctx, beLoad, key, packID, blobs[lowerIdx:], handleBlobFn) + return streamPackPart(ctx, beLoad, loadBlobFn, key, packID, blobs[lowerIdx:], handleBlobFn) } -func streamPackPart(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { +func 
streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { h := backend.Handle{Type: restic.PackFile, Name: packID.String(), IsMetadata: false} dataStart := blobs[0].Offset @@ -1022,6 +1023,17 @@ func streamPackPart(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, return err } + if val.Err != nil && loadBlobFn != nil { + var ierr error + // check whether we can get a valid copy somewhere else + buf, ierr := loadBlobFn(ctx, val.Handle.Type, val.Handle.ID, nil) + if ierr == nil { + // success + val.Plaintext = buf + val.Err = nil + } + } + err = handleBlobFn(val.Handle, val.Plaintext, val.Err) if err != nil { cancel() @@ -1032,6 +1044,19 @@ func streamPackPart(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, } return nil }) + + // the context is only still valid if handleBlobFn never returned an error + if ctx.Err() == nil && loadBlobFn != nil { + // check whether we can get the remaining blobs somewhere else + for _, entry := range blobs { + buf, ierr := loadBlobFn(ctx, entry.Type, entry.ID, nil) + err = handleBlobFn(entry.BlobHandle, buf, ierr) + if err != nil { + break + } + } + } + return errors.Wrap(err, "StreamPack") } diff --git a/internal/repository/repository_internal_test.go b/internal/repository/repository_internal_test.go index 0c7115bc96d..1f71b17de5b 100644 --- a/internal/repository/repository_internal_test.go +++ b/internal/repository/repository_internal_test.go @@ -147,13 +147,7 @@ func TestStreamPack(t *testing.T) { func testStreamPack(t *testing.T, version uint) { // always use the same key for deterministic output - const jsonKey = `{"mac":{"k":"eQenuI8adktfzZMuC8rwdA==","r":"k8cfAly2qQSky48CQK7SBA=="},"encrypt":"MKO9gZnRiQFl8mDUurSDa9NMjiu9MUifUrODTHS05wo="}` - - var key crypto.Key - err := json.Unmarshal([]byte(jsonKey), &key) - if err != nil { - t.Fatal(err) - } + key := testKey(t) blobSizes := []int{ 5522811, @@ -276,7 +270,7 @@ func testStreamPack(t *testing.T, version uint) { loadCalls = 0 shortFirstLoad = test.shortFirstLoad - err = streamPack(ctx, load, &key, restic.ID{}, test.blobs, handleBlob) + err := streamPack(ctx, load, nil, &key, restic.ID{}, test.blobs, handleBlob) if err != nil { t.Fatal(err) } @@ -339,7 +333,7 @@ func testStreamPack(t *testing.T, version uint) { return err } - err = streamPack(ctx, load, &key, restic.ID{}, test.blobs, handleBlob) + err := streamPack(ctx, load, nil, &key, restic.ID{}, test.blobs, handleBlob) if err == nil { t.Fatalf("wanted error %v, got nil", test.err) } @@ -449,3 +443,77 @@ func TestUnpackedVerification(t *testing.T) { } } } + +func testKey(t *testing.T) crypto.Key { + const jsonKey = `{"mac":{"k":"eQenuI8adktfzZMuC8rwdA==","r":"k8cfAly2qQSky48CQK7SBA=="},"encrypt":"MKO9gZnRiQFl8mDUurSDa9NMjiu9MUifUrODTHS05wo="}` + + var key crypto.Key + err := json.Unmarshal([]byte(jsonKey), &key) + if err != nil { + t.Fatal(err) + } + return key +} + +func TestStreamPackFallback(t *testing.T) { + test := func(t *testing.T, failLoad bool) { + key := testKey(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + plaintext := rtest.Random(800, 42) + blobID := restic.Hash(plaintext) + blobs := []restic.Blob{ + { + Length: uint(crypto.CiphertextLength(len(plaintext))), + Offset: 0, + BlobHandle: restic.BlobHandle{ + ID: blobID, + Type: restic.DataBlob, + }, + }, + } + + var loadPack backendLoadFn + if failLoad { + loadPack = func(ctx 
context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return errors.New("load error") + } + } else { + loadPack = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + // just return an empty array to provoke an error + data := make([]byte, length) + return fn(bytes.NewReader(data)) + } + } + + loadBlob := func(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) { + if id == blobID { + return plaintext, nil + } + return nil, errors.New("unknown blob") + } + + blobOK := false + handleBlob := func(blob restic.BlobHandle, buf []byte, err error) error { + rtest.OK(t, err) + rtest.Equals(t, blobID, blob.ID) + rtest.Equals(t, plaintext, buf) + blobOK = true + return err + } + + err := streamPack(ctx, loadPack, loadBlob, &key, restic.ID{}, blobs, handleBlob) + rtest.OK(t, err) + rtest.Assert(t, blobOK, "blob failed to load") + } + + t.Run("corrupted blob", func(t *testing.T) { + test(t, false) + }) + + // test fallback for failed pack loading + t.Run("failed load", func(t *testing.T) { + test(t, true) + }) +} From 666a0b0bdbc66129b5832de34cbaa6b1d0c3b2bb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 22 Apr 2024 20:53:31 +0200 Subject: [PATCH 128/893] repository: streamPack: replace streaming with chunked download Due to the interface of streamPack, we cannot guarantee that operations progress fast enough that the underlying connections remains open. This introduces partial failures which massively complicate the error handling. Switch to a simpler approach that retrieves the pack in chunks of 32MB. If a blob is larger than this limit, then it is downloaded separately. To avoid multiple copies in memory, an auxiliary interface `discardReader` is introduced that allows directly accessing the downloaded byte slices, while still supporting the streaming used by the `check` command. --- internal/checker/checker.go | 36 ++++++- internal/repository/repository.go | 166 ++++++++++++++++++------------ 2 files changed, 133 insertions(+), 69 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 28f55ce3ad2..d2fc42ca67d 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -561,7 +561,7 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r hrd := hashing.NewReader(rd, sha256.New()) bufRd.Reset(hrd) - it := repository.NewPackBlobIterator(id, bufRd, 0, blobs, r.Key(), dec) + it := repository.NewPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec) for { val, err := it.Next() if err == repository.ErrPackEOF { @@ -647,11 +647,41 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r return nil } +type bufReader struct { + rd *bufio.Reader + buf []byte +} + +func newBufReader(rd *bufio.Reader) *bufReader { + return &bufReader{ + rd: rd, + } +} + +func (b *bufReader) Discard(n int) (discarded int, err error) { + return b.rd.Discard(n) +} + +func (b *bufReader) ReadFull(n int) (buf []byte, err error) { + if cap(b.buf) < n { + b.buf = make([]byte, n) + } + b.buf = b.buf[:n] + + _, err = io.ReadFull(b.rd, b.buf) + if err != nil { + return nil, err + } + return b.buf, nil +} + // ReadData loads all data from the repository and checks the integrity. 
func (c *Checker) ReadData(ctx context.Context, errChan chan<- error) { c.ReadPacks(ctx, c.packs, nil, errChan) } +const maxStreamBufferSize = 4 * 1024 * 1024 + // ReadPacks loads data from specified packs and checks the integrity. func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *progress.Counter, errChan chan<- error) { defer close(errChan) @@ -669,9 +699,7 @@ func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *p // run workers for i := 0; i < workerCount; i++ { g.Go(func() error { - // create a buffer that is large enough to be reused by repository.StreamPack - // this ensures that we can read the pack header later on - bufRd := bufio.NewReaderSize(nil, repository.MaxStreamBufferSize) + bufRd := bufio.NewReaderSize(nil, maxStreamBufferSize) dec, err := zstd.NewReader(nil) if err != nil { panic(dec) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index f2cde014afd..41f22f307f1 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -1,7 +1,6 @@ package repository import ( - "bufio" "bytes" "context" "fmt" @@ -11,7 +10,6 @@ import ( "sort" "sync" - "github.com/cenkalti/backoff/v4" "github.com/klauspost/compress/zstd" "github.com/restic/chunker" "github.com/restic/restic/internal/backend" @@ -28,8 +26,6 @@ import ( "golang.org/x/sync/errgroup" ) -const MaxStreamBufferSize = 4 * 1024 * 1024 - const MinPackSize = 4 * 1024 * 1024 const DefaultPackSize = 16 * 1024 * 1024 const MaxPackSize = 128 * 1024 * 1024 @@ -951,7 +947,8 @@ const maxUnusedRange = 4 * 1024 * 1024 // LoadBlobsFromPack loads the listed blobs from the specified pack file. The plaintext blob is passed to // the handleBlobFn callback or an error if decryption failed or the blob hash does not match. // handleBlobFn is called at most once for each blob. If the callback returns an error, -// then LoadBlobsFromPack will abort and not retry it. +// then LoadBlobsFromPack will abort and not retry it. The buf passed to the callback is only valid within +// this specific call. The callback must not keep a reference to buf. func (r *Repository) LoadBlobsFromPack(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { return streamPack(ctx, r.Backend().Load, r.LoadBlob, r.key, packID, blobs, handleBlobFn) } @@ -968,12 +965,27 @@ func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn lowerIdx := 0 lastPos := blobs[0].Offset + const maxChunkSize = 2 * DefaultPackSize + for i := 0; i < len(blobs); i++ { if blobs[i].Offset < lastPos { // don't wait for streamPackPart to fail return errors.Errorf("overlapping blobs in pack %v", packID) } + + chunkSizeAfter := (blobs[i].Offset + blobs[i].Length) - blobs[lowerIdx].Offset + split := false + // split if the chunk would become larger than maxChunkSize. 
Oversized chunks are + // handled by the requirement that the chunk contains at least one blob (i > lowerIdx) + if i > lowerIdx && chunkSizeAfter >= maxChunkSize { + split = true + } + // skip too large gaps as a new request is typically much cheaper than data transfers if blobs[i].Offset-lastPos > maxUnusedRange { + split = true + } + + if split { // load everything up to the skipped file section err := streamPackPart(ctx, beLoad, loadBlobFn, key, packID, blobs[lowerIdx:i], handleBlobFn) if err != nil { @@ -1001,75 +1013,108 @@ func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBl } defer dec.Close() - ctx, cancel := context.WithCancel(ctx) - // stream blobs in pack + data := make([]byte, int(dataEnd-dataStart)) err = beLoad(ctx, h, int(dataEnd-dataStart), int64(dataStart), func(rd io.Reader) error { - // prevent callbacks after cancellation - if ctx.Err() != nil { - return ctx.Err() - } - bufferSize := int(dataEnd - dataStart) - if bufferSize > MaxStreamBufferSize { - bufferSize = MaxStreamBufferSize - } - bufRd := bufio.NewReaderSize(rd, bufferSize) - it := NewPackBlobIterator(packID, bufRd, dataStart, blobs, key, dec) - - for { - val, err := it.Next() - if err == ErrPackEOF { - break - } else if err != nil { - return err - } - - if val.Err != nil && loadBlobFn != nil { - var ierr error - // check whether we can get a valid copy somewhere else - buf, ierr := loadBlobFn(ctx, val.Handle.Type, val.Handle.ID, nil) - if ierr == nil { - // success - val.Plaintext = buf - val.Err = nil + _, cerr := io.ReadFull(rd, data) + return cerr + }) + // prevent callbacks after cancellation + if ctx.Err() != nil { + return ctx.Err() + } + if err != nil { + // the context is only still valid if handleBlobFn never returned an error + if loadBlobFn != nil { + // check whether we can get the remaining blobs somewhere else + for _, entry := range blobs { + buf, ierr := loadBlobFn(ctx, entry.Type, entry.ID, nil) + err = handleBlobFn(entry.BlobHandle, buf, ierr) + if err != nil { + break } } + } + return errors.Wrap(err, "StreamPack") + } - err = handleBlobFn(val.Handle, val.Plaintext, val.Err) - if err != nil { - cancel() - return backoff.Permanent(err) - } - // ensure that each blob is only passed once to handleBlobFn - blobs = blobs[1:] + it := NewPackBlobIterator(packID, newByteReader(data), dataStart, blobs, key, dec) + + for { + val, err := it.Next() + if err == ErrPackEOF { + break + } else if err != nil { + return err } - return nil - }) - // the context is only still valid if handleBlobFn never returned an error - if ctx.Err() == nil && loadBlobFn != nil { - // check whether we can get the remaining blobs somewhere else - for _, entry := range blobs { - buf, ierr := loadBlobFn(ctx, entry.Type, entry.ID, nil) - err = handleBlobFn(entry.BlobHandle, buf, ierr) - if err != nil { - break + if val.Err != nil && loadBlobFn != nil { + var ierr error + // check whether we can get a valid copy somewhere else + buf, ierr := loadBlobFn(ctx, val.Handle.Type, val.Handle.ID, nil) + if ierr == nil { + // success + val.Plaintext = buf + val.Err = nil } } + + err = handleBlobFn(val.Handle, val.Plaintext, val.Err) + if err != nil { + return err + } + // ensure that each blob is only passed once to handleBlobFn + blobs = blobs[1:] } return errors.Wrap(err, "StreamPack") } +// discardReader allows the PackBlobIterator to perform zero copy +// reads if the underlying data source is a byte slice. 
+type discardReader interface { + Discard(n int) (discarded int, err error) + // ReadFull reads the next n bytes into a byte slice. The caller must not + // retain a reference to the byte. Modifications are only allowed within + // the boundaries of the returned slice. + ReadFull(n int) (buf []byte, err error) +} + +type byteReader struct { + buf []byte +} + +func newByteReader(buf []byte) *byteReader { + return &byteReader{ + buf: buf, + } +} + +func (b *byteReader) Discard(n int) (discarded int, err error) { + if len(b.buf) < n { + return 0, io.ErrUnexpectedEOF + } + b.buf = b.buf[n:] + return n, nil +} + +func (b *byteReader) ReadFull(n int) (buf []byte, err error) { + if len(b.buf) < n { + return nil, io.ErrUnexpectedEOF + } + buf = b.buf[:n] + b.buf = b.buf[n:] + return buf, nil +} + type PackBlobIterator struct { packID restic.ID - rd *bufio.Reader + rd discardReader currentOffset uint blobs []restic.Blob key *crypto.Key dec *zstd.Decoder - buf []byte decode []byte } @@ -1081,7 +1126,7 @@ type PackBlobValue struct { var ErrPackEOF = errors.New("reached EOF of pack file") -func NewPackBlobIterator(packID restic.ID, rd *bufio.Reader, currentOffset uint, +func NewPackBlobIterator(packID restic.ID, rd discardReader, currentOffset uint, blobs []restic.Blob, key *crypto.Key, dec *zstd.Decoder) *PackBlobIterator { return &PackBlobIterator{ packID: packID, @@ -1116,21 +1161,12 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) { h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} debug.Log(" process blob %v, skipped %d, %v", h, skipBytes, entry) - if uint(cap(b.buf)) < entry.Length { - b.buf = make([]byte, entry.Length) - } - b.buf = b.buf[:entry.Length] - - n, err := io.ReadFull(b.rd, b.buf) + buf, err := b.rd.ReadFull(int(entry.Length)) if err != nil { debug.Log(" read error %v", err) return PackBlobValue{}, fmt.Errorf("readFull: %w", err) } - if n != len(b.buf) { - return PackBlobValue{}, fmt.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v", - h, b.packID.Str(), len(b.buf), n) - } b.currentOffset = entry.Offset + entry.Length if int(entry.Length) <= b.key.NonceSize() { @@ -1139,7 +1175,7 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) { } // decryption errors are likely permanent, give the caller a chance to skip them - nonce, ciphertext := b.buf[:b.key.NonceSize()], b.buf[b.key.NonceSize():] + nonce, ciphertext := buf[:b.key.NonceSize()], buf[b.key.NonceSize():] plaintext, err := b.key.Open(ciphertext[:0], nonce, ciphertext, nil) if err != nil { err = fmt.Errorf("decrypting blob %v from %v failed: %w", h, b.packID.Str(), err) From cf700d8794ea64b6b8ae95ddd742aeb4357f47d0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 22 Apr 2024 21:11:52 +0200 Subject: [PATCH 129/893] repository: streamPack: reuse zstd decoder --- internal/repository/repository.go | 18 ++++++------------ .../repository/repository_internal_test.go | 18 +++++++++++++++--- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 41f22f307f1..3ed9f7afa3c 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -950,10 +950,10 @@ const maxUnusedRange = 4 * 1024 * 1024 // then LoadBlobsFromPack will abort and not retry it. The buf passed to the callback is only valid within // this specific call. The callback must not keep a reference to buf. 
func (r *Repository) LoadBlobsFromPack(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { - return streamPack(ctx, r.Backend().Load, r.LoadBlob, r.key, packID, blobs, handleBlobFn) + return streamPack(ctx, r.Backend().Load, r.LoadBlob, r.getZstdDecoder(), r.key, packID, blobs, handleBlobFn) } -func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { +func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, dec *zstd.Decoder, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { if len(blobs) == 0 { // nothing to do return nil @@ -987,7 +987,7 @@ func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn if split { // load everything up to the skipped file section - err := streamPackPart(ctx, beLoad, loadBlobFn, key, packID, blobs[lowerIdx:i], handleBlobFn) + err := streamPackPart(ctx, beLoad, loadBlobFn, dec, key, packID, blobs[lowerIdx:i], handleBlobFn) if err != nil { return err } @@ -996,10 +996,10 @@ func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn lastPos = blobs[i].Offset + blobs[i].Length } // load remainder - return streamPackPart(ctx, beLoad, loadBlobFn, key, packID, blobs[lowerIdx:], handleBlobFn) + return streamPackPart(ctx, beLoad, loadBlobFn, dec, key, packID, blobs[lowerIdx:], handleBlobFn) } -func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { +func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, dec *zstd.Decoder, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { h := backend.Handle{Type: restic.PackFile, Name: packID.String(), IsMetadata: false} dataStart := blobs[0].Offset @@ -1007,14 +1007,8 @@ func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBl debug.Log("streaming pack %v (%d to %d bytes), blobs: %v", packID, dataStart, dataEnd, len(blobs)) - dec, err := zstd.NewReader(nil) - if err != nil { - panic(dec) - } - defer dec.Close() - data := make([]byte, int(dataEnd-dataStart)) - err = beLoad(ctx, h, int(dataEnd-dataStart), int64(dataStart), func(rd io.Reader) error { + err := beLoad(ctx, h, int(dataEnd-dataStart), int64(dataStart), func(rd io.Reader) error { _, cerr := io.ReadFull(rd, data) return cerr }) diff --git a/internal/repository/repository_internal_test.go b/internal/repository/repository_internal_test.go index 1f71b17de5b..16e6e848438 100644 --- a/internal/repository/repository_internal_test.go +++ b/internal/repository/repository_internal_test.go @@ -146,6 +146,12 @@ func TestStreamPack(t *testing.T) { } func testStreamPack(t *testing.T, version uint) { + dec, err := zstd.NewReader(nil) + if err != nil { + panic(dec) + } + defer dec.Close() + // always use the same key for deterministic output key := testKey(t) @@ -270,7 +276,7 @@ func testStreamPack(t *testing.T, version uint) { loadCalls = 0 shortFirstLoad = test.shortFirstLoad - err := streamPack(ctx, load, nil, &key, restic.ID{}, test.blobs, handleBlob) + err := streamPack(ctx, load, nil, 
dec, &key, restic.ID{}, test.blobs, handleBlob) if err != nil { t.Fatal(err) } @@ -333,7 +339,7 @@ func testStreamPack(t *testing.T, version uint) { return err } - err := streamPack(ctx, load, nil, &key, restic.ID{}, test.blobs, handleBlob) + err := streamPack(ctx, load, nil, dec, &key, restic.ID{}, test.blobs, handleBlob) if err == nil { t.Fatalf("wanted error %v, got nil", test.err) } @@ -456,6 +462,12 @@ func testKey(t *testing.T) crypto.Key { } func TestStreamPackFallback(t *testing.T) { + dec, err := zstd.NewReader(nil) + if err != nil { + panic(dec) + } + defer dec.Close() + test := func(t *testing.T, failLoad bool) { key := testKey(t) ctx, cancel := context.WithCancel(context.Background()) @@ -503,7 +515,7 @@ func TestStreamPackFallback(t *testing.T) { return err } - err := streamPack(ctx, loadPack, loadBlob, &key, restic.ID{}, blobs, handleBlob) + err := streamPack(ctx, loadPack, loadBlob, dec, &key, restic.ID{}, blobs, handleBlob) rtest.OK(t, err) rtest.Assert(t, blobOK, "blob failed to load") } From 20d8eed400ac4a83260c84a0621235c537fbff59 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 22 Apr 2024 21:12:20 +0200 Subject: [PATCH 130/893] repository: streamPack: separate requests for gap larger than 1MB With most cloud providers, traffic is much more expensive than API calls. Thus slightly bias streamPack towards a bit more API calls in exchange for slightly less traffic. --- internal/repository/repository.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 3ed9f7afa3c..84bac9ee350 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -941,8 +941,8 @@ func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte type backendLoadFn func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error type loadBlobFn func(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) -// Skip sections with more than 4MB unused blobs -const maxUnusedRange = 4 * 1024 * 1024 +// Skip sections with more than 1MB unused blobs +const maxUnusedRange = 1 * 1024 * 1024 // LoadBlobsFromPack loads the listed blobs from the specified pack file. The plaintext blob is passed to // the handleBlobFn callback or an error if decryption failed or the blob hash does not match. 
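Taken together, the two streamPack changes above determine how a pack file is now sliced into download requests: a range grows up to roughly 32MB (twice the 16MB default pack size), and a gap of more than 1MB of unused bytes starts a new request. The following self-contained sketch replays that grouping rule with a simplified blob type; the constants and the splitting condition are taken from the patches, everything else is illustrative only.

    package main

    import "fmt"

    // blob is a simplified stand-in for restic.Blob: an offset and a length
    // (in bytes) inside a single pack file. Blobs are assumed to be sorted by
    // offset and non-overlapping, as streamPack requires.
    type blob struct {
        offset, length uint
    }

    const (
        maxChunkSize   = 32 * 1024 * 1024 // 2x the default pack size of 16MB
        maxUnusedRange = 1 * 1024 * 1024  // skip gaps with more than 1MB of unused data
    )

    // groupBlobs mirrors the splitting rule of streamPack: a new download range
    // is started when the current one would grow beyond maxChunkSize, or when
    // the gap to the next blob exceeds maxUnusedRange (an extra request is
    // typically cheaper than transferring unused bytes). An oversized blob
    // still forms a single range of its own.
    func groupBlobs(blobs []blob) [][]blob {
        if len(blobs) == 0 {
            return nil
        }
        var ranges [][]blob
        lowerIdx := 0
        lastPos := blobs[0].offset
        for i := 0; i < len(blobs); i++ {
            chunkSizeAfter := (blobs[i].offset + blobs[i].length) - blobs[lowerIdx].offset
            split := false
            // split if the range would become larger than maxChunkSize; oversized
            // ranges are allowed as long as they contain at least one blob (i > lowerIdx)
            if i > lowerIdx && chunkSizeAfter >= maxChunkSize {
                split = true
            }
            // skip too large gaps, a new request is cheaper than the wasted transfer
            if blobs[i].offset-lastPos > maxUnusedRange {
                split = true
            }
            if split {
                ranges = append(ranges, blobs[lowerIdx:i])
                lowerIdx = i
            }
            lastPos = blobs[i].offset + blobs[i].length
        }
        return append(ranges, blobs[lowerIdx:])
    }

    func main() {
        blobs := []blob{
            {offset: 0, length: 512 * 1024},
            {offset: 512 * 1024, length: 512 * 1024},
            // 2MB of unused data follow, so the next blob starts a new range
            {offset: 3 * 1024 * 1024, length: 40 * 1024 * 1024}, // larger than maxChunkSize, downloaded on its own
            {offset: 43 * 1024 * 1024, length: 512 * 1024},
        }
        for i, r := range groupBlobs(blobs) {
            first, last := r[0], r[len(r)-1]
            fmt.Printf("request %d: %d blob(s), bytes %d-%d\n",
                i, len(r), first.offset, last.offset+last.length)
        }
    }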
From 21a7cb405c82c850db475e2410fba4015fc605b4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 23:28:59 +0100 Subject: [PATCH 131/893] check: replace cleanup handler --- cmd/restic/cmd_check.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 7bea641ae8e..83ebf89a6f5 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -199,10 +199,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } cleanup := prepareCheckCache(opts, &gopts) - AddCleanupHandler(func(code int) (int, error) { - cleanup() - return code, nil - }) + defer cleanup() if !gopts.NoLock { Verbosef("create exclusive lock for repository\n") From 93135dc705ba6f749dcb4c8eaa965a4c9cc4c456 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 23:29:49 +0100 Subject: [PATCH 132/893] lock: drop cleanup handler --- cmd/restic/lock.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go index 69d433df172..99e199a6773 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -21,18 +21,11 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo Verbosef("%s", msg) } }, Warnf) - - unlock = lock.Unlock - // make sure that a repository is unlocked properly and after cancel() was - // called by the cleanup handler in global.go - AddCleanupHandler(func(code int) (int, error) { - lock.Unlock() - return code, nil - }) - if err != nil { return nil, nil, nil, err } + + unlock = lock.Unlock } else { repo.SetDryRun() } From 86c7909f41cc8e3e748a6447831c7c7243a37b69 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Mar 2024 14:26:12 +0100 Subject: [PATCH 133/893] mount: use standalone shutdown hook via goroutine --- cmd/restic/cmd_mount.go | 48 ++++++++++++------------ cmd/restic/cmd_mount_integration_test.go | 3 +- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index cb2b1142d5d..1e4bff03e3e 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -152,28 +152,15 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args } } - AddCleanupHandler(func(code int) (int, error) { - debug.Log("running umount cleanup handler for mount at %v", mountpoint) - err := umount(mountpoint) - if err != nil { - Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err) - } - // replace error code of sigint - if code == 130 { - code = 0 - } - return code, nil - }) + systemFuse.Debug = func(msg interface{}) { + debug.Log("fuse: %v", msg) + } c, err := systemFuse.Mount(mountpoint, mountOptions...) 
if err != nil { return err } - systemFuse.Debug = func(msg interface{}) { - debug.Log("fuse: %v", msg) - } - cfg := fuse.Config{ OwnerIsRoot: opts.OwnerRoot, Filter: opts.SnapshotFilter, @@ -187,15 +174,26 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args Printf("When finished, quit with Ctrl-c here or umount the mountpoint.\n") debug.Log("serving mount at %v", mountpoint) - err = fs.Serve(c, root) - if err != nil { - return err - } - <-c.Ready - return c.MountError -} + done := make(chan struct{}) + + go func() { + defer close(done) + err = fs.Serve(c, root) + }() + + select { + case <-ctx.Done(): + debug.Log("running umount cleanup handler for mount at %v", mountpoint) + err := systemFuse.Unmount(mountpoint) + if err != nil { + Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err) + } + + return nil + case <-done: + // clean shutdown, nothing to do + } -func umount(mountpoint string) error { - return systemFuse.Unmount(mountpoint) + return err } diff --git a/cmd/restic/cmd_mount_integration_test.go b/cmd/restic/cmd_mount_integration_test.go index 590e1503047..d764b4e4f0f 100644 --- a/cmd/restic/cmd_mount_integration_test.go +++ b/cmd/restic/cmd_mount_integration_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + systemFuse "github.com/anacrolix/fuse" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -65,7 +66,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr func testRunUmount(t testing.TB, dir string) { var err error for i := 0; i < mountWait; i++ { - if err = umount(dir); err == nil { + if err = systemFuse.Unmount(dir); err == nil { t.Logf("directory %v umounted", dir) return } From eb710a28e8574972d71c0b898897c409f62d6563 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 23:52:45 +0100 Subject: [PATCH 134/893] use standalone shutdown hook for readPasswordTerminal move terminal restoration into readPasswordTerminal --- cmd/restic/cmd_copy.go | 2 +- cmd/restic/cmd_init.go | 4 +- cmd/restic/cmd_key_add.go | 6 +- cmd/restic/cmd_key_passwd.go | 2 +- cmd/restic/global.go | 99 ++++++++++++++----------------- cmd/restic/secondary_repo.go | 5 +- cmd/restic/secondary_repo_test.go | 5 +- 7 files changed, 58 insertions(+), 65 deletions(-) diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 410134e41ec..de3958defa3 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -53,7 +53,7 @@ func init() { } func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []string) error { - secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "destination") + secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "destination") if err != nil { return err } diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go index 7154279e8ba..e6ea694413e 100644 --- a/cmd/restic/cmd_init.go +++ b/cmd/restic/cmd_init.go @@ -80,7 +80,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args [] return err } - gopts.password, err = ReadPasswordTwice(gopts, + gopts.password, err = ReadPasswordTwice(ctx, gopts, "enter password for new repository: ", "enter password again: ") if err != nil { @@ -131,7 +131,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args [] func maybeReadChunkerPolynomial(ctx context.Context, opts InitOptions, gopts GlobalOptions) (*chunker.Pol, error) { if 
opts.CopyChunkerParameters { - otherGopts, _, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "secondary") + otherGopts, _, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "secondary") if err != nil { return nil, err } diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index 83e0cab7f5d..30675462785 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -60,7 +60,7 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg } func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyAddOptions) error { - pw, err := getNewPassword(gopts, opts.NewPasswordFile) + pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile) if err != nil { return err } @@ -83,7 +83,7 @@ func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOption // testKeyNewPassword is used to set a new password during integration testing. var testKeyNewPassword string -func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) { +func getNewPassword(ctx context.Context, gopts GlobalOptions, newPasswordFile string) (string, error) { if testKeyNewPassword != "" { return testKeyNewPassword, nil } @@ -97,7 +97,7 @@ func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) newopts := gopts newopts.password = "" - return ReadPasswordTwice(newopts, + return ReadPasswordTwice(ctx, newopts, "enter new password: ", "enter password again: ") } diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index 70abca6dc20..0836c4cfe3a 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -57,7 +57,7 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption } func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyPasswdOptions) error { - pw, err := getNewPassword(gopts, opts.NewPasswordFile) + pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile) if err != nil { return err } diff --git a/cmd/restic/global.go b/cmd/restic/global.go index cc47496f36c..9f1ec85a2d3 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -96,7 +96,6 @@ var globalOptions = GlobalOptions{ stderr: os.Stderr, } -var isReadingPassword bool var internalGlobalCtx context.Context func init() { @@ -165,8 +164,6 @@ func init() { // parse target pack size from env, on error the default value will be used targetPackSize, _ := strconv.ParseUint(os.Getenv("RESTIC_PACK_SIZE"), 10, 32) globalOptions.PackSize = uint(targetPackSize) - - restoreTerminal() } func stdinIsTerminal() bool { @@ -191,40 +188,6 @@ func stdoutTerminalWidth() int { return w } -// restoreTerminal installs a cleanup handler that restores the previous -// terminal state on exit. This handler is only intended to restore the -// terminal configuration if restic exits after receiving a signal. A regular -// program execution must revert changes to the terminal configuration itself. -// The terminal configuration is only restored while reading a password. 
-func restoreTerminal() { - if !term.IsTerminal(int(os.Stdout.Fd())) { - return - } - - fd := int(os.Stdout.Fd()) - state, err := term.GetState(fd) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) - return - } - - AddCleanupHandler(func(code int) (int, error) { - // Restoring the terminal configuration while restic runs in the - // background, causes restic to get stopped on unix systems with - // a SIGTTOU signal. Thus only restore the terminal settings if - // they might have been modified, which is the case while reading - // a password. - if !isReadingPassword { - return code, nil - } - err := term.Restore(fd, state) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) - } - return code, err - }) -} - // ClearLine creates a platform dependent string to clear the current // line, so it can be overwritten. // @@ -333,24 +296,48 @@ func readPassword(in io.Reader) (password string, err error) { // readPasswordTerminal reads the password from the given reader which must be a // tty. Prompt is printed on the writer out before attempting to read the -// password. -func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password string, err error) { - fmt.Fprint(out, prompt) - isReadingPassword = true - buf, err := term.ReadPassword(int(in.Fd())) - isReadingPassword = false - fmt.Fprintln(out) +// password. If the context is canceled, the function leaks the password reading +// goroutine. +func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt string) (password string, err error) { + fd := int(out.Fd()) + state, err := term.GetState(fd) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) + return "", err + } + + done := make(chan struct{}) + var buf []byte + + go func() { + defer close(done) + fmt.Fprint(out, prompt) + buf, err = term.ReadPassword(int(in.Fd())) + fmt.Fprintln(out) + }() + + select { + case <-ctx.Done(): + err := term.Restore(fd, state) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) + } + return "", ctx.Err() + case <-done: + // clean shutdown, nothing to do + } + if err != nil { return "", errors.Wrap(err, "ReadPassword") } - password = string(buf) - return password, nil + return string(buf), nil } // ReadPassword reads the password from a password file, the environment -// variable RESTIC_PASSWORD or prompts the user. -func ReadPassword(opts GlobalOptions, prompt string) (string, error) { +// variable RESTIC_PASSWORD or prompts the user. If the context is canceled, +// the function leaks the password reading goroutine. +func ReadPassword(ctx context.Context, opts GlobalOptions, prompt string) (string, error) { if opts.password != "" { return opts.password, nil } @@ -361,7 +348,7 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) { ) if stdinIsTerminal() { - password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt) + password, err = readPasswordTerminal(ctx, os.Stdin, os.Stderr, prompt) } else { password, err = readPassword(os.Stdin) Verbosef("reading repository password from stdin\n") @@ -379,14 +366,15 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) { } // ReadPasswordTwice calls ReadPassword two times and returns an error when the -// passwords don't match. -func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) { - pw1, err := ReadPassword(gopts, prompt1) +// passwords don't match. 
If the context is canceled, the function leaks the +// password reading goroutine. +func ReadPasswordTwice(ctx context.Context, gopts GlobalOptions, prompt1, prompt2 string) (string, error) { + pw1, err := ReadPassword(ctx, gopts, prompt1) if err != nil { return "", err } if stdinIsTerminal() { - pw2, err := ReadPassword(gopts, prompt2) + pw2, err := ReadPassword(ctx, gopts, prompt2) if err != nil { return "", err } @@ -469,7 +457,10 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } for ; passwordTriesLeft > 0; passwordTriesLeft-- { - opts.password, err = ReadPassword(opts, "enter password for repository: ") + opts.password, err = ReadPassword(ctx, opts, "enter password for repository: ") + if ctx.Err() != nil { + return nil, ctx.Err() + } if err != nil && passwordTriesLeft > 1 { opts.password = "" fmt.Printf("%s. Try again\n", err) diff --git a/cmd/restic/secondary_repo.go b/cmd/restic/secondary_repo.go index 4c46b60df44..2afd36a81b9 100644 --- a/cmd/restic/secondary_repo.go +++ b/cmd/restic/secondary_repo.go @@ -1,6 +1,7 @@ package main import ( + "context" "os" "github.com/restic/restic/internal/errors" @@ -56,7 +57,7 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo opts.PasswordCommand = os.Getenv("RESTIC_FROM_PASSWORD_COMMAND") } -func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) { +func fillSecondaryGlobalOpts(ctx context.Context, opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) { if opts.Repo == "" && opts.RepositoryFile == "" && opts.LegacyRepo == "" && opts.LegacyRepositoryFile == "" { return GlobalOptions{}, false, errors.Fatal("Please specify a source repository location (--from-repo or --from-repository-file)") } @@ -109,7 +110,7 @@ func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, rep return GlobalOptions{}, false, err } } - dstGopts.password, err = ReadPassword(dstGopts, "enter password for "+repoPrefix+" repository: ") + dstGopts.password, err = ReadPassword(ctx, dstGopts, "enter password for "+repoPrefix+" repository: ") if err != nil { return GlobalOptions{}, false, err } diff --git a/cmd/restic/secondary_repo_test.go b/cmd/restic/secondary_repo_test.go index ff1a10b03cb..aa511ca992a 100644 --- a/cmd/restic/secondary_repo_test.go +++ b/cmd/restic/secondary_repo_test.go @@ -1,6 +1,7 @@ package main import ( + "context" "os" "path/filepath" "testing" @@ -170,7 +171,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) { // Test all valid cases for _, testCase := range validSecondaryRepoTestCases { - DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination") + DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination") rtest.OK(t, err) rtest.Equals(t, DstGOpts, testCase.DstGOpts) rtest.Equals(t, isFromRepo, testCase.FromRepo) @@ -178,7 +179,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) { // Test all invalid cases for _, testCase := range invalidSecondaryRepoTestCases { - _, _, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination") + _, _, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination") rtest.Assert(t, err != nil, "Expected error, but function did not return an error") } } From 699ef5e9def04297fbd40d08c197d1f9e9a8cdaa Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 5 Apr 2024 23:16:15 +0200 Subject: [PATCH 135/893] debug: 
replace cleanup handler usage in profiling setup --- cmd/restic/global_debug.go | 82 ++++++++++++++++++------------------ cmd/restic/global_release.go | 3 ++ cmd/restic/main.go | 3 ++ 3 files changed, 47 insertions(+), 41 deletions(-) diff --git a/cmd/restic/global_debug.go b/cmd/restic/global_debug.go index b798074d10b..502b2cf6ed3 100644 --- a/cmd/restic/global_debug.go +++ b/cmd/restic/global_debug.go @@ -15,23 +15,28 @@ import ( "github.com/pkg/profile" ) -var ( - listenProfile string - memProfilePath string - cpuProfilePath string - traceProfilePath string - blockProfilePath string - insecure bool -) +type ProfileOptions struct { + listen string + memPath string + cpuPath string + tracePath string + blockPath string + insecure bool +} + +var profileOpts ProfileOptions +var prof interface { + Stop() +} func init() { f := cmdRoot.PersistentFlags() - f.StringVar(&listenProfile, "listen-profile", "", "listen on this `address:port` for memory profiling") - f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`") - f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`") - f.StringVar(&traceProfilePath, "trace-profile", "", "write trace to `dir`") - f.StringVar(&blockProfilePath, "block-profile", "", "write block profile to `dir`") - f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings") + f.StringVar(&profileOpts.listen, "listen-profile", "", "listen on this `address:port` for memory profiling") + f.StringVar(&profileOpts.memPath, "mem-profile", "", "write memory profile to `dir`") + f.StringVar(&profileOpts.cpuPath, "cpu-profile", "", "write cpu profile to `dir`") + f.StringVar(&profileOpts.tracePath, "trace-profile", "", "write trace to `dir`") + f.StringVar(&profileOpts.blockPath, "block-profile", "", "write block profile to `dir`") + f.BoolVar(&profileOpts.insecure, "insecure-kdf", false, "use insecure KDF settings") } type fakeTestingTB struct{} @@ -41,10 +46,10 @@ func (fakeTestingTB) Logf(msg string, args ...interface{}) { } func runDebug() error { - if listenProfile != "" { - fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", listenProfile) + if profileOpts.listen != "" { + fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", profileOpts.listen) go func() { - err := http.ListenAndServe(listenProfile, nil) + err := http.ListenAndServe(profileOpts.listen, nil) if err != nil { fmt.Fprintf(os.Stderr, "profile HTTP server listen failed: %v\n", err) } @@ -52,16 +57,16 @@ func runDebug() error { } profilesEnabled := 0 - if memProfilePath != "" { + if profileOpts.memPath != "" { profilesEnabled++ } - if cpuProfilePath != "" { + if profileOpts.cpuPath != "" { profilesEnabled++ } - if traceProfilePath != "" { + if profileOpts.tracePath != "" { profilesEnabled++ } - if blockProfilePath != "" { + if profileOpts.blockPath != "" { profilesEnabled++ } @@ -69,30 +74,25 @@ func runDebug() error { return errors.Fatal("only one profile (memory, CPU, trace, or block) may be activated at the same time") } - var prof interface { - Stop() + if profileOpts.memPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(profileOpts.memPath)) + } else if profileOpts.cpuPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(profileOpts.cpuPath)) + } else if profileOpts.tracePath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(profileOpts.tracePath)) + 
} else if profileOpts.blockPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(profileOpts.blockPath)) } - if memProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath)) - } else if cpuProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath)) - } else if traceProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(traceProfilePath)) - } else if blockProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(blockProfilePath)) - } - - if prof != nil { - AddCleanupHandler(func(code int) (int, error) { - prof.Stop() - return code, nil - }) - } - - if insecure { + if profileOpts.insecure { repository.TestUseLowSecurityKDFParameters(fakeTestingTB{}) } return nil } + +func stopDebug() { + if prof != nil { + prof.Stop() + } +} diff --git a/cmd/restic/global_release.go b/cmd/restic/global_release.go index 7cb2e6caf3c..1dab5a293ac 100644 --- a/cmd/restic/global_release.go +++ b/cmd/restic/global_release.go @@ -5,3 +5,6 @@ package main // runDebug is a noop without the debug tag. func runDebug() error { return nil } + +// stopDebug is a noop without the debug tag. +func stopDebug() {} diff --git a/cmd/restic/main.go b/cmd/restic/main.go index a4acb1cab38..308a432b5f7 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -74,6 +74,9 @@ The full documentation can be found at https://restic.readthedocs.io/ . // enabled) return runDebug() }, + PersistentPostRun: func(_ *cobra.Command, _ []string) { + stopDebug() + }, } // Distinguish commands that need the password from those that work without, From 6f2a4dea210c98169c4b6658a6cf42091d182424 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 23:58:48 +0100 Subject: [PATCH 136/893] remove global shutdown hook --- cmd/restic/cleanup.go | 84 ++++++++++--------------------------------- cmd/restic/global.go | 11 ------ cmd/restic/main.go | 17 ++++++--- 3 files changed, 31 insertions(+), 81 deletions(-) diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go index 5a6cf79e11d..90ea93b9235 100644 --- a/cmd/restic/cleanup.go +++ b/cmd/restic/cleanup.go @@ -1,89 +1,41 @@ package main import ( + "context" "os" "os/signal" - "sync" "syscall" "github.com/restic/restic/internal/debug" ) -var cleanupHandlers struct { - sync.Mutex - list []func(code int) (int, error) - done bool - ch chan os.Signal -} - -func init() { - cleanupHandlers.ch = make(chan os.Signal, 1) - go CleanupHandler(cleanupHandlers.ch) - signal.Notify(cleanupHandlers.ch, syscall.SIGINT, syscall.SIGTERM) -} +func createGlobalContext() context.Context { + ctx, cancel := context.WithCancel(context.Background()) -// AddCleanupHandler adds the function f to the list of cleanup handlers so -// that it is executed when all the cleanup handlers are run, e.g. when SIGINT -// is received. 
-func AddCleanupHandler(f func(code int) (int, error)) { - cleanupHandlers.Lock() - defer cleanupHandlers.Unlock() + ch := make(chan os.Signal, 1) + go cleanupHandler(ch, cancel) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) - // reset the done flag for integration tests - cleanupHandlers.done = false - - cleanupHandlers.list = append(cleanupHandlers.list, f) + return ctx } -// RunCleanupHandlers runs all registered cleanup handlers -func RunCleanupHandlers(code int) int { - cleanupHandlers.Lock() - defer cleanupHandlers.Unlock() - - if cleanupHandlers.done { - return code - } - cleanupHandlers.done = true +// cleanupHandler handles the SIGINT and SIGTERM signals. +func cleanupHandler(c <-chan os.Signal, cancel context.CancelFunc) { + s := <-c + debug.Log("signal %v received, cleaning up", s) + Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s) - for _, f := range cleanupHandlers.list { - var err error - code, err = f(code) - if err != nil { - Warnf("error in cleanup handler: %v\n", err) - } + if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" { + _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n") + _, _ = os.Stderr.WriteString(debug.DumpStacktrace()) + _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n") } - cleanupHandlers.list = nil - return code -} - -// CleanupHandler handles the SIGINT and SIGTERM signals. -func CleanupHandler(c <-chan os.Signal) { - for s := range c { - debug.Log("signal %v received, cleaning up", s) - Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s) - - if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" { - _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n") - _, _ = os.Stderr.WriteString(debug.DumpStacktrace()) - _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n") - } - code := 0 - - if s == syscall.SIGINT || s == syscall.SIGTERM { - code = 130 - } else { - code = 1 - } - - Exit(code) - } + cancel() } -// Exit runs the cleanup handlers and then terminates the process with the -// given exit code. +// Exit terminates the process with the given exit code. 
func Exit(code int) { - code = RunCleanupHandlers(code) debug.Log("exiting with status code %d", code) os.Exit(code) } diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 9f1ec85a2d3..5b21871dc5b 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -96,8 +96,6 @@ var globalOptions = GlobalOptions{ stderr: os.Stderr, } -var internalGlobalCtx context.Context - func init() { backends := location.NewRegistry() backends.Register(azure.NewFactory()) @@ -111,15 +109,6 @@ func init() { backends.Register(swift.NewFactory()) globalOptions.backends = backends - var cancel context.CancelFunc - internalGlobalCtx, cancel = context.WithCancel(context.Background()) - AddCleanupHandler(func(code int) (int, error) { - // Must be called before the unlock cleanup handler to ensure that the latter is - // not blocked due to limited number of backend connections, see #1434 - cancel() - return code, nil - }) - f := cmdRoot.PersistentFlags() f.StringVarP(&globalOptions.Repo, "repo", "r", "", "`repository` to backup to or restore from (default: $RESTIC_REPOSITORY)") f.StringVarP(&globalOptions.RepositoryFile, "repository-file", "", "", "`file` to read the repository location from (default: $RESTIC_REPOSITORY_FILE)") diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 308a432b5f7..56ddf74a45a 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -3,6 +3,7 @@ package main import ( "bufio" "bytes" + "context" "fmt" "log" "os" @@ -118,7 +119,13 @@ func main() { debug.Log("main %#v", os.Args) debug.Log("restic %s compiled with %v on %v/%v", version, runtime.Version(), runtime.GOOS, runtime.GOARCH) - err = cmdRoot.ExecuteContext(internalGlobalCtx) + + ctx := createGlobalContext() + err = cmdRoot.ExecuteContext(ctx) + + if err == nil { + err = ctx.Err() + } switch { case restic.IsAlreadyLocked(err): @@ -140,11 +147,13 @@ func main() { } var exitCode int - switch err { - case nil: + switch { + case err == nil: exitCode = 0 - case ErrInvalidSourceData: + case err == ErrInvalidSourceData: exitCode = 3 + case errors.Is(err, context.Canceled): + exitCode = 130 default: exitCode = 1 } From 910927670f135ae08810710631f1126900743abc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 5 Apr 2024 22:23:08 +0200 Subject: [PATCH 137/893] mount: fix exit code on cancellation --- cmd/restic/cmd_mount.go | 2 +- cmd/restic/main.go | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 1e4bff03e3e..5a10447f36f 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -190,7 +190,7 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err) } - return nil + return ErrOK case <-done: // clean shutdown, nothing to do } diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 56ddf74a45a..82517c31aff 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -25,6 +25,8 @@ func init() { _, _ = maxprocs.Set() } +var ErrOK = errors.New("ok") + // cmdRoot is the base command when no other command has been specified. 
var cmdRoot = &cobra.Command{ Use: "restic", @@ -125,6 +127,9 @@ func main() { if err == nil { err = ctx.Err() + } else if err == ErrOK { + // ErrOK overwrites context cancelation errors + err = nil } switch { From 31624aeffd4cf9a06afbb16db7ffd7e899187bb6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Mar 2024 00:19:58 +0100 Subject: [PATCH 138/893] Improve command shutdown on context cancellation --- cmd/restic/cmd_check.go | 14 +++++++++++++- cmd/restic/cmd_copy.go | 5 ++++- cmd/restic/cmd_find.go | 3 +++ cmd/restic/cmd_forget.go | 7 +++++++ cmd/restic/cmd_prune.go | 3 +++ cmd/restic/cmd_recover.go | 6 ++++++ cmd/restic/cmd_repair_snapshots.go | 3 +++ cmd/restic/cmd_rewrite.go | 3 +++ cmd/restic/cmd_snapshots.go | 3 +++ cmd/restic/cmd_stats.go | 5 ++--- cmd/restic/cmd_tag.go | 3 +++ internal/archiver/archiver.go | 1 + internal/archiver/tree_saver.go | 4 ++++ internal/index/master_index.go | 3 +++ internal/repository/prune.go | 18 ++++++++++++++++++ internal/repository/repack.go | 2 +- internal/repository/repository.go | 3 +++ 17 files changed, 80 insertions(+), 6 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 83ebf89a6f5..38623c3059c 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -219,6 +219,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args Verbosef("load indexes\n") bar := newIndexProgress(gopts.Quiet, gopts.JSON) hints, errs := chkr.LoadIndex(ctx, bar) + if ctx.Err() != nil { + return ctx.Err() + } errorsFound := false suggestIndexRebuild := false @@ -280,6 +283,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args if orphanedPacks > 0 { Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("check snapshots, trees and blobs\n") errChan = make(chan error) @@ -313,6 +319,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args // Must happen after `errChan` is read from in the above loop to avoid // deadlocking in the case of errors. 
wg.Wait() + if ctx.Err() != nil { + return ctx.Err() + } if opts.CheckUnused { for _, id := range chkr.UnusedBlobs(ctx) { @@ -392,10 +401,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args doReadData(packs) } + if ctx.Err() != nil { + return ctx.Err() + } + if errorsFound { return errors.Fatal("repository contains errors") } - Verbosef("no errors were found\n") return nil diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index de3958defa3..ad6c58a2526 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -103,6 +103,9 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] // also consider identical snapshot copies dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn) } + if ctx.Err() != nil { + return ctx.Err() + } // remember already processed trees across all snapshots visitedTrees := restic.NewIDSet() @@ -147,7 +150,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] } Verbosef("snapshot %s saved\n", newID.Str()) } - return nil + return ctx.Err() } func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool { diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index e29fe30dc60..77b651c5ed9 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -608,6 +608,9 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) { filteredSnapshots = append(filteredSnapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } sort.Slice(filteredSnapshots, func(i, j int) bool { return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 9018da21173..92eeed4a174 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -188,6 +188,9 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { snapshots = append(snapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } var jsonGroups []*ForgetGroup @@ -270,6 +273,10 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } } + if ctx.Err() != nil { + return ctx.Err() + } + if len(removeSnIDs) > 0 { if !opts.DryRun { bar := printer.NewCounter("files deleted") diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index ea5acddf337..cbec100df05 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -197,6 +197,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption if err != nil { return err } + if ctx.Err() != nil { + return ctx.Err() + } if popts.DryRun { printer.P("\nWould have made the following changes:") diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index f9a4d419d22..cac29a60c3c 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -66,11 +66,17 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { trees[blob.Blob.ID] = false } }) + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("load %d trees\n", len(trees)) bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded") for id := range trees { tree, err := restic.LoadTree(ctx, repo, id) + if ctx.Err() != nil { + return ctx.Err() + } if err != nil { Warnf("unable to load tree %v: %v\n", id.Str(), err) continue diff --git 
a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index 4d9745e1550..b200d100adc 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -145,6 +145,9 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt changedCount++ } } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("\n") if changedCount == 0 { diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 06d4ddbd177..38a868c5c97 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -294,6 +294,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a changedCount++ } } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("\n") if changedCount == 0 { diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 1a9cd2232b2..faa86d3a6bc 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -69,6 +69,9 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { snapshots = append(snapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy) if err != nil { return err diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 6bf0dbf1935..2647d78e515 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -117,9 +117,8 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args return fmt.Errorf("error walking snapshot: %v", err) } } - - if err != nil { - return err + if ctx.Err() != nil { + return ctx.Err() } if opts.countMode == countModeRawData { diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index b0d139fa673..3bf386f2cd3 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -122,6 +122,9 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st changeCnt++ } } + if ctx.Err() != nil { + return ctx.Err() + } if changeCnt == 0 { Verbosef("no snapshots were modified\n") } else { diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 146ff3a7ccb..c1f73eea664 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -380,6 +380,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult { return res } case <-ctx.Done(): + return futureNodeResult{err: ctx.Err()} } return futureNodeResult{err: errors.Errorf("no result")} } diff --git a/internal/archiver/tree_saver.go b/internal/archiver/tree_saver.go index eae524a7805..9c11b48f060 100644 --- a/internal/archiver/tree_saver.go +++ b/internal/archiver/tree_saver.go @@ -90,6 +90,10 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I // return the error if it wasn't ignored if fnr.err != nil { debug.Log("err for %v: %v", fnr.snPath, fnr.err) + if fnr.err == context.Canceled { + return nil, stats, fnr.err + } + fnr.err = s.errFn(fnr.target, fnr.err) if fnr.err == nil { // ignore error diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 4c114b955d8..9833f9a5510 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -320,6 +320,9 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, exclude newIndex = NewIndex() } } + if wgCtx.Err() != nil { + return wgCtx.Err() + } } err := newIndex.AddToSupersedes(extraObsolete...) 
diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 8900fffaa4f..39eb3031767 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -130,6 +130,9 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g } keepBlobs.Delete(blob.BlobHandle) }) + if ctx.Err() != nil { + return nil, ctx.Err() + } if keepBlobs.Len() < blobCount/2 { // replace with copy to shrink map to necessary size if there's a chance to benefit @@ -166,6 +169,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re usedBlobs[bh] = count } }) + if ctx.Err() != nil { + return nil, nil, ctx.Err() + } // Check if all used blobs have been found in index missingBlobs := restic.NewBlobSet() @@ -240,6 +246,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // update indexPack indexPack[blob.PackID] = ip }) + if ctx.Err() != nil { + return nil, nil, ctx.Err() + } // if duplicate blobs exist, those will be set to either "used" or "unused": // - mark only one occurrence of duplicate blobs as used @@ -286,6 +295,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re indexPack[blob.PackID] = ip }) } + if ctx.Err() != nil { + return nil, nil, ctx.Err() + } // Sanity check. If no duplicates exist, all blobs have value 1. After handling // duplicates, this also applies to duplicates. @@ -528,6 +540,9 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e printer.P("deleting unreferenced packs\n") _ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer) } + if ctx.Err() != nil { + return ctx.Err() + } if len(plan.repackPacks) != 0 { printer.P("repacking packs\n") @@ -578,6 +593,9 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e printer.P("removing %d old packs\n", len(plan.removePacks)) _ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer) } + if ctx.Err() != nil { + return ctx.Err() + } if plan.opts.UnsafeRecovery { err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) diff --git a/internal/repository/repack.go b/internal/repository/repack.go index 5588984f6f9..53656252a54 100644 --- a/internal/repository/repack.go +++ b/internal/repository/repack.go @@ -72,7 +72,7 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito return wgCtx.Err() } } - return nil + return wgCtx.Err() }) worker := func() error { diff --git a/internal/repository/repository.go b/internal/repository/repository.go index ae4528d80bc..a4397126673 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -713,6 +713,9 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { return errors.New("index uses feature not supported by repository version 1") } } + if ctx.Err() != nil { + return ctx.Err() + } // remove index files from the cache which have been removed in the repo return r.prepareCache() From 940a3159b5b3bf84ae84c4a1766f6c2359344804 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 5 Apr 2024 22:20:14 +0200 Subject: [PATCH 139/893] let index.Each() and pack.Size() return error on canceled context This forces a caller to actually check that the function did complete. 
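For illustration only (a minimal sketch, not part of the diff below; `repo` stands for an already opened repository as used elsewhere in cmd/restic): callers that previously ignored the iteration now have to check the returned error, roughly

    err := repo.Index().Each(ctx, func(pb restic.PackedBlob) {
        // process each blob; iteration stops once ctx is canceled
    })
    if err != nil {
        return err // Each returns ctx.Err() when the iteration was canceled
    }

This mirrors the adjustments made to callers such as cmd_list.go, cmd_recover.go and the checker in the changes below.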
--- cmd/restic/cmd_check.go | 6 ++++- cmd/restic/cmd_find.go | 14 +++++++---- cmd/restic/cmd_list.go | 3 +-- cmd/restic/cmd_recover.go | 6 ++--- cmd/restic/cmd_stats.go | 11 +++++---- cmd/restic/integration_helpers_test.go | 8 +++---- internal/checker/checker.go | 26 ++++++++++++-------- internal/checker/checker_test.go | 3 ++- internal/checker/testing.go | 5 +++- internal/index/index.go | 3 ++- internal/index/index_test.go | 8 +++---- internal/index/master_index.go | 16 +++++++------ internal/index/master_index_test.go | 12 +++++----- internal/pack/pack.go | 6 ++--- internal/repository/prune.go | 32 ++++++++++++++----------- internal/repository/repair_index.go | 5 +++- internal/repository/repair_pack_test.go | 2 +- internal/repository/repository.go | 5 +++- internal/repository/repository_test.go | 4 ++-- internal/restic/repository.go | 4 ++-- 20 files changed, 107 insertions(+), 72 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 38623c3059c..c44edae7e79 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -324,7 +324,11 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } if opts.CheckUnused { - for _, id := range chkr.UnusedBlobs(ctx) { + unused, err := chkr.UnusedBlobs(ctx) + if err != nil { + return err + } + for _, id := range unused { Verbosef("unused blob %v\n", id) errorsFound = true } diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 77b651c5ed9..81df0ab9882 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -439,7 +439,10 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error { if err != errAllPacksFound { // try to resolve unknown pack ids from the index - packIDs = f.indexPacksToBlobs(ctx, packIDs) + packIDs, err = f.indexPacksToBlobs(ctx, packIDs) + if err != nil { + return err + } } if len(packIDs) > 0 { @@ -456,13 +459,13 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error { return nil } -func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) map[string]struct{} { +func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) (map[string]struct{}, error) { wctx, cancel := context.WithCancel(ctx) defer cancel() // remember which packs were found in the index indexPackIDs := make(map[string]struct{}) - f.repo.Index().Each(wctx, func(pb restic.PackedBlob) { + err := f.repo.Index().Each(wctx, func(pb restic.PackedBlob) { idStr := pb.PackID.String() // keep entry in packIDs as Each() returns individual index entries matchingID := false @@ -481,6 +484,9 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc indexPackIDs[idStr] = struct{}{} } }) + if err != nil { + return nil, err + } for id := range indexPackIDs { delete(packIDs, id) @@ -493,7 +499,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc } Warnf("some pack files are missing from the repository, getting their blobs from the repository index: %v\n\n", list) } - return packIDs + return packIDs, nil } func (f *Finder) findObjectPack(id string, t restic.BlobType) { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index a3df0c98f5d..27f59b4ab17 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -59,10 +59,9 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { if err != nil { return err } - idx.Each(ctx, func(blobs restic.PackedBlob) { + return idx.Each(ctx, func(blobs restic.PackedBlob) { 
Printf("%v %v\n", blobs.Type, blobs.ID) }) - return nil }) default: return errors.Fatal("invalid type") diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index cac29a60c3c..debaa4e5b5f 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -61,13 +61,13 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { // tree. If it is not referenced, we have a root tree. trees := make(map[restic.ID]bool) - repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err = repo.Index().Each(ctx, func(blob restic.PackedBlob) { if blob.Type == restic.TreeBlob { trees[blob.Blob.ID] = false } }) - if ctx.Err() != nil { - return ctx.Err() + if err != nil { + return err } Verbosef("load %d trees\n", len(trees)) diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 2647d78e515..a7891e5b036 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -351,7 +351,10 @@ func statsDebug(ctx context.Context, repo restic.Repository) error { Warnf("File Type: %v\n%v\n", t, hist) } - hist := statsDebugBlobs(ctx, repo) + hist, err := statsDebugBlobs(ctx, repo) + if err != nil { + return err + } for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { Warnf("Blob Type: %v\n%v\n\n", t, hist[t]) } @@ -369,17 +372,17 @@ func statsDebugFileType(ctx context.Context, repo restic.Lister, tpe restic.File return hist, err } -func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram { +func statsDebugBlobs(ctx context.Context, repo restic.Repository) ([restic.NumBlobTypes]*sizeHistogram, error) { var hist [restic.NumBlobTypes]*sizeHistogram for i := 0; i < len(hist); i++ { hist[i] = newSizeHistogram(2 * chunker.MaxSize) } - repo.Index().Each(ctx, func(pb restic.PackedBlob) { + err := repo.Index().Each(ctx, func(pb restic.PackedBlob) { hist[pb.Type].Add(uint64(pb.Length)) }) - return hist + return hist, err } type sizeClass struct { diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index c87e1071e71..e7a90dd560a 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -252,11 +252,11 @@ func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet { rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - r.Index().Each(ctx, func(pb restic.PackedBlob) { + rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } - }) + })) return treePacks } @@ -280,11 +280,11 @@ func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, rem rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - r.Index().Each(ctx, func(pb restic.PackedBlob) { + rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } - }) + })) // remove all packs containing data blobs rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 28f55ce3ad2..1057341bc73 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -106,9 +106,9 @@ func (c *Checker) LoadSnapshots(ctx context.Context) error { return err } -func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID]restic.BlobType { +func computePackTypes(ctx context.Context, idx restic.MasterIndex) (map[restic.ID]restic.BlobType, error) { packs := make(map[restic.ID]restic.BlobType) - 
idx.Each(ctx, func(pb restic.PackedBlob) { + err := idx.Each(ctx, func(pb restic.PackedBlob) { tpe, exists := packs[pb.PackID] if exists { if pb.Type != tpe { @@ -119,7 +119,7 @@ func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID } packs[pb.PackID] = tpe }) - return packs + return packs, err } // LoadIndex loads all index files. @@ -169,7 +169,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e debug.Log("process blobs") cnt := 0 - index.Each(ctx, func(blob restic.PackedBlob) { + err = index.Each(ctx, func(blob restic.PackedBlob) { cnt++ if _, ok := packToIndex[blob.PackID]; !ok { @@ -179,7 +179,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e }) debug.Log("%d blobs processed", cnt) - return nil + return err }) if err != nil { errs = append(errs, err) @@ -193,8 +193,14 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e } // compute pack size using index entries - c.packs = pack.Size(ctx, c.masterIndex, false) - packTypes := computePackTypes(ctx, c.masterIndex) + c.packs, err = pack.Size(ctx, c.masterIndex, false) + if err != nil { + return hints, append(errs, err) + } + packTypes, err := computePackTypes(ctx, c.masterIndex) + if err != nil { + return hints, append(errs, err) + } debug.Log("checking for duplicate packs") for packID := range c.packs { @@ -484,7 +490,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { } // UnusedBlobs returns all blobs that have never been referenced. -func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { +func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles, err error) { if !c.trackUnused { panic("only works when tracking blob references") } @@ -495,7 +501,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { ctx, cancel := context.WithCancel(ctx) defer cancel() - c.repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err = c.repo.Index().Each(ctx, func(blob restic.PackedBlob) { h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} if !c.blobRefs.M.Has(h) { debug.Log("blob %v not referenced", h) @@ -503,7 +509,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { } }) - return blobs + return blobs, err } // CountPacks returns the number of packs in the repository. 
diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index b0fa4e3e386..9746e9f5d8b 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -180,7 +180,8 @@ func TestUnreferencedBlobs(t *testing.T) { test.OKs(t, checkPacks(chkr)) test.OKs(t, checkStruct(chkr)) - blobs := chkr.UnusedBlobs(context.TODO()) + blobs, err := chkr.UnusedBlobs(context.TODO()) + test.OK(t, err) sort.Sort(blobs) test.Equals(t, unusedBlobsBySnapshot, blobs) diff --git a/internal/checker/testing.go b/internal/checker/testing.go index 9e949af026f..d0014398ff7 100644 --- a/internal/checker/testing.go +++ b/internal/checker/testing.go @@ -43,7 +43,10 @@ func TestCheckRepo(t testing.TB, repo restic.Repository, skipStructure bool) { } // unused blobs - blobs := chkr.UnusedBlobs(context.TODO()) + blobs, err := chkr.UnusedBlobs(context.TODO()) + if err != nil { + t.Error(err) + } if len(blobs) > 0 { t.Errorf("unused blobs found: %v", blobs) } diff --git a/internal/index/index.go b/internal/index/index.go index 73128f7bb21..1c20fe38d60 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -218,7 +218,7 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error { // Each passes all blobs known to the index to the callback fn. This blocks any // modification of the index. -func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) { +func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) error { idx.m.Lock() defer idx.m.Unlock() @@ -232,6 +232,7 @@ func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) { return true }) } + return ctx.Err() } type EachByPackResult struct { diff --git a/internal/index/index_test.go b/internal/index/index_test.go index 78e4800cac3..bafd95c4838 100644 --- a/internal/index/index_test.go +++ b/internal/index/index_test.go @@ -339,7 +339,7 @@ func TestIndexUnserialize(t *testing.T) { rtest.Equals(t, oldIdx, idx.Supersedes()) - blobs := listPack(idx, exampleLookupTest.packID) + blobs := listPack(t, idx, exampleLookupTest.packID) if len(blobs) != len(exampleLookupTest.blobs) { t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs)) } @@ -356,12 +356,12 @@ func TestIndexUnserialize(t *testing.T) { } } -func listPack(idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) { - idx.Each(context.TODO(), func(pb restic.PackedBlob) { +func listPack(t testing.TB, idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) { + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { if pb.PackID.Equal(id) { pbs = append(pbs, pb) } - }) + })) return pbs } diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 9833f9a5510..d99a3434df1 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -223,13 +223,16 @@ func (mi *MasterIndex) finalizeFullIndexes() []*Index { // Each runs fn on all blobs known to the index. When the context is cancelled, // the index iteration return immediately. This blocks any modification of the index. -func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) { +func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) error { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() for _, idx := range mi.idx { - idx.Each(ctx, fn) + if err := idx.Each(ctx, fn); err != nil { + return err + } } + return nil } // MergeFinalIndexes merges all final indexes together. 
@@ -429,10 +432,6 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan defer close(out) // only resort a part of the index to keep the memory overhead bounded for i := byte(0); i < 16; i++ { - if ctx.Err() != nil { - return - } - packBlob := make(map[restic.ID][]restic.Blob) for pack := range packs { if pack[0]&0xf == i { @@ -442,11 +441,14 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan if len(packBlob) == 0 { continue } - mi.Each(ctx, func(pb restic.PackedBlob) { + err := mi.Each(ctx, func(pb restic.PackedBlob) { if packs.Has(pb.PackID) && pb.PackID[0]&0xf == i { packBlob[pb.PackID] = append(packBlob[pb.PackID], pb.Blob) } }) + if err != nil { + return + } // pass on packs for packID, pbs := range packBlob { diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go index dcf6a94f6e9..fe0364c61dd 100644 --- a/internal/index/master_index_test.go +++ b/internal/index/master_index_test.go @@ -166,9 +166,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) { rtest.Equals(t, 1, idxCount) blobCount := 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { blobCount++ - }) + })) rtest.Equals(t, 2, blobCount) blobs := mIdx.Lookup(bhInIdx1) @@ -198,9 +198,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) { rtest.Equals(t, []restic.PackedBlob{blob2}, blobs) blobCount = 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { blobCount++ - }) + })) rtest.Equals(t, 2, blobCount) } @@ -319,9 +319,9 @@ func BenchmarkMasterIndexEach(b *testing.B) { for i := 0; i < b.N; i++ { entries := 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(b, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { entries++ - }) + })) } } diff --git a/internal/pack/pack.go b/internal/pack/pack.go index cd118ab032a..53631a6fb73 100644 --- a/internal/pack/pack.go +++ b/internal/pack/pack.go @@ -389,10 +389,10 @@ func CalculateHeaderSize(blobs []restic.Blob) int { // If onlyHdr is set to true, only the size of the header is returned // Note that this function only gives correct sizes, if there are no // duplicates in the index. 
-func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.ID]int64 { +func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) (map[restic.ID]int64, error) { packSize := make(map[restic.ID]int64) - mi.Each(ctx, func(blob restic.PackedBlob) { + err := mi.Each(ctx, func(blob restic.PackedBlob) { size, ok := packSize[blob.PackID] if !ok { size = headerSize @@ -403,5 +403,5 @@ func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.I packSize[blob.PackID] = size + int64(CalculateEntrySize(blob.Blob)) }) - return packSize + return packSize, err } diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 39eb3031767..77811e3214f 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -124,14 +124,14 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g blobCount := keepBlobs.Len() // when repacking, we do not want to keep blobs which are // already contained in kept packs, so delete them from keepBlobs - repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err := repo.Index().Each(ctx, func(blob restic.PackedBlob) { if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { return } keepBlobs.Delete(blob.BlobHandle) }) - if ctx.Err() != nil { - return nil, ctx.Err() + if err != nil { + return nil, err } if keepBlobs.Len() < blobCount/2 { @@ -155,7 +155,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // iterate over all blobs in index to find out which blobs are duplicates // The counter in usedBlobs describes how many instances of the blob exist in the repository index // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist - idx.Each(ctx, func(blob restic.PackedBlob) { + err := idx.Each(ctx, func(blob restic.PackedBlob) { bh := blob.BlobHandle count, ok := usedBlobs[bh] if ok { @@ -169,8 +169,8 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re usedBlobs[bh] = count } }) - if ctx.Err() != nil { - return nil, nil, ctx.Err() + if err != nil { + return nil, nil, err } // Check if all used blobs have been found in index @@ -194,14 +194,18 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re indexPack := make(map[restic.ID]packInfo) // save computed pack header size - for pid, hdrSize := range pack.Size(ctx, idx, true) { + sz, err := pack.Size(ctx, idx, true) + if err != nil { + return nil, nil, err + } + for pid, hdrSize := range sz { // initialize tpe with NumBlobTypes to indicate it's not set indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)} } hasDuplicates := false // iterate over all blobs in index to generate packInfo - idx.Each(ctx, func(blob restic.PackedBlob) { + err = idx.Each(ctx, func(blob restic.PackedBlob) { ip := indexPack[blob.PackID] // Set blob type if not yet set @@ -246,8 +250,8 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // update indexPack indexPack[blob.PackID] = ip }) - if ctx.Err() != nil { - return nil, nil, ctx.Err() + if err != nil { + return nil, nil, err } // if duplicate blobs exist, those will be set to either "used" or "unused": @@ -256,7 +260,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // - if there are no used blobs in a pack, possibly mark duplicates as "unused" if hasDuplicates { // iterate again over all blobs in index (this is pretty cheap, all in-mem) - idx.Each(ctx, func(blob 
restic.PackedBlob) { + err = idx.Each(ctx, func(blob restic.PackedBlob) { bh := blob.BlobHandle count, ok := usedBlobs[bh] // skip non-duplicate, aka. normal blobs @@ -294,9 +298,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // update indexPack indexPack[blob.PackID] = ip }) - } - if ctx.Err() != nil { - return nil, nil, ctx.Err() + if err != nil { + return nil, nil, err + } } // Sanity check. If no duplicates exist, all blobs have value 1. After handling diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index 63e10413278..a6e732b4452 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -54,7 +54,10 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, if err != nil { return err } - packSizeFromIndex = pack.Size(ctx, repo.Index(), false) + packSizeFromIndex, err = pack.Size(ctx, repo.Index(), false) + if err != nil { + return err + } } printer.P("getting pack files to read...\n") diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index c5cdf5ed52e..078017d213f 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -17,7 +17,7 @@ import ( func listBlobs(repo restic.Repository) restic.BlobSet { blobs := restic.NewBlobSet() - repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + _ = repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) { blobs.Insert(pb.BlobHandle) }) return blobs diff --git a/internal/repository/repository.go b/internal/repository/repository.go index a4397126673..cac1551c441 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -704,11 +704,14 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { defer cancel() invalidIndex := false - r.idx.Each(ctx, func(blob restic.PackedBlob) { + err := r.idx.Each(ctx, func(blob restic.PackedBlob) { if blob.IsCompressed() { invalidIndex = true } }) + if err != nil { + return err + } if invalidIndex { return errors.New("index uses feature not supported by repository version 1") } diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index b013c482362..48a56a1fd51 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -370,13 +370,13 @@ func testRepositoryIncrementalIndex(t *testing.T, version uint) { idx, err := loadIndex(context.TODO(), repo, id) rtest.OK(t, err) - idx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { if _, ok := packEntries[pb.PackID]; !ok { packEntries[pb.PackID] = make(map[restic.ID]struct{}) } packEntries[pb.PackID][id] = struct{}{} - }) + })) return nil }) if err != nil { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 89c54ffbb1b..7a3389e00d0 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -103,8 +103,8 @@ type MasterIndex interface { Lookup(BlobHandle) []PackedBlob // Each runs fn on all blobs known to the index. When the context is cancelled, - // the index iteration return immediately. This blocks any modification of the index. - Each(ctx context.Context, fn func(PackedBlob)) + // the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. 
+ Each(ctx context.Context, fn func(PackedBlob)) error ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs Save(ctx context.Context, repo Repository, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error From 484dbb1cf49d6348ff0d35e0bf88126e851e63ba Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Mar 2024 14:28:59 +0100 Subject: [PATCH 140/893] get rid of a few global variables --- cmd/restic/global.go | 2 +- cmd/restic/main.go | 3 +-- helpers/prepare-release/main.go | 6 +++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 5b21871dc5b..40083ad6982 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -43,7 +43,7 @@ import ( "golang.org/x/term" ) -var version = "0.16.4-dev (compiled manually)" +const version = "0.16.4-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 82517c31aff..e847b815674 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -94,8 +94,6 @@ func needsPassword(cmd string) bool { } } -var logBuffer = bytes.NewBuffer(nil) - func tweakGoGC() { // lower GOGC from 100 to 50, unless it was manually overwritten by the user oldValue := godebug.SetGCPercent(50) @@ -108,6 +106,7 @@ func main() { tweakGoGC() // install custom global logger into a buffer, if an error occurs // we can show the logs + logBuffer := bytes.NewBuffer(nil) log.SetOutput(logBuffer) err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) { diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go index baf8aa2baeb..703d85e7007 100644 --- a/helpers/prepare-release/main.go +++ b/helpers/prepare-release/main.go @@ -303,7 +303,7 @@ func generateFiles() { } } -var versionPattern = `var version = ".*"` +var versionPattern = `const version = ".*"` const versionCodeFile = "cmd/restic/global.go" @@ -313,7 +313,7 @@ func updateVersion() { die("unable to write version to file: %v", err) } - newVersion := fmt.Sprintf("var version = %q", opts.Version) + newVersion := fmt.Sprintf("const version = %q", opts.Version) replace(versionCodeFile, versionPattern, newVersion) if len(uncommittedChanges("VERSION")) > 0 || len(uncommittedChanges(versionCodeFile)) > 0 { @@ -323,7 +323,7 @@ func updateVersion() { } func updateVersionDev() { - newVersion := fmt.Sprintf(`var version = "%s-dev (compiled manually)"`, opts.Version) + newVersion := fmt.Sprintf(`const version = "%s-dev (compiled manually)"`, opts.Version) replace(versionCodeFile, versionPattern, newVersion) msg("committing cmd/restic/global.go with dev version") From 5f263752d7f41bbec8e889e3e9bb27c5a20ad2ff Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 24 Apr 2024 20:42:30 +0200 Subject: [PATCH 141/893] init: also apply limiter for non-HTTP backend --- cmd/restic/global.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index c93fb4bce29..eded479ada6 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -598,7 +598,7 @@ func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options. 
var be backend.Backend if create { - be, err = factory.Create(ctx, cfg, rt, nil) + be, err = factory.Create(ctx, cfg, rt, lim) } else { be, err = factory.Open(ctx, cfg, rt, lim) } From 871ea1eaf3dcd8da4641401425a10953d8fbcd27 Mon Sep 17 00:00:00 2001 From: Altan Orhon Date: Mon, 18 Mar 2024 13:20:45 -0700 Subject: [PATCH 142/893] Add support for specifying --host via environment variable This commit adds support for specifying the `--host` option via the `RESTIC_HOST` environment variable. This is done by extending option processing in `cmd_backup.go` and for `restic.SnapshotFilter` in `find.go`. --- changelog/unreleased/issue-4733 | 9 +++++++++ cmd/restic/cmd_backup.go | 7 ++++++- cmd/restic/find.go | 19 +++++++++++++++++-- 3 files changed, 32 insertions(+), 3 deletions(-) create mode 100644 changelog/unreleased/issue-4733 diff --git a/changelog/unreleased/issue-4733 b/changelog/unreleased/issue-4733 new file mode 100644 index 00000000000..250c2ba6899 --- /dev/null +++ b/changelog/unreleased/issue-4733 @@ -0,0 +1,9 @@ +Enhancement: Allow specifying `--host` via environment variable + +Restic commands that operate on snapshots, such as `restic backup` and +`restic snapshots`, support the `--host` flag to specify the hostname for +grouoping snapshots. They now permit selecting the hostname via the +environment variable `RESTIC_HOST`. `--host` still takes precedence over the +environment variable. + +https://github.com/restic/restic/issues/4733 \ No newline at end of file diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 8b2f1f8083e..d3e5a8546af 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -114,7 +114,7 @@ func init() { f.BoolVar(&backupOptions.StdinCommand, "stdin-from-command", false, "interpret arguments as command to execute and store its stdout") f.Var(&backupOptions.Tags, "tag", "add `tags` for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times)") f.UintVar(&backupOptions.ReadConcurrency, "read-concurrency", 0, "read `n` files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)") - f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag") + f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually (default: $RESTIC_HOST). 
To prevent an expensive rescan use the \"parent\" flag") f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually") err := f.MarkDeprecated("hostname", "use --host") if err != nil { @@ -137,6 +137,11 @@ func init() { // parse read concurrency from env, on error the default value will be used readConcurrency, _ := strconv.ParseUint(os.Getenv("RESTIC_READ_CONCURRENCY"), 10, 32) backupOptions.ReadConcurrency = uint(readConcurrency) + + // parse host from env, if not exists or empty the default value will be used + if host := os.Getenv("RESTIC_HOST"); host != "" { + backupOptions.Host = host + } } // filterExisting returns a slice of all existing items, or an error if no diff --git a/cmd/restic/find.go b/cmd/restic/find.go index a990b458d6f..7c28b3be5d1 100644 --- a/cmd/restic/find.go +++ b/cmd/restic/find.go @@ -2,6 +2,7 @@ package main import ( "context" + "os" "github.com/restic/restic/internal/restic" "github.com/spf13/pflag" @@ -14,17 +15,31 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter, if !addHostShorthand { hostShorthand = "" } - flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times)") + flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times) (default: $RESTIC_HOST)") flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)") flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)") + + if len(filt.Hosts) == 0 { + // parse host from env, if not exists or empty the default value will be used + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} + } + } } // initSingleSnapshotFilter is used for commands that work on a single snapshot // MUST be combined with restic.FindFilteredSnapshot func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) { - flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times)") + flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times) (default: $RESTIC_HOST)") flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)") flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)") + + if len(filt.Hosts) == 0 { + // parse host from env, if not exists or empty the default value will be used + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} + } + } } // FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots. 
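The `RESTIC_HOST` handling added above follows a simple pflag pattern: register the flag, seed its value from the environment, then parse, so an explicit `--host` still wins because parsing only overwrites the value when the flag is actually given on the command line. A minimal, self-contained sketch of that pattern (illustrative example code with assumed names, not restic's own source):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)

	var hosts []string
	flags.StringArrayVarP(&hosts, "host", "H", nil,
		"only consider snapshots for this host (default: $RESTIC_HOST)")

	// Seed the default from the environment after registering the flag and
	// before parsing; Parse only touches the value if --host was passed,
	// so the command-line flag takes precedence over RESTIC_HOST.
	if host := os.Getenv("RESTIC_HOST"); host != "" {
		hosts = []string{host}
	}

	if err := flags.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Println("hosts:", hosts)
}

With this sketch, RESTIC_HOST=alpha ./example prints hosts: [alpha], while RESTIC_HOST=alpha ./example --host beta prints hosts: [beta], matching the precedence described in the changelog entry above.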
From 347e9d07657f358c297a21aabb6224c38502a08f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 24 Apr 2024 21:49:34 +0200 Subject: [PATCH 143/893] complete RESITC_HOST environment handling & test --- changelog/unreleased/issue-4733 | 3 +- cmd/restic/find.go | 16 ++++----- cmd/restic/find_test.go | 61 +++++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 11 deletions(-) create mode 100644 cmd/restic/find_test.go diff --git a/changelog/unreleased/issue-4733 b/changelog/unreleased/issue-4733 index 250c2ba6899..1fc2715870e 100644 --- a/changelog/unreleased/issue-4733 +++ b/changelog/unreleased/issue-4733 @@ -6,4 +6,5 @@ grouoping snapshots. They now permit selecting the hostname via the environment variable `RESTIC_HOST`. `--host` still takes precedence over the environment variable. -https://github.com/restic/restic/issues/4733 \ No newline at end of file +https://github.com/restic/restic/issues/4733 +https://github.com/restic/restic/pull/4734 diff --git a/cmd/restic/find.go b/cmd/restic/find.go index 7c28b3be5d1..c7754d5d9bf 100644 --- a/cmd/restic/find.go +++ b/cmd/restic/find.go @@ -19,11 +19,9 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter, flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)") flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)") - if len(filt.Hosts) == 0 { - // parse host from env, if not exists or empty the default value will be used - if host := os.Getenv("RESTIC_HOST"); host != "" { - filt.Hosts = []string{host} - } + // set default based on env if set + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} } } @@ -34,11 +32,9 @@ func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)") flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)") - if len(filt.Hosts) == 0 { - // parse host from env, if not exists or empty the default value will be used - if host := os.Getenv("RESTIC_HOST"); host != "" { - filt.Hosts = []string{host} - } + // set default based on env if set + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} } } diff --git a/cmd/restic/find_test.go b/cmd/restic/find_test.go new file mode 100644 index 00000000000..a98a14f041d --- /dev/null +++ b/cmd/restic/find_test.go @@ -0,0 +1,61 @@ +package main + +import ( + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/spf13/pflag" +) + +func TestSnapshotFilter(t *testing.T) { + for _, test := range []struct { + name string + args []string + expected []string + env string + }{ + { + "no value", + []string{}, + nil, + "", + }, + { + "args only", + []string{"--host", "abc"}, + []string{"abc"}, + "", + }, + { + "env default", + []string{}, + []string{"def"}, + "def", + }, + { + "both", + []string{"--host", "abc"}, + []string{"abc"}, + "def", + }, + } { + t.Run(test.name, func(t *testing.T) { + t.Setenv("RESTIC_HOST", test.env) + + for _, mode := range []bool{false, true} { + set := pflag.NewFlagSet("test", pflag.PanicOnError) + flt := &restic.SnapshotFilter{} + if mode { + 
initMultiSnapshotFilter(set, flt, false) + } else { + initSingleSnapshotFilter(set, flt) + } + err := set.Parse(test.args) + rtest.OK(t, err) + + rtest.Equals(t, test.expected, flt.Hosts, "unexpected hosts") + } + }) + } +} From 669a66960330cd2d822b4c83f7d3f1a8ae536013 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 28 Apr 2024 11:48:26 +0200 Subject: [PATCH 144/893] sftp: Fix upload performance issue Since pkg/sftp 1.13.0 files were uploaded sequentially using 32kb chunks instead of sending 64 chunks in parallel. --- internal/backend/sftp/sftp.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index 0a94e4aa3c9..389abab672c 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -102,7 +102,10 @@ func startClient(cfg Config) (*SFTP, error) { }() // open the SFTP session - client, err := sftp.NewClientPipe(rd, wr) + client, err := sftp.NewClientPipe(rd, wr, + // write multiple packets (32kb) in parallel per file + // not strictly necessary as we use ReadFromWithConcurrency + sftp.UseConcurrentWrites(true)) if err != nil { return nil, errors.Errorf("unable to start the sftp session, error: %v", err) } @@ -359,7 +362,7 @@ func (r *SFTP) Save(_ context.Context, h backend.Handle, rd backend.RewindReader }() // save data, make sure to use the optimized sftp upload method - wbytes, err := f.ReadFrom(rd) + wbytes, err := f.ReadFromWithConcurrency(rd, 0) if err != nil { _ = f.Close() err = r.checkNoSpace(dirname, rd.Length(), err) From 935327d480507d4bf88d548555cb3e2ac07677a5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 28 Apr 2024 11:50:09 +0200 Subject: [PATCH 145/893] sftp: slightly increase write concurrency This should increase upload throughput for high latency links a bit. --- internal/backend/sftp/sftp.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index 389abab672c..efd66f76cb8 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -105,7 +105,9 @@ func startClient(cfg Config) (*SFTP, error) { client, err := sftp.NewClientPipe(rd, wr, // write multiple packets (32kb) in parallel per file // not strictly necessary as we use ReadFromWithConcurrency - sftp.UseConcurrentWrites(true)) + sftp.UseConcurrentWrites(true), + // increase send buffer per file to 4MB + sftp.MaxConcurrentRequestsPerFile(128)) if err != nil { return nil, errors.Errorf("unable to start the sftp session, error: %v", err) } From a1d682ce0e31375e39478a374328987f7e256eea Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 28 Apr 2024 11:58:08 +0200 Subject: [PATCH 146/893] add changelog for sftp performance fix --- changelog/unreleased/issue-4209 | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 changelog/unreleased/issue-4209 diff --git a/changelog/unreleased/issue-4209 b/changelog/unreleased/issue-4209 new file mode 100644 index 00000000000..2e49191c1e5 --- /dev/null +++ b/changelog/unreleased/issue-4209 @@ -0,0 +1,7 @@ +Bugfix: Fix slow sftp upload performance + +Since restic 0.12.1, the upload speed of the sftp backend to a remote server +has regressed significantly. This has been fixed. 
+ +https://github.com/restic/restic/issues/4209 +https://github.com/restic/restic/pull/4782 From 78dbc5ec5871a9995a25abe7c091fc1a9f104417 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 03:41:02 +0300 Subject: [PATCH 147/893] vss: Add initial support for extended options --- cmd/restic/cmd_backup.go | 13 +++++++++++-- internal/fs/fs_local_vss.go | 30 +++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index d3e5a8546af..5329a928cab 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -445,7 +445,16 @@ func findParentSnapshot(ctx context.Context, repo restic.ListerLoaderUnpacked, o } func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - err := opts.Check(gopts, args) + var vsscfg fs.VSSConfig + var err error + + if runtime.GOOS == "windows" { + if vsscfg, err = fs.ParseVSSConfig(gopts.extended); err != nil { + return err + } + } + + err = opts.Check(gopts, args) if err != nil { return err } @@ -557,7 +566,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } } - localVss := fs.NewLocalVss(errorHandler, messageHandler) + localVss := fs.NewLocalVss(errorHandler, messageHandler, vsscfg) defer localVss.DeleteSnapshots() targetFS = localVss } diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index aa3522aea5b..f68e2ff28de 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -3,12 +3,40 @@ package fs import ( "os" "path/filepath" + "runtime" "strings" "sync" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" ) +// VSSConfig holds extended options of windows volume shadow copy service. +type VSSConfig struct { +} + +func init() { + if runtime.GOOS == "windows" { + options.Register("vss", VSSConfig{}) + } +} + +// NewVSSConfig returns a new VSSConfig with the default values filled in. +func NewVSSConfig() VSSConfig { + return VSSConfig{} +} + +// ParseVSSConfig parses a VSS extended options to VSSConfig struct. +func ParseVSSConfig(o options.Options) (VSSConfig, error) { + cfg := NewVSSConfig() + o = o.Extract("vss") + if err := o.Apply("vss", &cfg); err != nil { + return VSSConfig{}, err + } + + return cfg, nil +} + // ErrorHandler is used to report errors via callback type ErrorHandler func(item string, err error) error @@ -31,7 +59,7 @@ var _ FS = &LocalVss{} // NewLocalVss creates a new wrapper around the windows filesystem using volume // shadow copy service to access locked files. -func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler) *LocalVss { +func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig) *LocalVss { return &LocalVss{ FS: Local{}, snapshots: make(map[string]VssSnapshot), From 7470e5356e04424cb891bd6cdea9071a005f30dd Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 03:41:02 +0300 Subject: [PATCH 148/893] vss: Add "timeout" option Changing multiple "callAsyncFunctionAndWait" with fixed timeout to calculated timeout based on deadline. 
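The change described above replaces per-call fixed timeouts with a single deadline from which each wait derives its remaining budget. A rough, self-contained sketch of that pattern (hypothetical names, not the restic implementation; the actual change is in the diff below):

package main

import (
	"fmt"
	"time"
)

// waitStep computes the remaining time until a shared deadline before each
// blocking step, instead of granting every step the same fixed timeout.
func waitStep(name string, deadline time.Time, step func(timeout time.Duration) error) error {
	remaining := time.Until(deadline)
	if remaining <= 0 {
		return fmt.Errorf("%s: deadline exceeded", name)
	}
	return step(remaining)
}

func main() {
	deadline := time.Now().Add(120 * time.Second)

	for _, name := range []string{"GatherWriterMetadata", "PrepareForBackup", "DoSnapshotSet"} {
		err := waitStep(name, deadline, func(timeout time.Duration) error {
			// Simulated asynchronous work; a real caller would wait on the
			// operation for at most the computed per-step timeout.
			time.Sleep(10 * time.Millisecond)
			_ = timeout
			return nil
		})
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Printf("%s done, %v of the shared budget left\n", name, time.Until(deadline).Round(time.Second))
	}
}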
--- internal/fs/fs_local_vss.go | 10 ++++++++-- internal/fs/vss.go | 4 +++- internal/fs/vss_windows.go | 37 +++++++++++++++++++++++++------------ 3 files changed, 36 insertions(+), 15 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index f68e2ff28de..1f600178210 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -6,6 +6,7 @@ import ( "runtime" "strings" "sync" + "time" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/options" @@ -13,6 +14,7 @@ import ( // VSSConfig holds extended options of windows volume shadow copy service. type VSSConfig struct { + Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshots before timing out"` } func init() { @@ -23,7 +25,9 @@ func init() { // NewVSSConfig returns a new VSSConfig with the default values filled in. func NewVSSConfig() VSSConfig { - return VSSConfig{} + return VSSConfig{ + Timeout: time.Second * 120, + } } // ParseVSSConfig parses a VSS extended options to VSSConfig struct. @@ -52,6 +56,7 @@ type LocalVss struct { mutex sync.RWMutex msgError ErrorHandler msgMessage MessageHandler + timeout time.Duration } // statically ensure that LocalVss implements FS. @@ -66,6 +71,7 @@ func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig failedSnapshots: make(map[string]struct{}), msgError: msgError, msgMessage: msgMessage, + timeout: cfg.Timeout, } } @@ -144,7 +150,7 @@ func (fs *LocalVss) snapshotPath(path string) string { vssVolume := volumeNameLower + string(filepath.Separator) fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - if snapshot, err := NewVssSnapshot(vssVolume, 120, fs.msgError); err != nil { + if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, fs.msgError); err != nil { _ = fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", vssVolume, err)) fs.failedSnapshots[volumeNameLower] = struct{}{} diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 5f0ea36d995..92143883df7 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -4,6 +4,8 @@ package fs import ( + "time" + "github.com/restic/restic/internal/errors" ) @@ -34,7 +36,7 @@ func HasSufficientPrivilegesForVSS() error { // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - _ string, _ uint, _ ErrorHandler) (VssSnapshot, error) { + _ string, _ time.Duration, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index d75567d25ac..4e7f103850b 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -9,6 +9,7 @@ import ( "runtime" "strings" "syscall" + "time" "unsafe" ole "github.com/go-ole/go-ole" @@ -617,8 +618,13 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) { // WaitUntilAsyncFinished waits until either the async call is finished or // the given timeout is reached. 
-func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error { - hresult := vssAsync.Wait(millis) +func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { + const maxTimeout = 2147483647 * time.Millisecond + if timeout > maxTimeout { + timeout = maxTimeout + } + + hresult := vssAsync.Wait(uint32(timeout.Milliseconds())) err := newVssErrorIfResultNotOK("Wait() failed", hresult) if err != nil { vssAsync.Cancel() @@ -677,7 +683,7 @@ type VssSnapshot struct { snapshotProperties VssSnapshotProperties snapshotDeviceObject string mountPointInfo map[string]MountPoint - timeoutInMillis uint32 + timeout time.Duration } // GetSnapshotDeviceObject returns root path to access the snapshot files @@ -730,7 +736,7 @@ func HasSufficientPrivilegesForVSS() error { // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - volume string, timeoutInSeconds uint, msgError ErrorHandler) (VssSnapshot, error) { + volume string, timeout time.Duration, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() if err != nil { @@ -744,7 +750,7 @@ func NewVssSnapshot( runtime.GOARCH)) } - timeoutInMillis := uint32(timeoutInSeconds * 1000) + deadline := time.Now().Add(timeout) oleIUnknown, err := initializeVssCOMInterface() if oleIUnknown != nil { @@ -796,7 +802,7 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.GatherWriterMetadata, - "GatherWriterMetadata", timeoutInMillis) + "GatherWriterMetadata", deadline) if err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err @@ -854,7 +860,7 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.PrepareForBackup, "PrepareForBackup", - timeoutInMillis) + deadline) if err != nil { // After calling PrepareForBackup one needs to call AbortBackup() before releasing the VSS // instance for proper cleanup. @@ -865,7 +871,7 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet", - timeoutInMillis) + deadline) if err != nil { iVssBackupComponents.AbortBackup() iVssBackupComponents.Release() @@ -901,7 +907,7 @@ func NewVssSnapshot( } return VssSnapshot{iVssBackupComponents, snapshotSetID, snapshotProperties, - snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, timeoutInMillis}, nil + snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline)}, nil } // Delete deletes the created snapshot. @@ -922,8 +928,10 @@ func (p *VssSnapshot) Delete() error { if p.iVssBackupComponents != nil { defer p.iVssBackupComponents.Release() + deadline := time.Now().Add(p.timeout) + err = callAsyncFunctionAndWait(p.iVssBackupComponents.BackupComplete, "BackupComplete", - p.timeoutInMillis) + deadline) if err != nil { return err } @@ -945,7 +953,7 @@ type asyncCallFunc func() (*IVSSAsync, error) // callAsyncFunctionAndWait calls an async functions and waits for it to either // finish or timeout. 
-func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMillis uint32) error { +func callAsyncFunctionAndWait(function asyncCallFunc, name string, deadline time.Time) error { iVssAsync, err := function() if err != nil { return err @@ -955,7 +963,12 @@ func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMill return newVssTextError(fmt.Sprintf("%s() returned nil", name)) } - err = iVssAsync.WaitUntilAsyncFinished(timeoutInMillis) + timeout := time.Until(deadline) + if timeout <= 0 { + return newVssTextError(fmt.Sprintf("%s() deadline exceeded", name)) + } + + err = iVssAsync.WaitUntilAsyncFinished(timeout) iVssAsync.Release() return err } From c4f67c00644ebb344b08e8038868674f7fee0981 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 03:41:02 +0300 Subject: [PATCH 149/893] vss: Add volume filtering Add options to exclude all mountpoints and arbitrary volumes from snapshotting. --- internal/fs/fs_local_vss.go | 113 +++++++++++++++++++++++++++--------- internal/fs/vss.go | 8 ++- internal/fs/vss_windows.go | 78 +++++++++++++++++-------- 3 files changed, 148 insertions(+), 51 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 1f600178210..0e73092f237 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -14,7 +14,9 @@ import ( // VSSConfig holds extended options of windows volume shadow copy service. type VSSConfig struct { - Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshots before timing out"` + ExcludeAllMountPoints bool `option:"excludeallmountpoints" help:"exclude mountpoints from snapshotting on all volumes"` + ExcludeVolumes string `option:"excludevolumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` + Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"` } func init() { @@ -47,31 +49,59 @@ type ErrorHandler func(item string, err error) error // MessageHandler is used to report errors/messages via callbacks. type MessageHandler func(msg string, args ...interface{}) +// VolumeFilter is used to filter volumes by it's mount point or GUID path. +type VolumeFilter func(volume string) bool + // LocalVss is a wrapper around the local file system which uses windows volume // shadow copy service (VSS) in a transparent way. type LocalVss struct { FS - snapshots map[string]VssSnapshot - failedSnapshots map[string]struct{} - mutex sync.RWMutex - msgError ErrorHandler - msgMessage MessageHandler - timeout time.Duration + snapshots map[string]VssSnapshot + failedSnapshots map[string]struct{} + mutex sync.RWMutex + msgError ErrorHandler + msgMessage MessageHandler + excludeAllMountPoints bool + excludeVolumes map[string]struct{} + timeout time.Duration } // statically ensure that LocalVss implements FS. var _ FS = &LocalVss{} +// parseMountPoints try to convert semicolon separated list of mount points +// to map of lowercased volume GUID pathes. Mountpoints already in volume +// GUID path format will be validated and normalized. 
+func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]struct{}) { + if list == "" { + return + } + for _, s := range strings.Split(list, ";") { + if v, err := GetVolumeNameForVolumeMountPoint(s); err != nil { + msgError(s, errors.Errorf("failed to parse vss.excludevolumes [%s]: %s", s, err)) + } else { + if volumes == nil { + volumes = make(map[string]struct{}) + } + volumes[strings.ToLower(v)] = struct{}{} + } + } + + return +} + // NewLocalVss creates a new wrapper around the windows filesystem using volume // shadow copy service to access locked files. func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig) *LocalVss { return &LocalVss{ - FS: Local{}, - snapshots: make(map[string]VssSnapshot), - failedSnapshots: make(map[string]struct{}), - msgError: msgError, - msgMessage: msgMessage, - timeout: cfg.Timeout, + FS: Local{}, + snapshots: make(map[string]VssSnapshot), + failedSnapshots: make(map[string]struct{}), + msgError: msgError, + msgMessage: msgMessage, + excludeAllMountPoints: cfg.ExcludeAllMountPoints, + excludeVolumes: parseMountPoints(cfg.ExcludeVolumes, msgError), + timeout: cfg.Timeout, } } @@ -112,6 +142,24 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fs.snapshotPath(name)) } +// isMountPointExcluded is true if given mountpoint excluded by user. +func (fs *LocalVss) isMountPointExcluded(mountPoint string) bool { + if fs.excludeVolumes == nil { + return false + } + + volume, err := GetVolumeNameForVolumeMountPoint(mountPoint) + if err != nil { + fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err)) + + return false + } + + _, ok := fs.excludeVolumes[strings.ToLower(volume)] + + return ok +} + // snapshotPath returns the path inside a VSS snapshots if it already exists. // If the path is not yet available as a snapshot, a snapshot is created. 
// If creation of a snapshot fails the file's original path is returned as @@ -148,23 +196,36 @@ func (fs *LocalVss) snapshotPath(path string) string { if !snapshotExists && !snapshotFailed { vssVolume := volumeNameLower + string(filepath.Separator) - fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, fs.msgError); err != nil { - _ = fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", - vssVolume, err)) + if fs.isMountPointExcluded(vssVolume) { + fs.msgMessage("snapshots for [%s] excluded by user\n", vssVolume) fs.failedSnapshots[volumeNameLower] = struct{}{} } else { - fs.snapshots[volumeNameLower] = snapshot - fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume) - if len(snapshot.mountPointInfo) > 0 { - fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume) - for mp, mpInfo := range snapshot.mountPointInfo { - info := "" - if !mpInfo.IsSnapshotted() { - info = " (not snapshotted)" + fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) + + var filter VolumeFilter + if !fs.excludeAllMountPoints { + filter = func(volume string) bool { + return !fs.isMountPointExcluded(volume) + } + } + + if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, filter, fs.msgError); err != nil { + fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", + vssVolume, err)) + fs.failedSnapshots[volumeNameLower] = struct{}{} + } else { + fs.snapshots[volumeNameLower] = snapshot + fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume) + if len(snapshot.mountPointInfo) > 0 { + fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume) + for mp, mpInfo := range snapshot.mountPointInfo { + info := "" + if !mpInfo.IsSnapshotted() { + info = " (not snapshotted)" + } + fs.msgMessage(" - %s%s\n", mp, info) } - fs.msgMessage(" - %s%s\n", mp, info) } } } diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 92143883df7..838bdf79bb3 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -33,10 +33,16 @@ func HasSufficientPrivilegesForVSS() error { return errors.New("VSS snapshots are only supported on windows") } +// GetVolumeNameForVolumeMountPoint clear input parameter +// and calls the equivalent windows api. +func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { + return mountPoint, nil +} + // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - _ string, _ time.Duration, _ ErrorHandler) (VssSnapshot, error) { + _ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 4e7f103850b..4ed28936620 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -733,10 +733,33 @@ func HasSufficientPrivilegesForVSS() error { return err } +// GetVolumeNameForVolumeMountPoint clear input parameter +// and calls the equivalent windows api. 
+func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { + if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator { + mountPoint += string(filepath.Separator) + } + + mountPointPointer, err := syscall.UTF16PtrFromString(mountPoint) + if err != nil { + return mountPoint, err + } + + // A reasonable size for the buffer to accommodate the largest possible + // volume GUID path is 50 characters. + volumeNameBuffer := make([]uint16, 50) + if err := windows.GetVolumeNameForVolumeMountPoint( + mountPointPointer, &volumeNameBuffer[0], 50); err != nil { + return mountPoint, err + } + + return syscall.UTF16ToString(volumeNameBuffer), nil +} + // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - volume string, timeout time.Duration, msgError ErrorHandler) (VssSnapshot, error) { + volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() if err != nil { @@ -828,35 +851,42 @@ func NewVssSnapshot( return VssSnapshot{}, err } - mountPoints, err := enumerateMountedFolders(volume) - if err != nil { - iVssBackupComponents.Release() - return VssSnapshot{}, newVssTextError(fmt.Sprintf( - "failed to enumerate mount points for volume %s: %s", volume, err)) - } - mountPointInfo := make(map[string]MountPoint) - for _, mountPoint := range mountPoints { - // ensure every mountpoint is available even without a valid - // snapshot because we need to consider this when backing up files - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false} - - if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil { - continue - } else if !isSupported { - continue - } - - var mountPointSnapshotSetID ole.GUID - err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID) + // if filter==nil just don't process mount points for this volume at all + if filter != nil { + mountPoints, err := enumerateMountedFolders(volume) if err != nil { iVssBackupComponents.Release() - return VssSnapshot{}, err + + return VssSnapshot{}, newVssTextError(fmt.Sprintf( + "failed to enumerate mount points for volume %s: %s", volume, err)) } - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true, - snapshotSetID: mountPointSnapshotSetID} + for _, mountPoint := range mountPoints { + // ensure every mountpoint is available even without a valid + // snapshot because we need to consider this when backing up files + mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false} + + if !filter(mountPoint) { + continue + } else if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil { + continue + } else if !isSupported { + continue + } + + var mountPointSnapshotSetID ole.GUID + err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID) + if err != nil { + iVssBackupComponents.Release() + + return VssSnapshot{}, err + } + + mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true, + snapshotSetID: mountPointSnapshotSetID} + } } err = callAsyncFunctionAndWait(iVssBackupComponents.PrepareForBackup, "PrepareForBackup", From 9182e6bab55c87703d11987c19b9fdb463aa6a74 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 06:18:30 +0300 Subject: [PATCH 150/893] vss: Update docs and changelog --- changelog/unreleased/pull-3067 | 18 ++++++++++++++++++ doc/040_backup.rst | 18 
++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 changelog/unreleased/pull-3067 diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 new file mode 100644 index 00000000000..a56c045faa3 --- /dev/null +++ b/changelog/unreleased/pull-3067 @@ -0,0 +1,18 @@ +Enhancement: Add options to configure Windows Shadow Copy Service + +Restic always used 120 sec. timeout and unconditionally created VSS snapshots +for all volume mount points on disk. Now this behavior can be fine-tuned by +new options, like exclude user specific volumes and mount points or completely +disable auto snapshotting of volume mount points. + +For example: + + restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.excludeallmountpoints=true + +changes timeout to five minutes and disable snapshotting of mount points on all volumes, and + + restic backup --use-fs-snapshot -o vss.excludevolumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" + +excludes drive `D:`, mount point `C:\MNT` and specific volume from VSS snapshotting. + +https://github.com/restic/restic/pull/3067 diff --git a/doc/040_backup.rst b/doc/040_backup.rst index d0bd4b2e2c6..d1bb39f968e 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -56,6 +56,24 @@ snapshot for each volume that contains files to backup. Files are read from the VSS snapshot instead of the regular filesystem. This allows to backup files that are exclusively locked by another process during the backup. +You can use three additional options to change VSS behaviour: + + * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, the default value is 120 seconds + * ``-o vss.excludeallmountpoints`` disable auto snapshotting of all volume mount points + * ``-o vss.excludevolumes`` allows excluding specific volumes or volume mount points from snapshotting + +E.g., 2.5 minutes timeout with mount points snapshotting disabled can be specified as + +.. code-block:: console + + -o vss.timeout=2m30s -o vss.excludeallmountpoints=true + +and excluding drive ``D:\``, mount point ``C:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as + +.. code-block:: console + + -o vss.excludevolumes="d:;c:\MNT\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" + By default VSS ignores Outlook OST files. This is not a restriction of restic but the default Windows VSS configuration. 
The files not to snapshot are configured in the Windows registry under the following key: From 9d3d915e2c9a349630ac9d9272962b3ef801db39 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 22:36:48 +0300 Subject: [PATCH 151/893] vss: Add some tests --- internal/fs/fs_local_vss_test.go | 211 +++++++++++++++++++++++++++++++ 1 file changed, 211 insertions(+) create mode 100644 internal/fs/fs_local_vss_test.go diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go new file mode 100644 index 00000000000..ce07fee3c81 --- /dev/null +++ b/internal/fs/fs_local_vss_test.go @@ -0,0 +1,211 @@ +// +build windows + +package fs + +import ( + "fmt" + "regexp" + "strings" + "testing" + "time" + + "github.com/restic/restic/internal/options" +) + +func matchStrings(ptrs []string, strs []string) bool { + if len(ptrs) != len(strs) { + return false + } + + for i, p := range ptrs { + matched, err := regexp.MatchString(p, strs[i]) + if err != nil { + panic(err) + } + if !matched { + return false + } + } + + return true +} + +func matchMap(strs []string, m map[string]struct{}) bool { + if len(strs) != len(m) { + return false + } + + for _, s := range strs { + if _, ok := m[s]; !ok { + return false + } + } + + return true +} + +func TestVSSConfig(t *testing.T) { + type config struct { + excludeAllMountPoints bool + timeout time.Duration + } + setTests := []struct { + input options.Options + output config + }{ + { + options.Options{ + "vss.timeout": "6h38m42s", + }, + config{ + timeout: 23922000000000, + }, + }, + { + options.Options{ + "vss.excludeallmountpoints": "t", + }, + config{ + excludeAllMountPoints: true, + timeout: 120000000000, + }, + }, + { + options.Options{ + "vss.excludeallmountpoints": "0", + "vss.excludevolumes": "", + "vss.timeout": "120s", + }, + config{ + timeout: 120000000000, + }, + }, + } + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + cfg, err := ParseVSSConfig(test.input) + if err != nil { + t.Fatal(err) + } + + errorHandler := func(item string, err error) error { + t.Fatalf("unexpected error (%v)", err) + + return nil + } + messageHandler := func(msg string, args ...interface{}) { + t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) + } + + dst := NewLocalVss(errorHandler, messageHandler, cfg) + + if dst.excludeAllMountPoints != test.output.excludeAllMountPoints || + dst.excludeVolumes != nil || dst.timeout != test.output.timeout { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst) + } + }) + } +} + +func TestParseMountPoints(t *testing.T) { + volumeMatch := regexp.MustCompile(`^\\\\\?\\Volume\{[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}\}\\$`) + + // It's not a good idea to test functions based on GetVolumeNameForVolumeMountPoint by calling + // GetVolumeNameForVolumeMountPoint itself, but we have restricted test environment: + // cannot manage volumes and can only be sure that the mount point C:\ exists + sysVolume, err := GetVolumeNameForVolumeMountPoint("C:") + if err != nil { + t.Fatal(err) + } + // We don't know a valid volume GUID path for C:\, but we'll at least check its format + if !volumeMatch.MatchString(sysVolume) { + t.Fatalf("invalid volume GUID path: %s", sysVolume) + } + sysVolumeMutated := strings.ToUpper(sysVolume[:len(sysVolume)-1]) + sysVolumeMatch := strings.ToLower(sysVolume) + + type check struct { + volume string + result bool + } + setTests := []struct { + input options.Options + output []string + checks []check + 
errors []string + }{ + { + options.Options{ + "vss.excludevolumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated, + }, + []string{ + sysVolumeMatch, + }, + []check{ + {`c:\`, true}, + {`c:`, true}, + {sysVolume, true}, + {sysVolumeMutated, true}, + }, + []string{}, + }, + { + options.Options{ + "vss.excludevolumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, + }, + []string{ + sysVolumeMatch, + }, + []check{ + {`c:\windows\`, false}, + {`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, false}, + {`c:`, true}, + {``, false}, + }, + []string{ + `failed to parse vss\.excludevolumes \[z:\\nonexistent\]:.*`, + `failed to parse vss\.excludevolumes \[c:\\windows\\\]:.*`, + `failed to parse vss\.excludevolumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to get volume from mount point \[c:\\windows\\\]:.*`, + `failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to get volume from mount point \[\]:.*`, + }, + }, + } + + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + cfg, err := ParseVSSConfig(test.input) + if err != nil { + t.Fatal(err) + } + + var log []string + errorHandler := func(item string, err error) error { + log = append(log, strings.TrimSpace(err.Error())) + + return nil + } + messageHandler := func(msg string, args ...interface{}) { + t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) + } + + dst := NewLocalVss(errorHandler, messageHandler, cfg) + + if !matchMap(test.output, dst.excludeVolumes) { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", + test.output, dst.excludeVolumes) + } + + for _, c := range test.checks { + if dst.isMountPointExcluded(c.volume) != c.result { + t.Fatalf(`wrong check: isMountPointExcluded("%s") != %v`, c.volume, c.result) + } + } + + if !matchStrings(test.errors, log) { + t.Fatalf("wrong log, want:\n %#v\ngot:\n %#v", test.errors, log) + } + }) + } +} From 88c509e3e9c301d72e0aabf9abd3c7c13344b090 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Tue, 10 Nov 2020 06:48:05 +0300 Subject: [PATCH 152/893] vss: Change `ErrorHandler` signature We don't need `error` here: the only existing implementation of `ErrorHandler` always call `Backup.Error` and all implementations of `Backup.Error` always return nil. --- cmd/restic/cmd_backup.go | 4 ++-- internal/fs/fs_local_vss.go | 6 +++--- internal/fs/fs_local_vss_test.go | 8 ++------ 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 5329a928cab..19b96e9b0c4 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -556,8 +556,8 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter return err } - errorHandler := func(item string, err error) error { - return progressReporter.Error(item, err) + errorHandler := func(item string, err error) { + _ = progressReporter.Error(item, err) } messageHandler := func(msg string, args ...interface{}) { diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 0e73092f237..230e14a1f42 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -43,8 +43,8 @@ func ParseVSSConfig(o options.Options) (VSSConfig, error) { return cfg, nil } -// ErrorHandler is used to report errors via callback -type ErrorHandler func(item string, err error) error +// ErrorHandler is used to report errors via callback. 
+type ErrorHandler func(item string, err error) // MessageHandler is used to report errors/messages via callbacks. type MessageHandler func(msg string, args ...interface{}) @@ -114,7 +114,7 @@ func (fs *LocalVss) DeleteSnapshots() { for volumeName, snapshot := range fs.snapshots { if err := snapshot.Delete(); err != nil { - _ = fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err)) + fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err)) activeSnapshots[volumeName] = snapshot } } diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index ce07fee3c81..6beb35b9812 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -88,10 +88,8 @@ func TestVSSConfig(t *testing.T) { t.Fatal(err) } - errorHandler := func(item string, err error) error { + errorHandler := func(item string, err error) { t.Fatalf("unexpected error (%v)", err) - - return nil } messageHandler := func(msg string, args ...interface{}) { t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) @@ -181,10 +179,8 @@ func TestParseMountPoints(t *testing.T) { } var log []string - errorHandler := func(item string, err error) error { + errorHandler := func(item string, err error) { log = append(log, strings.TrimSpace(err.Error())) - - return nil } messageHandler := func(msg string, args ...interface{}) { t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) From 3bac1f0135f6c238e1ac90bfd14cb50eb83521c3 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Sun, 31 Jan 2021 00:34:41 +0300 Subject: [PATCH 153/893] vss: Fix issues reported by linters --- internal/fs/fs_local_vss.go | 4 +--- internal/fs/vss_windows.go | 43 +++++++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 230e14a1f42..5f55dcfd1a7 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -165,7 +165,6 @@ func (fs *LocalVss) isMountPointExcluded(mountPoint string) bool { // If creation of a snapshot fails the file's original path is returned as // a fallback. func (fs *LocalVss) snapshotPath(path string) string { - fixPath := fixpath(path) if strings.HasPrefix(fixPath, `\\?\UNC\`) { @@ -268,9 +267,8 @@ func (fs *LocalVss) snapshotPath(path string) string { snapshotPath = fs.Join(snapshot.GetSnapshotDeviceObject(), strings.TrimPrefix(fixPath, volumeName)) if snapshotPath == snapshot.GetSnapshotDeviceObject() { - snapshotPath = snapshotPath + string(filepath.Separator) + snapshotPath += string(filepath.Separator) } - } else { // no snapshot is available for the requested path: // -> try to backup without a snapshot diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 4ed28936620..424548a7445 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -21,6 +21,7 @@ import ( type HRESULT uint // HRESULT constant values necessary for using VSS api. +//nolint:golint const ( S_OK HRESULT = 0x00000000 E_ACCESSDENIED HRESULT = 0x80070005 @@ -256,6 +257,7 @@ type IVssBackupComponents struct { } // IVssBackupComponentsVTable is the vtable for IVssBackupComponents. 
+// nolint:structcheck type IVssBackupComponentsVTable struct { ole.IUnknownVtbl getWriterComponentsCount uintptr @@ -415,7 +417,7 @@ func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot panic(err) } - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) @@ -479,9 +481,9 @@ func (vss *IVssBackupComponents) DoSnapshotSet() (*IVSSAsync, error) { // DeleteSnapshots calls the equivalent VSS api. func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ole.GUID, error) { - var deletedSnapshots int32 = 0 + var deletedSnapshots int32 var nondeletedSnapshotID ole.GUID - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(&snapshotID)) @@ -505,7 +507,7 @@ func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ol // GetSnapshotProperties calls the equivalent VSS api. func (vss *IVssBackupComponents) GetSnapshotProperties(snapshotID ole.GUID, properties *VssSnapshotProperties) error { - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(&snapshotID)) @@ -528,8 +530,8 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { if err != nil { return err } - - proc.Call(uintptr(unsafe.Pointer(properties))) + // this function always succeeds and returns no value + _, _, _ = proc.Call(uintptr(unsafe.Pointer(properties))) return nil } @@ -544,6 +546,7 @@ func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { } // VssSnapshotProperties defines the properties of a VSS snapshot as part of the VSS api. +// nolint:structcheck type VssSnapshotProperties struct { snapshotID ole.GUID snapshotSetID ole.GUID @@ -700,7 +703,12 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) { } // ensure COM is initialized before use - ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil { + // CoInitializeEx returns 1 if COM is already initialized + if oleErr, ok := err.(*ole.OleError); !ok || oleErr.Code() != 1 { + return nil, err + } + } var oleIUnknown *ole.IUnknown result, _, _ := vssInstance.Call(uintptr(unsafe.Pointer(&oleIUnknown))) @@ -761,7 +769,6 @@ func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { func NewVssSnapshot( volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() - if err != nil { return VssSnapshot{}, newVssTextError(fmt.Sprintf( "Failed to detect windows architecture: %s", err.Error())) @@ -884,8 +891,10 @@ func NewVssSnapshot( return VssSnapshot{}, err } - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true, - snapshotSetID: mountPointSnapshotSetID} + mountPointInfo[mountPoint] = MountPoint{ + isSnapshotted: true, + snapshotSetID: mountPointSnapshotSetID, + } } } @@ -903,7 +912,7 @@ func NewVssSnapshot( err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet", deadline) if err != nil { - iVssBackupComponents.AbortBackup() + _ = iVssBackupComponents.AbortBackup() iVssBackupComponents.Release() return VssSnapshot{}, err } @@ -911,13 +920,12 @@ func NewVssSnapshot( var snapshotProperties VssSnapshotProperties err = iVssBackupComponents.GetSnapshotProperties(snapshotSetID, &snapshotProperties) if err != nil { - iVssBackupComponents.AbortBackup() + _ = iVssBackupComponents.AbortBackup() 
iVssBackupComponents.Release() return VssSnapshot{}, err } for mountPoint, info := range mountPointInfo { - if !info.isSnapshotted { continue } @@ -936,8 +944,10 @@ func NewVssSnapshot( mountPointInfo[mountPoint] = info } - return VssSnapshot{iVssBackupComponents, snapshotSetID, snapshotProperties, - snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline)}, nil + return VssSnapshot{ + iVssBackupComponents, snapshotSetID, snapshotProperties, + snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline), + }, nil } // Delete deletes the created snapshot. @@ -968,7 +978,7 @@ func (p *VssSnapshot) Delete() error { if _, _, e := p.iVssBackupComponents.DeleteSnapshots(p.snapshotID); e != nil { err = newVssTextError(fmt.Sprintf("Failed to delete snapshot: %s", e.Error())) - p.iVssBackupComponents.AbortBackup() + _ = p.iVssBackupComponents.AbortBackup() if err != nil { return err } @@ -1079,6 +1089,7 @@ func enumerateMountedFolders(volume string) ([]string, error) { return mountedFolders, nil } + // nolint:errcheck defer windows.FindVolumeMountPointClose(handle) volumeMountPoint := syscall.UTF16ToString(volumeMountPointBuffer) From bb0f93ef3d3ac6dd8b86928571adf2583dd443b7 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 22 Mar 2021 23:31:19 +0300 Subject: [PATCH 154/893] vss: Add "provider" option --- internal/fs/fs_local_vss.go | 5 +- internal/fs/vss.go | 2 +- internal/fs/vss_windows.go | 168 +++++++++++++++++++++++++++++++++--- 3 files changed, 160 insertions(+), 15 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 5f55dcfd1a7..de30bcedb3b 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -17,6 +17,7 @@ type VSSConfig struct { ExcludeAllMountPoints bool `option:"excludeallmountpoints" help:"exclude mountpoints from snapshotting on all volumes"` ExcludeVolumes string `option:"excludevolumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"` + Provider string `option:"provider" help:"VSS provider identifier which will be used for snapshotting"` } func init() { @@ -64,6 +65,7 @@ type LocalVss struct { excludeAllMountPoints bool excludeVolumes map[string]struct{} timeout time.Duration + provider string } // statically ensure that LocalVss implements FS. @@ -102,6 +104,7 @@ func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig excludeAllMountPoints: cfg.ExcludeAllMountPoints, excludeVolumes: parseMountPoints(cfg.ExcludeVolumes, msgError), timeout: cfg.Timeout, + provider: cfg.Provider, } } @@ -209,7 +212,7 @@ func (fs *LocalVss) snapshotPath(path string) string { } } - if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, filter, fs.msgError); err != nil { + if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, filter, fs.msgError); err != nil { fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", vssVolume, err)) fs.failedSnapshots[volumeNameLower] = struct{}{} diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 838bdf79bb3..a54475480c5 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -41,7 +41,7 @@ func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { // NewVssSnapshot creates a new vss snapshot. 
If creating the snapshots doesn't // finish within the timeout an error is returned. -func NewVssSnapshot( +func NewVssSnapshot(_ string, _ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 424548a7445..18aea419d02 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -367,7 +367,7 @@ func (vss *IVssBackupComponents) convertToVSSAsync( } // IsVolumeSupported calls the equivalent VSS api. -func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, error) { +func (vss *IVssBackupComponents) IsVolumeSupported(providerID *ole.GUID, volumeName string) (bool, error) { volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName) if err != nil { panic(err) @@ -377,7 +377,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err var result uintptr if runtime.GOARCH == "386" { - id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) + id := (*[4]uintptr)(unsafe.Pointer(providerID)) result, _, _ = syscall.Syscall9(vss.getVTable().isVolumeSupported, 7, uintptr(unsafe.Pointer(vss)), id[0], id[1], id[2], id[3], @@ -385,7 +385,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err 0) } else { result, _, _ = syscall.Syscall6(vss.getVTable().isVolumeSupported, 4, - uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(ole.IID_NULL)), + uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(volumeNamePointer)), uintptr(unsafe.Pointer(&isSupportedRaw)), 0, 0) } @@ -411,7 +411,7 @@ func (vss *IVssBackupComponents) StartSnapshotSet() (ole.GUID, error) { } // AddToSnapshotSet calls the equivalent VSS api. -func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot *ole.GUID) error { +func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, providerID *ole.GUID, idSnapshot *ole.GUID) error { volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName) if err != nil { panic(err) @@ -420,15 +420,15 @@ func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot var result uintptr if runtime.GOARCH == "386" { - id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) + id := (*[4]uintptr)(unsafe.Pointer(providerID)) result, _, _ = syscall.Syscall9(vss.getVTable().addToSnapshotSet, 7, - uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), id[0], id[1], - id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0) + uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), + id[0], id[1], id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0) } else { result, _, _ = syscall.Syscall6(vss.getVTable().addToSnapshotSet, 4, uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), - uintptr(unsafe.Pointer(ole.IID_NULL)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0) + uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0) } return newVssErrorIfResultNotOK("AddToSnapshotSet() failed", HRESULT(result)) @@ -535,6 +535,13 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { return nil } +func vssFreeProviderProperties(p *VssProviderProperties) { + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) + p.providerName = nil + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) + p.providerName = nil +} + // BackupComplete calls the equivalent VSS api. 
func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { var oleIUnknown *ole.IUnknown @@ -563,6 +570,17 @@ type VssSnapshotProperties struct { status uint } +// VssProviderProperties defines the properties of a VSS provider as part of the VSS api. +// nolint:structcheck +type VssProviderProperties struct { + providerID ole.GUID + providerName *uint16 + providerType uint32 + providerVersion *uint16 + providerVersionID ole.GUID + classID ole.GUID +} + // GetSnapshotDeviceObject returns root path to access the snapshot files // and folders. func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string { @@ -660,6 +678,75 @@ func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { return nil } +// UIID_IVSS_ADMIN defines the GUID of IVSSAdmin. +var ( + UIID_IVSS_ADMIN = ole.NewGUID("{77ED5996-2F63-11d3-8A39-00C04F72D8E3}") + CLSID_VSS_COORDINATOR = ole.NewGUID("{E579AB5F-1CC4-44b4-BED9-DE0991FF0623}") +) + +// IVSSAdmin VSS api interface. +type IVSSAdmin struct { + ole.IUnknown +} + +// IVSSAdminVTable is the vtable for IVSSAdmin. +// nolint:structcheck +type IVSSAdminVTable struct { + ole.IUnknownVtbl + registerProvider uintptr + unregisterProvider uintptr + queryProviders uintptr + abortAllSnapshotsInProgress uintptr +} + +// getVTable returns the vtable for IVSSAdmin. +func (vssAdmin *IVSSAdmin) getVTable() *IVSSAdminVTable { + return (*IVSSAdminVTable)(unsafe.Pointer(vssAdmin.RawVTable)) +} + +// QueryProviders calls the equivalent VSS api. +func (vssAdmin *IVSSAdmin) QueryProviders() (*IVssEnumObject, error) { + var enum *IVssEnumObject + + result, _, _ := syscall.Syscall(vssAdmin.getVTable().queryProviders, 2, + uintptr(unsafe.Pointer(vssAdmin)), uintptr(unsafe.Pointer(&enum)), 0) + + return enum, newVssErrorIfResultNotOK("QueryProviders() failed", HRESULT(result)) +} + +// IVssEnumObject VSS api interface. +type IVssEnumObject struct { + ole.IUnknown +} + +// IVssEnumObjectVTable is the vtable for IVssEnumObject. +// nolint:structcheck +type IVssEnumObjectVTable struct { + ole.IUnknownVtbl + next uintptr + skip uintptr + reset uintptr + clone uintptr +} + +// getVTable returns the vtable for IVssEnumObject. +func (vssEnum *IVssEnumObject) getVTable() *IVssEnumObjectVTable { + return (*IVssEnumObjectVTable)(unsafe.Pointer(vssEnum.RawVTable)) +} + +// Next calls the equivalent VSS api. +func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, error) { + var fetched uint32 + result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4, + uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props), + uintptr(unsafe.Pointer(&fetched)), 0, 0) + if result == 1 { + return uint(fetched), nil + } + + return uint(fetched), newVssErrorIfResultNotOK("Next() failed", HRESULT(result)) +} + // MountPoint wraps all information of a snapshot of a mountpoint on a volume. type MountPoint struct { isSnapshotted bool @@ -766,7 +853,7 @@ func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. 
-func NewVssSnapshot( +func NewVssSnapshot(provider string, volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() if err != nil { @@ -814,6 +901,12 @@ func NewVssSnapshot( iVssBackupComponents := (*IVssBackupComponents)(unsafe.Pointer(comInterface)) + providerID, err := getProviderID(provider) + if err != nil { + iVssBackupComponents.Release() + return VssSnapshot{}, err + } + if err := iVssBackupComponents.InitializeForBackup(); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err @@ -838,7 +931,7 @@ func NewVssSnapshot( return VssSnapshot{}, err } - if isSupported, err := iVssBackupComponents.IsVolumeSupported(volume); err != nil { + if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, volume); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err } else if !isSupported { @@ -853,7 +946,7 @@ func NewVssSnapshot( return VssSnapshot{}, err } - if err := iVssBackupComponents.AddToSnapshotSet(volume, &snapshotSetID); err != nil { + if err := iVssBackupComponents.AddToSnapshotSet(volume, providerID, &snapshotSetID); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err } @@ -877,14 +970,14 @@ func NewVssSnapshot( if !filter(mountPoint) { continue - } else if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil { + } else if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, mountPoint); err != nil { continue } else if !isSupported { continue } var mountPointSnapshotSetID ole.GUID - err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID) + err := iVssBackupComponents.AddToSnapshotSet(mountPoint, providerID, &mountPointSnapshotSetID) if err != nil { iVssBackupComponents.Release() @@ -988,6 +1081,55 @@ func (p *VssSnapshot) Delete() error { return nil } +func getProviderID(provider string) (*ole.GUID, error) { + comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN) + if err != nil { + return nil, err + } + defer comInterface.Release() + + vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface)) + + providerLower := strings.ToLower(provider) + switch providerLower { + case "": + return ole.IID_NULL, nil + case "ms": + return ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}"), nil + } + + enum, err := vssAdmin.QueryProviders() + if err != nil { + return nil, err + } + defer enum.Release() + + id := ole.NewGUID(provider) + + var props struct { + objectType uint32 + provider VssProviderProperties + } + for { + count, err := enum.Next(1, unsafe.Pointer(&props)) + if err != nil { + return nil, err + } + + if count < 1 { + return nil, errors.Errorf(`invalid VSS provider "%s"`, provider) + } + + name := ole.UTF16PtrToString(props.provider.providerName) + vssFreeProviderProperties(&props.provider) + + if id != nil && *id == props.provider.providerID || + id == nil && providerLower == strings.ToLower(name) { + return &props.provider.providerID, nil + } + } +} + // asyncCallFunc is the callback type for callAsyncFunctionAndWait. 
type asyncCallFunc func() (*IVSSAsync, error) From 739d3243d9f8e17b0f80440aaec9bacdcb046745 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Tue, 23 Mar 2021 07:41:45 +0300 Subject: [PATCH 155/893] vss: Update docs and changelog --- changelog/unreleased/pull-3067 | 4 ++++ doc/040_backup.rst | 17 ++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 index a56c045faa3..52f04c5e003 100644 --- a/changelog/unreleased/pull-3067 +++ b/changelog/unreleased/pull-3067 @@ -15,4 +15,8 @@ changes timeout to five minutes and disable snapshotting of mount points on all excludes drive `D:`, mount point `C:\MNT` and specific volume from VSS snapshotting. + restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5} + +uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider. + https://github.com/restic/restic/pull/3067 diff --git a/doc/040_backup.rst b/doc/040_backup.rst index d1bb39f968e..7360bb0e0d9 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -56,11 +56,12 @@ snapshot for each volume that contains files to backup. Files are read from the VSS snapshot instead of the regular filesystem. This allows to backup files that are exclusively locked by another process during the backup. -You can use three additional options to change VSS behaviour: +You can use additional options to change VSS behaviour: * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, the default value is 120 seconds * ``-o vss.excludeallmountpoints`` disable auto snapshotting of all volume mount points * ``-o vss.excludevolumes`` allows excluding specific volumes or volume mount points from snapshotting + * ``-o vss.provider`` specifies VSS provider used for snapshotting E.g., 2.5 minutes timeout with mount points snapshotting disabled can be specified as @@ -74,6 +75,20 @@ and excluding drive ``D:\``, mount point ``C:\mnt`` and volume ``\\?\Volume{04ce -o vss.excludevolumes="d:;c:\MNT\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" +VSS provider can be specified by GUID + +.. code-block:: console + + -o vss.provider={3f900f90-00e9-440e-873a-96ca5eb079e5} + +or by name + +.. code-block:: console + + -o vss.provider="Hyper-V IC Software Shadow Copy Provider" + +Also ``MS`` can be used as alias for ``Microsoft Software Shadow Copy provider 1.0``. + By default VSS ignores Outlook OST files. This is not a restriction of restic but the default Windows VSS configuration. 
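[Editor's note: a minimal illustration of combining the VSS options documented above; the backup target ``C:\data`` is a placeholder and not taken from the patch, everything else follows the option syntax shown in this change.]

.. code-block:: console

    restic backup --use-fs-snapshot -o vss.timeout=2m30s -o vss.provider=ms C:\data

Here ``ms`` is matched case-insensitively by ``getProviderID`` (which lowercases the value) and resolves to 'Microsoft Software Shadow Copy provider 1.0', while any other value is treated as a provider GUID or provider name and looked up via ``IVSSAdmin.QueryProviders``.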
The files not to snapshot are configured in the Windows registry under the following key: From 0a8f9c5d9cef798902a49e63da0a51be0e14d095 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 30 Apr 2021 18:01:40 +0300 Subject: [PATCH 156/893] vss: Add tests for "provider" option --- internal/fs/fs_local_vss_test.go | 88 ++++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+), 4 deletions(-) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 6beb35b9812..cff881151b8 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + ole "github.com/go-ole/go-ole" "github.com/restic/restic/internal/options" ) @@ -18,6 +19,9 @@ func matchStrings(ptrs []string, strs []string) bool { } for i, p := range ptrs { + if p == "" { + return false + } matched, err := regexp.MatchString(p, strs[i]) if err != nil { panic(err) @@ -48,6 +52,7 @@ func TestVSSConfig(t *testing.T) { type config struct { excludeAllMountPoints bool timeout time.Duration + provider string } setTests := []struct { input options.Options @@ -55,19 +60,23 @@ func TestVSSConfig(t *testing.T) { }{ { options.Options{ - "vss.timeout": "6h38m42s", + "vss.timeout": "6h38m42s", + "vss.provider": "Ms", }, config{ - timeout: 23922000000000, + timeout: 23922000000000, + provider: "Ms", }, }, { options.Options{ "vss.excludeallmountpoints": "t", + "vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}", }, config{ excludeAllMountPoints: true, timeout: 120000000000, + provider: "{b5946137-7b9f-4925-af80-51abd60b20d5}", }, }, { @@ -75,9 +84,11 @@ func TestVSSConfig(t *testing.T) { "vss.excludeallmountpoints": "0", "vss.excludevolumes": "", "vss.timeout": "120s", + "vss.provider": "Microsoft Software Shadow Copy provider 1.0", }, config{ - timeout: 120000000000, + timeout: 120000000000, + provider: "Microsoft Software Shadow Copy provider 1.0", }, }, } @@ -98,7 +109,8 @@ func TestVSSConfig(t *testing.T) { dst := NewLocalVss(errorHandler, messageHandler, cfg) if dst.excludeAllMountPoints != test.output.excludeAllMountPoints || - dst.excludeVolumes != nil || dst.timeout != test.output.timeout { + dst.excludeVolumes != nil || dst.timeout != test.output.timeout || + dst.provider != test.output.provider { t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst) } }) @@ -205,3 +217,71 @@ func TestParseMountPoints(t *testing.T) { }) } } + +func TestParseProvider(t *testing.T) { + msProvider := ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}") + setTests := []struct { + provider string + id *ole.GUID + result string + }{ + { + "", + ole.IID_NULL, + "", + }, + { + "mS", + msProvider, + "", + }, + { + "{B5946137-7b9f-4925-Af80-51abD60b20d5}", + msProvider, + "", + }, + { + "Microsoft Software Shadow Copy provider 1.0", + msProvider, + "", + }, + { + "{04560982-3d7d-4bbc-84f7-0712f833a28f}", + nil, + `invalid VSS provider "{04560982-3d7d-4bbc-84f7-0712f833a28f}"`, + }, + { + "non-existent provider", + nil, + `invalid VSS provider "non-existent provider"`, + }, + } + + _ = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + id, err := getProviderID(test.provider) + + if err != nil && id != nil { + t.Fatalf("err!=nil but id=%v", id) + } + + if test.result != "" || err != nil { + var result string + if err != nil { + result = err.Error() + } + matched, err := regexp.MatchString(test.result, result) + if err != 
nil { + panic(err) + } + if !matched || test.result == "" { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.result, result) + } + } else if !ole.IsEqualGUID(id, test.id) { + t.Fatalf("wrong id, want:\n %s\ngot:\n %s", test.id.String(), id.String()) + } + }) + } +} From 5703e5a6526fda6ab7856876bedaf07cffcce752 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:18:46 +0300 Subject: [PATCH 157/893] Fix texts and comments --- changelog/unreleased/pull-3067 | 12 ++++++------ doc/040_backup.rst | 12 ++++++------ internal/fs/fs_local_vss_test.go | 4 +++- internal/fs/vss.go | 2 +- internal/fs/vss_windows.go | 2 +- 5 files changed, 17 insertions(+), 15 deletions(-) diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 index 52f04c5e003..855c7f2bedf 100644 --- a/changelog/unreleased/pull-3067 +++ b/changelog/unreleased/pull-3067 @@ -2,21 +2,21 @@ Enhancement: Add options to configure Windows Shadow Copy Service Restic always used 120 sec. timeout and unconditionally created VSS snapshots for all volume mount points on disk. Now this behavior can be fine-tuned by -new options, like exclude user specific volumes and mount points or completely +new options, like exclude specific volumes and mount points or completely disable auto snapshotting of volume mount points. For example: - restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.excludeallmountpoints=true - + restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.exclude-all-mount-points=true + changes timeout to five minutes and disable snapshotting of mount points on all volumes, and - restic backup --use-fs-snapshot -o vss.excludevolumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" + restic backup --use-fs-snapshot -o vss.exclude-volumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" -excludes drive `D:`, mount point `C:\MNT` and specific volume from VSS snapshotting. +excludes drive `d:`, mount point `c:\mnt` and specific volume from VSS snapshotting. restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5} - + uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider. https://github.com/restic/restic/pull/3067 diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 7360bb0e0d9..50de954ef79 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -59,21 +59,21 @@ exclusively locked by another process during the backup. You can use additional options to change VSS behaviour: * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, the default value is 120 seconds - * ``-o vss.excludeallmountpoints`` disable auto snapshotting of all volume mount points - * ``-o vss.excludevolumes`` allows excluding specific volumes or volume mount points from snapshotting + * ``-o vss.exclude-all-mount-points`` disable auto snapshotting of all volume mount points + * ``-o vss.exclude-volumes`` allows excluding specific volumes or volume mount points from snapshotting * ``-o vss.provider`` specifies VSS provider used for snapshotting -E.g., 2.5 minutes timeout with mount points snapshotting disabled can be specified as +For example a 2.5 minutes timeout with snapshotting of mount points disabled can be specified as .. 
code-block:: console - -o vss.timeout=2m30s -o vss.excludeallmountpoints=true + -o vss.timeout=2m30s -o vss.exclude-all-mount-points=true -and excluding drive ``D:\``, mount point ``C:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as +and excluding drive ``d:\``, mount point ``c:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as .. code-block:: console - -o vss.excludevolumes="d:;c:\MNT\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" + -o vss.exclude-volumes="d:;c:\mnt\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" VSS provider can be specified by GUID diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index cff881151b8..23e86b9119e 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -127,10 +127,12 @@ func TestParseMountPoints(t *testing.T) { if err != nil { t.Fatal(err) } - // We don't know a valid volume GUID path for C:\, but we'll at least check its format + // We don't know a valid volume GUID path for c:\, but we'll at least check its format if !volumeMatch.MatchString(sysVolume) { t.Fatalf("invalid volume GUID path: %s", sysVolume) } + // Changing the case and removing trailing backslash allows tests + // the equality of different ways of writing a volume name sysVolumeMutated := strings.ToUpper(sysVolume[:len(sysVolume)-1]) sysVolumeMatch := strings.ToLower(sysVolume) diff --git a/internal/fs/vss.go b/internal/fs/vss.go index a54475480c5..8bfffab71cf 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -33,7 +33,7 @@ func HasSufficientPrivilegesForVSS() error { return errors.New("VSS snapshots are only supported on windows") } -// GetVolumeNameForVolumeMountPoint clear input parameter +// GetVolumeNameForVolumeMountPoint add trailing backslash to input parameter // and calls the equivalent windows api. func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { return mountPoint, nil diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 18aea419d02..91c60c4babe 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -828,7 +828,7 @@ func HasSufficientPrivilegesForVSS() error { return err } -// GetVolumeNameForVolumeMountPoint clear input parameter +// GetVolumeNameForVolumeMountPoint add trailing backslash to input parameter // and calls the equivalent windows api. func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator { From 24330c19a8be55fc4d4f89d9f4b912b066e0fa32 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:21:33 +0300 Subject: [PATCH 158/893] Use kebab case in option names --- internal/fs/fs_local_vss.go | 6 +++--- internal/fs/fs_local_vss_test.go | 22 +++++++++++----------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index de30bcedb3b..0f983d13628 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -14,8 +14,8 @@ import ( // VSSConfig holds extended options of windows volume shadow copy service. type VSSConfig struct { - ExcludeAllMountPoints bool `option:"excludeallmountpoints" help:"exclude mountpoints from snapshotting on all volumes"` - ExcludeVolumes string `option:"excludevolumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 
'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` + ExcludeAllMountPoints bool `option:"exclude-all-mount-points" help:"exclude mountpoints from snapshotting on all volumes"` + ExcludeVolumes string `option:"exclude-volumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"` Provider string `option:"provider" help:"VSS provider identifier which will be used for snapshotting"` } @@ -80,7 +80,7 @@ func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]st } for _, s := range strings.Split(list, ";") { if v, err := GetVolumeNameForVolumeMountPoint(s); err != nil { - msgError(s, errors.Errorf("failed to parse vss.excludevolumes [%s]: %s", s, err)) + msgError(s, errors.Errorf("failed to parse vss.exclude-volumes [%s]: %s", s, err)) } else { if volumes == nil { volumes = make(map[string]struct{}) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 23e86b9119e..9e11b6c6e55 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -70,8 +70,8 @@ func TestVSSConfig(t *testing.T) { }, { options.Options{ - "vss.excludeallmountpoints": "t", - "vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}", + "vss.exclude-all-mount-points": "t", + "vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}", }, config{ excludeAllMountPoints: true, @@ -81,10 +81,10 @@ func TestVSSConfig(t *testing.T) { }, { options.Options{ - "vss.excludeallmountpoints": "0", - "vss.excludevolumes": "", - "vss.timeout": "120s", - "vss.provider": "Microsoft Software Shadow Copy provider 1.0", + "vss.exclude-all-mount-points": "0", + "vss.exclude-volumes": "", + "vss.timeout": "120s", + "vss.provider": "Microsoft Software Shadow Copy provider 1.0", }, config{ timeout: 120000000000, @@ -148,7 +148,7 @@ func TestParseMountPoints(t *testing.T) { }{ { options.Options{ - "vss.excludevolumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated, + "vss.exclude-volumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated, }, []string{ sysVolumeMatch, @@ -163,7 +163,7 @@ func TestParseMountPoints(t *testing.T) { }, { options.Options{ - "vss.excludevolumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, + "vss.exclude-volumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, }, []string{ sysVolumeMatch, @@ -175,9 +175,9 @@ func TestParseMountPoints(t *testing.T) { {``, false}, }, []string{ - `failed to parse vss\.excludevolumes \[z:\\nonexistent\]:.*`, - `failed to parse vss\.excludevolumes \[c:\\windows\\\]:.*`, - `failed to parse vss\.excludevolumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to parse vss\.exclude-volumes \[z:\\nonexistent\]:.*`, + `failed to parse vss\.exclude-volumes \[c:\\windows\\\]:.*`, + `failed to parse vss\.exclude-volumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, `failed to get volume from mount point \[c:\\windows\\\]:.*`, `failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, `failed to get volume from mount point \[\]:.*`, From 90b168eb6cde4fe1afd6aad68185d6abcca3b806 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:23:50 +0300 Subject: [PATCH 159/893] isMountPointExcluded to isMountPointIncluded --- internal/fs/fs_local_vss.go | 22 
++++++++++------------ internal/fs/fs_local_vss_test.go | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 0f983d13628..48ab165f1d7 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -145,22 +145,20 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fs.snapshotPath(name)) } -// isMountPointExcluded is true if given mountpoint excluded by user. -func (fs *LocalVss) isMountPointExcluded(mountPoint string) bool { +// isMountPointIncluded is true if given mountpoint included by user. +func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool { if fs.excludeVolumes == nil { - return false + return true } volume, err := GetVolumeNameForVolumeMountPoint(mountPoint) if err != nil { fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err)) - - return false + return true } _, ok := fs.excludeVolumes[strings.ToLower(volume)] - - return ok + return !ok } // snapshotPath returns the path inside a VSS snapshots if it already exists. @@ -199,20 +197,20 @@ func (fs *LocalVss) snapshotPath(path string) string { if !snapshotExists && !snapshotFailed { vssVolume := volumeNameLower + string(filepath.Separator) - if fs.isMountPointExcluded(vssVolume) { + if !fs.isMountPointIncluded(vssVolume) { fs.msgMessage("snapshots for [%s] excluded by user\n", vssVolume) fs.failedSnapshots[volumeNameLower] = struct{}{} } else { fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - var filter VolumeFilter + var includeVolume VolumeFilter if !fs.excludeAllMountPoints { - filter = func(volume string) bool { - return !fs.isMountPointExcluded(volume) + includeVolume = func(volume string) bool { + return fs.isMountPointIncluded(volume) } } - if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, filter, fs.msgError); err != nil { + if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, includeVolume, fs.msgError); err != nil { fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", vssVolume, err)) fs.failedSnapshots[volumeNameLower] = struct{}{} diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 9e11b6c6e55..c25ce4535c5 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -154,10 +154,10 @@ func TestParseMountPoints(t *testing.T) { sysVolumeMatch, }, []check{ - {`c:\`, true}, - {`c:`, true}, - {sysVolume, true}, - {sysVolumeMutated, true}, + {`c:\`, false}, + {`c:`, false}, + {sysVolume, false}, + {sysVolumeMutated, false}, }, []string{}, }, @@ -169,10 +169,10 @@ func TestParseMountPoints(t *testing.T) { sysVolumeMatch, }, []check{ - {`c:\windows\`, false}, - {`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, false}, - {`c:`, true}, - {``, false}, + {`c:\windows\`, true}, + {`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, true}, + {`c:`, false}, + {``, true}, }, []string{ `failed to parse vss\.exclude-volumes \[z:\\nonexistent\]:.*`, @@ -208,8 +208,8 @@ func TestParseMountPoints(t *testing.T) { } for _, c := range test.checks { - if dst.isMountPointExcluded(c.volume) != c.result { - t.Fatalf(`wrong check: isMountPointExcluded("%s") != %v`, c.volume, c.result) + if dst.isMountPointIncluded(c.volume) != c.result { + t.Fatalf(`wrong check: isMountPointIncluded("%s") != %v`, c.volume, c.result) } } From 7ee889bb0d0dcf0292745975454ed53d94cdb0a9 Mon Sep 17 00:00:00 2001 From: 
DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:25:25 +0300 Subject: [PATCH 160/893] Use S_FALSE and MaxInt --- internal/fs/vss_windows.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 91c60c4babe..e8c5dc56190 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -5,6 +5,7 @@ package fs import ( "fmt" + "math" "path/filepath" "runtime" "strings" @@ -24,6 +25,7 @@ type HRESULT uint //nolint:golint const ( S_OK HRESULT = 0x00000000 + S_FALSE HRESULT = 0x00000001 E_ACCESSDENIED HRESULT = 0x80070005 E_OUTOFMEMORY HRESULT = 0x8007000E E_INVALIDARG HRESULT = 0x80070057 @@ -640,7 +642,7 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) { // WaitUntilAsyncFinished waits until either the async call is finished or // the given timeout is reached. func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { - const maxTimeout = 2147483647 * time.Millisecond + const maxTimeout = math.MaxInt32 * time.Millisecond if timeout > maxTimeout { timeout = maxTimeout } @@ -740,7 +742,7 @@ func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, err result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4, uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props), uintptr(unsafe.Pointer(&fetched)), 0, 0) - if result == 1 { + if HRESULT(result) == S_FALSE { return uint(fetched), nil } @@ -791,8 +793,8 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) { // ensure COM is initialized before use if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil { - // CoInitializeEx returns 1 if COM is already initialized - if oleErr, ok := err.(*ole.OleError); !ok || oleErr.Code() != 1 { + // CoInitializeEx returns S_FALSE if COM is already initialized + if oleErr, ok := err.(*ole.OleError); !ok || HRESULT(oleErr.Code()) != S_FALSE { return nil, err } } From 125dba23c5e8e453eb7a5d784ac262cacca4f3c9 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:27:34 +0300 Subject: [PATCH 161/893] Rearange code --- internal/fs/fs_local_vss_test.go | 6 +----- internal/fs/vss_windows.go | 30 +++++++++++++++--------------- 2 files changed, 16 insertions(+), 20 deletions(-) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index c25ce4535c5..60262c8735b 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -274,11 +274,7 @@ func TestParseProvider(t *testing.T) { if err != nil { result = err.Error() } - matched, err := regexp.MatchString(test.result, result) - if err != nil { - panic(err) - } - if !matched || test.result == "" { + if test.result != result || test.result == "" { t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.result, result) } } else if !ole.IsEqualGUID(id, test.id) { diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index e8c5dc56190..0b51b00f398 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -537,13 +537,6 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { return nil } -func vssFreeProviderProperties(p *VssProviderProperties) { - ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) - p.providerName = nil - ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) - p.providerName = nil -} - // BackupComplete calls the equivalent VSS api. 
func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { var oleIUnknown *ole.IUnknown @@ -583,6 +576,13 @@ type VssProviderProperties struct { classID ole.GUID } +func vssFreeProviderProperties(p *VssProviderProperties) { + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) + p.providerName = nil + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) + p.providerVersion = nil +} + // GetSnapshotDeviceObject returns root path to access the snapshot files // and folders. func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string { @@ -1084,14 +1084,6 @@ func (p *VssSnapshot) Delete() error { } func getProviderID(provider string) (*ole.GUID, error) { - comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN) - if err != nil { - return nil, err - } - defer comInterface.Release() - - vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface)) - providerLower := strings.ToLower(provider) switch providerLower { case "": @@ -1100,6 +1092,14 @@ func getProviderID(provider string) (*ole.GUID, error) { return ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}"), nil } + comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN) + if err != nil { + return nil, err + } + defer comInterface.Release() + + vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface)) + enum, err := vssAdmin.QueryProviders() if err != nil { return nil, err From ccd35565ee10a12a7698f088aa37df1035fc230d Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:48:22 +0300 Subject: [PATCH 162/893] s/sec./seconds --- changelog/unreleased/pull-3067 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 index 855c7f2bedf..fbdcfd7e5cb 100644 --- a/changelog/unreleased/pull-3067 +++ b/changelog/unreleased/pull-3067 @@ -1,6 +1,6 @@ Enhancement: Add options to configure Windows Shadow Copy Service -Restic always used 120 sec. timeout and unconditionally created VSS snapshots +Restic always used 120 seconds timeout and unconditionally created VSS snapshots for all volume mount points on disk. Now this behavior can be fine-tuned by new options, like exclude specific volumes and mount points or completely disable auto snapshotting of volume mount points. From 08c6945d612ac48c9e38dd56d6e5677c99deca68 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Mon, 29 Apr 2024 16:21:38 -0600 Subject: [PATCH 163/893] Fix review comments --- changelog/unreleased/pull-4708 | 9 ++- doc/040_backup.rst | 4 +- doc/050_restore.rst | 4 +- internal/debug/debug.go | 4 +- internal/fs/sd_windows.go | 90 ++++++++------------------ internal/fs/sd_windows_test.go | 4 +- internal/fs/sd_windows_test_helpers.go | 17 +++++ internal/restic/node.go | 2 +- internal/restic/node_windows.go | 2 +- 9 files changed, 56 insertions(+), 80 deletions(-) diff --git a/changelog/unreleased/pull-4708 b/changelog/unreleased/pull-4708 index 2c666c30044..5c5d426b551 100644 --- a/changelog/unreleased/pull-4708 +++ b/changelog/unreleased/pull-4708 @@ -1,10 +1,9 @@ Enhancement: Back up and restore SecurityDescriptors on Windows -Restic did not back up SecurityDescriptors of files on Windows. -Restic now backs up and restores SecurityDescriptors (which includes owner, group, -discretionary access control list (DACL), system access control list (SACL)) -when backing up files and folders on Windows. 
This requires the user to be -a member of backup operators or the application must be run as admin. +Restic now backs up and restores SecurityDescriptors when backing up files and folders +on Windows which includes owner, group, discretionary access control list (DACL), +system access control list (SACL). This requires the user to be a member of backup +operators or the application must be run as admin. If that is not the case, only the current user's owner, group and DACL will be backed up and during restore only the DACL of the backed file will be restored while the current user's owner and group will be set during the restore. diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 3a332ca75e1..e125d2c656c 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -514,9 +514,9 @@ written, and the next backup needs to write new metadata again. If you really want to save the access time for files and directories, you can pass the ``--with-atime`` option to the ``backup`` command. -Backing up full security descriptors on windows is only possible when the user +Backing up full security descriptors on Windows is only possible when the user has ``SeBackupPrivilege``privilege or is running as admin. This is a restriction -of windows not restic. +of Windows not restic. If either of these conditions are not met, only the owner, group and DACL will be backed up. diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 5ab0286f10b..193a00870f3 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -72,9 +72,9 @@ Restoring symbolic links on windows is only possible when the user has ``SeCreateSymbolicLinkPrivilege`` privilege or is running as admin. This is a restriction of windows not restic. -Restoring full security descriptors on windows is only possible when the user has +Restoring full security descriptors on Windows is only possible when the user has ``SeRestorePrivilege``, ``SeSecurityPrivilege`` and ``SeTakeOwnershipPrivilege`` -privilege or is running as admin. This is a restriction of windows not restic. +privilege or is running as admin. This is a restriction of Windows not restic. If either of these conditions are not met, only the DACL will be restored. By default, restic does not restore files as sparse. Use ``restore --sparse`` to diff --git a/internal/debug/debug.go b/internal/debug/debug.go index 62c145e1a03..7bc3291d1f1 100644 --- a/internal/debug/debug.go +++ b/internal/debug/debug.go @@ -8,8 +8,6 @@ import ( "path/filepath" "runtime" "strings" - - "github.com/restic/restic/internal/fs" ) var opts struct { @@ -46,7 +44,7 @@ func initDebugLogger() { fmt.Fprintf(os.Stderr, "debug log file %v\n", debugfile) - f, err := fs.OpenFile(debugfile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + f, err := os.OpenFile(debugfile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) if err != nil { fmt.Fprintf(os.Stderr, "unable to open debug log file: %v\n", err) os.Exit(2) diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 9d53b39748d..d7f2152b194 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -9,7 +9,7 @@ import ( "unicode/utf16" "unsafe" - "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/debug" "golang.org/x/sys/windows" ) @@ -26,9 +26,7 @@ var ( // SeTakeOwnershipPrivilege allows the application to take ownership of files and directories, regardless of the permissions set on them. 
SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" - backupPrivilegeError error - restorePrivilegeError error - lowerPrivileges bool + lowerPrivileges bool ) // Flags for backup and restore with admin permissions @@ -49,14 +47,14 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err var sd *windows.SECURITY_DESCRIPTOR if lowerPrivileges { - sd, err = getNamedSecurityInfoLow(sd, err, filePath) + sd, err = getNamedSecurityInfoLow(filePath) } else { - sd, err = getNamedSecurityInfoHigh(sd, err, filePath) + sd, err = getNamedSecurityInfoHigh(filePath) } if err != nil { if isHandlePrivilegeNotHeldError(err) { lowerPrivileges = true - sd, err = getNamedSecurityInfoLow(sd, err, filePath) + sd, err = getNamedSecurityInfoLow(filePath) if err != nil { return nil, fmt.Errorf("get low-level named security info failed with: %w", err) } @@ -128,12 +126,12 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { } // getNamedSecurityInfoHigh gets the higher level SecurityDescriptor which requires admin permissions. -func getNamedSecurityInfoHigh(sd *windows.SECURITY_DESCRIPTOR, err error, filePath string) (*windows.SECURITY_DESCRIPTOR, error) { +func getNamedSecurityInfoHigh(filePath string) (*windows.SECURITY_DESCRIPTOR, error) { return windows.GetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, highSecurityFlags) } // getNamedSecurityInfoLow gets the lower level SecurityDescriptor which requires no admin permissions. -func getNamedSecurityInfoLow(sd *windows.SECURITY_DESCRIPTOR, err error, filePath string) (*windows.SECURITY_DESCRIPTOR, error) { +func getNamedSecurityInfoLow(filePath string) (*windows.SECURITY_DESCRIPTOR, error) { return windows.GetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, lowBackupSecurityFlags) } @@ -151,7 +149,7 @@ func setNamedSecurityInfoLow(filePath string, dacl *windows.ACL) error { func enableBackupPrivilege() { err := enableProcessPrivileges([]string{SeBackupPrivilege}) if err != nil { - backupPrivilegeError = fmt.Errorf("error enabling backup privilege: %w", err) + debug.Log("error enabling backup privilege: %v", err) } } @@ -159,26 +157,10 @@ func enableBackupPrivilege() { func enableRestorePrivilege() { err := enableProcessPrivileges([]string{SeRestorePrivilege, SeSecurityPrivilege, SeTakeOwnershipPrivilege}) if err != nil { - restorePrivilegeError = fmt.Errorf("error enabling restore/security privilege: %w", err) + debug.Log("error enabling restore/security privilege: %v", err) } } -// DisableBackupPrivileges disables privileges that are needed for backup operations. -// They may be reenabled if GetSecurityDescriptor is called again. -func DisableBackupPrivileges() error { - //Reset the once so that backup privileges can be enabled again if needed. - onceBackup = sync.Once{} - return enableDisableProcessPrivilege([]string{SeBackupPrivilege}, 0) -} - -// DisableRestorePrivileges disables privileges that are needed for restore operations. -// They may be reenabled if SetSecurityDescriptor is called again. -func DisableRestorePrivileges() error { - //Reset the once so that restore privileges can be enabled again if needed. 
- onceRestore = sync.Once{} - return enableDisableProcessPrivilege([]string{SeRestorePrivilege, SeSecurityPrivilege}, 0) -} - // isHandlePrivilegeNotHeldError checks if the error is ERROR_PRIVILEGE_NOT_HELD func isHandlePrivilegeNotHeldError(err error) bool { // Use a type assertion to check if the error is of type syscall.Errno @@ -189,23 +171,26 @@ func isHandlePrivilegeNotHeldError(err error) bool { return false } -// IsAdmin checks if current user is an administrator. -func IsAdmin() (isAdmin bool, err error) { - var sid *windows.SID - err = windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, 2, windows.SECURITY_BUILTIN_DOMAIN_RID, windows.DOMAIN_ALIAS_RID_ADMINS, - 0, 0, 0, 0, 0, 0, &sid) - if err != nil { - return false, errors.Errorf("sid error: %s", err) - } - token := windows.Token(0) - member, err := token.IsMember(sid) - if err != nil { - return false, errors.Errorf("token membership error: %s", err) +// SecurityDescriptorBytesToStruct converts the security descriptor bytes representation +// into a pointer to windows SECURITY_DESCRIPTOR. +func SecurityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) { + if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { + return nil, fmt.Errorf("securityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) } - return member, nil + s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) + return s, nil +} + +// securityDescriptorStructToBytes converts the pointer to windows SECURITY_DESCRIPTOR +// into a security descriptor bytes representation. +func securityDescriptorStructToBytes(sd *windows.SECURITY_DESCRIPTOR) ([]byte, error) { + b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) + return b, nil } -// The code below was adapted from github.com/Microsoft/go-winio under MIT license. +// The code below was adapted from +// https://github.com/microsoft/go-winio/blob/3c9576c9346a1892dee136329e7e15309e82fb4f/privilege.go +// under MIT license. // The MIT License (MIT) @@ -262,23 +247,6 @@ type PrivilegeError struct { privileges []uint64 } -// SecurityDescriptorBytesToStruct converts the security descriptor bytes representation -// into a pointer to windows SECURITY_DESCRIPTOR. -func SecurityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) { - if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { - return nil, fmt.Errorf("securityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) - } - s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) - return s, nil -} - -// securityDescriptorStructToBytes converts the pointer to windows SECURITY_DESCRIPTOR -// into a security descriptor bytes representation. -func securityDescriptorStructToBytes(sd *windows.SECURITY_DESCRIPTOR) ([]byte, error) { - b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) - return b, nil -} - // Error returns the string message for the error. 
func (e *PrivilegeError) Error() string { s := "Could not enable privilege " @@ -293,12 +261,6 @@ func (e *PrivilegeError) Error() string { s += getPrivilegeName(p) s += `"` } - if backupPrivilegeError != nil { - s += " backupPrivilegeError:" + backupPrivilegeError.Error() - } - if restorePrivilegeError != nil { - s += " restorePrivilegeError:" + restorePrivilegeError.Error() - } return s } diff --git a/internal/fs/sd_windows_test.go b/internal/fs/sd_windows_test.go index e4e37cb4a6e..e78241ed356 100644 --- a/internal/fs/sd_windows_test.go +++ b/internal/fs/sd_windows_test.go @@ -13,7 +13,7 @@ import ( "github.com/restic/restic/internal/test" ) -func Test_SetGetFileSecurityDescriptors(t *testing.T) { +func TestSetGetFileSecurityDescriptors(t *testing.T) { tempDir := t.TempDir() testfilePath := filepath.Join(tempDir, "testfile.txt") // create temp file @@ -31,7 +31,7 @@ func Test_SetGetFileSecurityDescriptors(t *testing.T) { testSecurityDescriptors(t, TestFileSDs, testfilePath) } -func Test_SetGetFolderSecurityDescriptors(t *testing.T) { +func TestSetGetFolderSecurityDescriptors(t *testing.T) { tempDir := t.TempDir() testfolderPath := filepath.Join(tempDir, "testfolder") // create temp folder diff --git a/internal/fs/sd_windows_test_helpers.go b/internal/fs/sd_windows_test_helpers.go index 8774087963e..8b3be5fd7c8 100644 --- a/internal/fs/sd_windows_test_helpers.go +++ b/internal/fs/sd_windows_test_helpers.go @@ -23,6 +23,23 @@ var ( } ) +// IsAdmin checks if current user is an administrator. +func IsAdmin() (isAdmin bool, err error) { + var sid *windows.SID + err = windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, 2, windows.SECURITY_BUILTIN_DOMAIN_RID, windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, &sid) + if err != nil { + return false, errors.Errorf("sid error: %s", err) + } + windows.GetCurrentProcessToken() + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false, errors.Errorf("token membership error: %s", err) + } + return member, nil +} + // CompareSecurityDescriptors runs tests for comparing 2 security descriptors in []byte format. func CompareSecurityDescriptors(t *testing.T, testPath string, sdInputBytes, sdOutputBytes []byte) { sdInput, err := SecurityDescriptorBytesToStruct(sdInputBytes) diff --git a/internal/restic/node.go b/internal/restic/node.go index 1e7e5d68eff..807ee0c0f33 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -721,7 +721,7 @@ func (node *Node) fillExtra(path string, fi os.FileInfo, ignoreXattrListError bo allowExtended, err := node.fillGenericAttributes(path, fi, stat) if allowExtended { // Skip processing ExtendedAttributes if allowExtended is false. - err = errors.CombineErrors(err, node.fillExtendedAttributes(path)) + err = errors.CombineErrors(err, node.fillExtendedAttributes(path, ignoreXattrListError)) } return err } diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 043a050918f..0c6d3775e3f 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -24,7 +24,7 @@ type WindowsAttributes struct { // FileAttributes is used for storing file attributes for windows files. 
FileAttributes *uint32 `generic:"file_attributes"` // SecurityDescriptor is used for storing security descriptors which includes - // owner, group, discretionary access control list (DACL), system access control list (SACL)) + // owner, group, discretionary access control list (DACL), system access control list (SACL) SecurityDescriptor *[]byte `generic:"security_descriptor"` } From 672f6cd776ae9738b1f3bd1404a2d1289ff135d6 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Mon, 29 Apr 2024 17:29:51 -0600 Subject: [PATCH 164/893] Fix review comments for privileges and security flags --- internal/fs/sd_windows.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index d7f2152b194..ccd20392a11 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -30,10 +30,10 @@ var ( ) // Flags for backup and restore with admin permissions -var highSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.SACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.BACKUP_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION | windows.PROTECTED_SACL_SECURITY_INFORMATION +var highSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.SACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.BACKUP_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION | windows.PROTECTED_SACL_SECURITY_INFORMATION | windows.UNPROTECTED_DACL_SECURITY_INFORMATION | windows.UNPROTECTED_SACL_SECURITY_INFORMATION // Flags for backup without admin permissions. If there are no admin permissions, only the current user's owner, group and DACL will be backed up. -var lowBackupSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION +var lowBackupSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION | windows.GROUP_SECURITY_INFORMATION | windows.DACL_SECURITY_INFORMATION | windows.LABEL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.SCOPE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION | windows.UNPROTECTED_DACL_SECURITY_INFORMATION // Flags for restore without admin permissions. If there are no admin permissions, only the DACL from the SD can be restored and owner and group will be set based on the current user. 
var lowRestoreSecurityFlags windows.SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION @@ -52,7 +52,7 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err sd, err = getNamedSecurityInfoHigh(filePath) } if err != nil { - if isHandlePrivilegeNotHeldError(err) { + if !lowerPrivileges && isHandlePrivilegeNotHeldError(err) { lowerPrivileges = true sd, err = getNamedSecurityInfoLow(filePath) if err != nil { From 3bb88e83070627fdabde44d91c763345261be0c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 May 2024 01:02:11 +0000 Subject: [PATCH 165/893] build(deps): bump golang.org/x/net from 0.23.0 to 0.24.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.23.0 to 0.24.0. - [Commits](https://github.com/golang/net/compare/v0.23.0...v0.24.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index c928b4a97b2..17c172f26c1 100644 --- a/go.mod +++ b/go.mod @@ -26,12 +26,12 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.21.0 - golang.org/x/net v0.23.0 + golang.org/x/crypto v0.22.0 + golang.org/x/net v0.24.0 golang.org/x/oauth2 v0.18.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.18.0 - golang.org/x/term v0.18.0 + golang.org/x/sys v0.19.0 + golang.org/x/term v0.19.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 google.golang.org/api v0.170.0 diff --git a/go.sum b/go.sum index 1a7dc1a7967..98bda7818de 100644 --- a/go.sum +++ b/go.sum @@ -206,8 +206,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -227,8 +227,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net 
v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= @@ -255,14 +255,14 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From 18a6d6b408703b3a71efca2f8d1ff5d7b0cff75a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 May 2024 01:02:22 +0000 Subject: [PATCH 166/893] build(deps): bump github.com/klauspost/compress from 1.17.7 to 1.17.8 Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.17.7 to 1.17.8. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.17.7...v1.17.8) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c928b4a97b2..bbe98c79935 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/go-ole/go-ole v1.3.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/klauspost/compress v1.17.7 + github.com/klauspost/compress v1.17.8 github.com/minio/minio-go/v7 v7.0.66 github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 diff --git a/go.sum b/go.sum index 1a7dc1a7967..b1cb7a87838 100644 --- a/go.sum +++ b/go.sum @@ -114,8 +114,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= From 502e5867a588f6d439d82b18bd81e397d77fb90a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 May 2024 01:02:39 +0000 Subject: [PATCH 167/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/storage/azblob Bumps [github.com/Azure/azure-sdk-for-go/sdk/storage/azblob](https://github.com/Azure/azure-sdk-for-go) from 1.3.1 to 1.3.2. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.3.1...sdk/storage/azblob/v1.3.2) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/storage/azblob dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index c928b4a97b2..a2e8e2aa543 100644 --- a/go.mod +++ b/go.mod @@ -2,9 +2,9 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.40.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.2.0 github.com/cenkalti/backoff/v4 v4.2.1 diff --git a/go.sum b/go.sum index 1a7dc1a7967..17a179bbe42 100644 --- a/go.sum +++ b/go.sum @@ -9,15 +9,15 @@ cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= From 30609ae6b252f8980ef2f860194f683425bdb7af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 May 2024 01:45:43 +0000 Subject: [PATCH 168/893] build(deps): bump golangci/golangci-lint-action from 4 to 5 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 4 to 5. 
- [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v4...v5) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b4b34e62f90..43480f9e257 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -261,7 +261,7 @@ jobs: uses: actions/checkout@v4 - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v5 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. version: v1.57.1 From 676f0dc60d01bff28c3a3183c8db06fa391f0bc7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 1 May 2024 16:28:57 +0200 Subject: [PATCH 169/893] add changelog --- changelog/unreleased/issue-4627 | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 changelog/unreleased/issue-4627 diff --git a/changelog/unreleased/issue-4627 b/changelog/unreleased/issue-4627 new file mode 100644 index 00000000000..626b341eacf --- /dev/null +++ b/changelog/unreleased/issue-4627 @@ -0,0 +1,8 @@ +Enhancement: Improve reliability of backend operations + +Restic now downloads pack files in large chunks instead of using a streaming +download. This prevents failures due to interrupted streams. The `restore` +command now also retries downloading individual blobs that cannot be retrieved. + +https://github.com/restic/restic/issues/4627 +https://github.com/restic/restic/pull/4605 From d4477a5a996fee329646dbdd8c0cb33f507cce25 Mon Sep 17 00:00:00 2001 From: flow-c <11755978+flow-c@users.noreply.github.com> Date: Sat, 4 May 2024 09:32:25 +0200 Subject: [PATCH 170/893] Update 060_forget.rst Replace deprecated `-1` with `unlimited` in calendar-related `--keep-*` options --- doc/060_forget.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/060_forget.rst b/doc/060_forget.rst index caeb6313a13..113098d0c36 100644 --- a/doc/060_forget.rst +++ b/doc/060_forget.rst @@ -205,7 +205,7 @@ The ``forget`` command accepts the following policy options: natural time boundaries and *not* relative to when you run ``forget``. Weeks are Monday 00:00 to Sunday 23:59, days 00:00 to 23:59, hours :00 to :59, etc. They also only count hours/days/weeks/etc which have one or more snapshots. - A value of ``-1`` will be interpreted as "forever", i.e. "keep all". + A value of ``unlimited`` will be interpreted as "forever", i.e. "keep all". .. 
note:: All duration related options (``--keep-{within-,}*``) ignore snapshots
   with a timestamp in the future (relative to when the ``forget`` command is

From 45509eafc8c0a08301bd734926f87030dadbd233 Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Sat, 4 May 2024 18:57:37 +0200
Subject: [PATCH 171/893] dump: load blobs of a file from repository in parallel

---
 internal/dump/common.go | 89 +++++++++++++++++++++++++++++++++--------
 1 file changed, 72 insertions(+), 17 deletions(-)

diff --git a/internal/dump/common.go b/internal/dump/common.go
index 0163288357d..116762b5a03 100644
--- a/internal/dump/common.go
+++ b/internal/dump/common.go
@@ -9,6 +9,7 @@ import (
 	"github.com/restic/restic/internal/errors"
 	"github.com/restic/restic/internal/restic"
 	"github.com/restic/restic/internal/walker"
+	"golang.org/x/sync/errgroup"
 )
 
 // A Dumper writes trees and files from a repository to a Writer
@@ -16,11 +17,11 @@ import (
 type Dumper struct {
 	cache  *bloblru.Cache
 	format string
-	repo   restic.BlobLoader
+	repo   restic.Loader
 	w      io.Writer
 }
 
-func New(format string, repo restic.BlobLoader, w io.Writer) *Dumper {
+func New(format string, repo restic.Loader, w io.Writer) *Dumper {
 	return &Dumper{
 		cache:  bloblru.New(64 << 20),
 		format: format,
@@ -103,27 +104,81 @@ func (d *Dumper) WriteNode(ctx context.Context, node *restic.Node) error {
 }
 
 func (d *Dumper) writeNode(ctx context.Context, w io.Writer, node *restic.Node) error {
-	var (
-		buf []byte
-		err error
-	)
-	for _, id := range node.Content {
-		blob, ok := d.cache.Get(id)
-		if !ok {
-			blob, err = d.repo.LoadBlob(ctx, restic.DataBlob, id, buf)
-			if err != nil {
-				return err
+	type loadTask struct {
+		id  restic.ID
+		out chan<- []byte
+	}
+	type writeTask struct {
+		data <-chan []byte
+	}
+
+	loaderCh := make(chan loadTask)
+	// per worker: allows for one blob that is being downloaded plus one blob that is queued for writing
+	writerCh := make(chan writeTask, d.repo.Connections()*2)
+
+	wg, ctx := errgroup.WithContext(ctx)
+
+	wg.Go(func() error {
+		defer close(loaderCh)
+		defer close(writerCh)
+		for _, id := range node.Content {
+			// non-blocking blob handover to allow the loader to load the next blob
+			// while the previous one is still being written
+			ch := make(chan []byte, 1)
+			select {
+			case loaderCh <- loadTask{id: id, out: ch}:
+			case <-ctx.Done():
+				return ctx.Err()
 			}
-			buf = d.cache.Add(id, blob) // Reuse evicted buffer.
+			select {
+			case writerCh <- writeTask{data: ch}:
+			case <-ctx.Done():
+				return ctx.Err()
+			}
 		}
+		return nil
+	})
+
+	for i := uint(0); i < d.repo.Connections(); i++ {
+		wg.Go(func() error {
+			for task := range loaderCh {
+				var err error
+				blob, ok := d.cache.Get(task.id)
+				if !ok {
+					blob, err = d.repo.LoadBlob(ctx, restic.DataBlob, task.id, nil)
+					if err != nil {
+						return err
+					}
+
+					d.cache.Add(task.id, blob)
+				}
+
+				select {
+				case task.out <- blob:
+				case <-ctx.Done():
+					return ctx.Err()
+				}
+			}
+			return nil
+		})
+	}
 
-		if _, err := w.Write(blob); err != nil {
-			return errors.Wrap(err, "Write")
+	wg.Go(func() error {
+		for result := range writerCh {
+			select {
+			case data := <-result.data:
+				if _, err := w.Write(data); err != nil {
+					return errors.Wrap(err, "Write")
+				}
+			case <-ctx.Done():
+				return ctx.Err()
+			}
 		}
-	}
+		return nil
+	})
 
-	return nil
+	return wg.Wait()
 }
 
 // IsDir checks if the given node is a directory.
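
The patch above fans blob downloads out to d.repo.Connections() workers while a single writer drains the results in file order: the producer queues one load task per blob and, in the same order, a one-element handover channel for the writer, so downloads overlap while the output stays ordered and the data buffered in memory stays bounded. A minimal, self-contained sketch of that pattern follows; it is illustrative only and not part of the patch, and the names used here (task, fetch, workers, ids) are placeholders rather than restic APIs — only the errgroup package is the same one the patch uses.

    package main

    import (
    	"context"
    	"fmt"

    	"golang.org/x/sync/errgroup"
    )

    // task pairs a chunk ID with the channel its data will be handed over on.
    type task struct {
    	id  int
    	out chan []byte
    }

    // fetch stands in for the actual blob download (repo.LoadBlob in the patch above).
    func fetch(id int) []byte { return []byte(fmt.Sprintf("chunk-%d\n", id)) }

    func main() {
    	ids := []int{1, 2, 3, 4}
    	const workers = 2

    	loadCh := make(chan task)
    	writeCh := make(chan chan []byte, workers*2)

    	wg, ctx := errgroup.WithContext(context.Background())

    	// producer: queue one load task per chunk and, in the same order, one write slot
    	wg.Go(func() error {
    		defer close(loadCh)
    		defer close(writeCh)
    		for _, id := range ids {
    			ch := make(chan []byte, 1) // buffered so a worker never waits for the writer
    			select {
    			case loadCh <- task{id: id, out: ch}:
    			case <-ctx.Done():
    				return ctx.Err()
    			}
    			writeCh <- ch
    		}
    		return nil
    	})

    	// workers: download chunks concurrently
    	for i := 0; i < workers; i++ {
    		wg.Go(func() error {
    			for t := range loadCh {
    				t.out <- fetch(t.id)
    			}
    			return nil
    		})
    	}

    	// writer: emit the results strictly in submission order
    	wg.Go(func() error {
    		for ch := range writeCh {
    			fmt.Print(string(<-ch))
    		}
    		return nil
    	})

    	if err := wg.Wait(); err != nil {
    		fmt.Println("error:", err)
    	}
    }

Dropping the buffer on the handover channel would serialize each download with its write; the one-element buffer is what lets a worker move on to the next blob immediately.
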
From bd03af2febc5e223e8edf4fade1638d43720329a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 5 May 2024 11:37:35 +0200 Subject: [PATCH 172/893] dump: add GetOrCompute to bloblru cache --- internal/bloblru/cache.go | 48 +++++++++++++++++++++++++++++++++++++-- internal/dump/common.go | 14 ++++-------- 2 files changed, 51 insertions(+), 11 deletions(-) diff --git a/internal/bloblru/cache.go b/internal/bloblru/cache.go index 302ecc769b9..4477e37a94b 100644 --- a/internal/bloblru/cache.go +++ b/internal/bloblru/cache.go @@ -20,13 +20,15 @@ type Cache struct { c *simplelru.LRU[restic.ID, []byte] free, size int // Current and max capacity, in bytes. + inProgress map[restic.ID]chan struct{} } // New constructs a blob cache that stores at most size bytes worth of blobs. func New(size int) *Cache { c := &Cache{ - free: size, - size: size, + free: size, + size: size, + inProgress: make(map[restic.ID]chan struct{}), } // NewLRU wants us to specify some max. number of entries, else it errors. @@ -85,6 +87,48 @@ func (c *Cache) Get(id restic.ID) ([]byte, bool) { return blob, ok } +func (c *Cache) GetOrCompute(id restic.ID, compute func() ([]byte, error)) ([]byte, error) { + // check if already cached + blob, ok := c.Get(id) + if ok { + return blob, nil + } + + // check for parallel download or start our own + finish := make(chan struct{}) + c.mu.Lock() + waitForResult, isDownloading := c.inProgress[id] + if !isDownloading { + c.inProgress[id] = finish + + // remove progress channel once finished here + defer func() { + c.mu.Lock() + delete(c.inProgress, id) + c.mu.Unlock() + close(finish) + }() + } + c.mu.Unlock() + + if isDownloading { + // wait for result of parallel download + <-waitForResult + blob, ok := c.Get(id) + if ok { + return blob, nil + } + } + + // download it + blob, err := compute() + if err == nil { + c.Add(id, blob) + } + + return blob, err +} + func (c *Cache) evict(key restic.ID, blob []byte) { debug.Log("bloblru.Cache: evict %v, %d bytes", key, cap(blob)) c.free += cap(blob) + overhead diff --git a/internal/dump/common.go b/internal/dump/common.go index 116762b5a03..62145ba9cd8 100644 --- a/internal/dump/common.go +++ b/internal/dump/common.go @@ -143,15 +143,11 @@ func (d *Dumper) writeNode(ctx context.Context, w io.Writer, node *restic.Node) for i := uint(0); i < d.repo.Connections(); i++ { wg.Go(func() error { for task := range loaderCh { - var err error - blob, ok := d.cache.Get(task.id) - if !ok { - blob, err = d.repo.LoadBlob(ctx, restic.DataBlob, task.id, nil) - if err != nil { - return err - } - - d.cache.Add(task.id, blob) + blob, err := d.cache.GetOrCompute(task.id, func() ([]byte, error) { + return d.repo.LoadBlob(ctx, restic.DataBlob, task.id, nil) + }) + if err != nil { + return err } select { From 7cce667f921da5735f8c4a04540208599494a2bd Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 5 May 2024 11:38:17 +0200 Subject: [PATCH 173/893] fuse: switch to use bloblru.GetOrCompute --- internal/fuse/file.go | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/internal/fuse/file.go b/internal/fuse/file.go index 6152c912271..5190febbb55 100644 --- a/internal/fuse/file.go +++ b/internal/fuse/file.go @@ -96,20 +96,14 @@ func (f *file) Open(_ context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse } func (f *openFile) getBlobAt(ctx context.Context, i int) (blob []byte, err error) { - - blob, ok := f.root.blobCache.Get(f.node.Content[i]) - if ok { - return blob, nil - } - - blob, err = f.root.repo.LoadBlob(ctx, restic.DataBlob, 
f.node.Content[i], nil) + blob, err = f.root.blobCache.GetOrCompute(f.node.Content[i], func() ([]byte, error) { + return f.root.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], nil) + }) if err != nil { debug.Log("LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err) return nil, unwrapCtxCanceled(err) } - f.root.blobCache.Add(f.node.Content[i], blob) - return blob, nil } From 4d55a62ada22931ce7e54ef133d3472c40e81148 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 5 May 2024 12:00:25 +0200 Subject: [PATCH 174/893] bloblru: add test for GetOrCompute --- internal/bloblru/cache_test.go | 67 ++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/internal/bloblru/cache_test.go b/internal/bloblru/cache_test.go index aa6f4465c43..b2becd2560a 100644 --- a/internal/bloblru/cache_test.go +++ b/internal/bloblru/cache_test.go @@ -1,11 +1,14 @@ package bloblru import ( + "context" + "fmt" "math/rand" "testing" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "golang.org/x/sync/errgroup" ) func TestCache(t *testing.T) { @@ -52,6 +55,70 @@ func TestCache(t *testing.T) { rtest.Equals(t, cacheSize, c.free) } +func TestCacheGetOrCompute(t *testing.T) { + var id1, id2 restic.ID + id1[0] = 1 + id2[0] = 2 + + const ( + kiB = 1 << 10 + cacheSize = 64*kiB + 3*overhead + ) + + c := New(cacheSize) + + e := fmt.Errorf("broken") + _, err := c.GetOrCompute(id1, func() ([]byte, error) { + return nil, e + }) + rtest.Equals(t, e, err, "expected error was not returned") + + // fill buffer + data1 := make([]byte, 10*kiB) + blob, err := c.GetOrCompute(id1, func() ([]byte, error) { + return data1, nil + }) + rtest.OK(t, err) + rtest.Equals(t, &data1[0], &blob[0], "wrong buffer returend") + + // now the buffer should be returned without calling the compute function + blob, err = c.GetOrCompute(id1, func() ([]byte, error) { + return nil, e + }) + rtest.OK(t, err) + rtest.Equals(t, &data1[0], &blob[0], "wrong buffer returend") + + // check concurrency + wg, _ := errgroup.WithContext(context.TODO()) + wait := make(chan struct{}) + calls := make(chan struct{}, 10) + + // start a bunch of blocking goroutines + for i := 0; i < 10; i++ { + wg.Go(func() error { + buf, err := c.GetOrCompute(id2, func() ([]byte, error) { + // block to ensure that multiple requests are waiting in parallel + <-wait + calls <- struct{}{} + return make([]byte, 42), nil + }) + if len(buf) != 42 { + return fmt.Errorf("wrong buffer") + } + return err + }) + } + + close(wait) + rtest.OK(t, wg.Wait()) + close(calls) + count := 0 + for range calls { + count++ + } + rtest.Equals(t, 1, count, "expected exactly one call of the compute function") +} + func BenchmarkAdd(b *testing.B) { const ( MiB = 1 << 20 From e184538ddf9f4757cce6017ca3a0291cca2dd601 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 5 May 2024 12:12:21 +0200 Subject: [PATCH 175/893] dump: add changelog --- changelog/unreleased/pull-4796 | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 changelog/unreleased/pull-4796 diff --git a/changelog/unreleased/pull-4796 b/changelog/unreleased/pull-4796 new file mode 100644 index 00000000000..319b9ccdcec --- /dev/null +++ b/changelog/unreleased/pull-4796 @@ -0,0 +1,8 @@ +Enhancement: Improve `dump` performance for large files + +The `dump` command now retrieves the data chunks for a file in parallel. This +improves the download performance by up to the configured number of parallel +backend connections. 
+ +https://github.com/restic/restic/issues/3406 +https://github.com/restic/restic/pull/4796 From a4fd1b91e58294b93fa590bdf25f6cf8e2340ee3 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Mon, 6 May 2024 16:54:08 -0600 Subject: [PATCH 176/893] Fix review comments Change lowerPrivileges from bool to atomic.Bool. Add missing cleanup from upstream go-winio. Add handling for ERROR_NOT_ALL_ASSIGNED warning. --- internal/fs/sd_windows.go | 42 +++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index ccd20392a11..cc44433c376 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "fmt" "sync" + "sync/atomic" "syscall" "unicode/utf16" "unsafe" @@ -26,7 +27,7 @@ var ( // SeTakeOwnershipPrivilege allows the application to take ownership of files and directories, regardless of the permissions set on them. SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" - lowerPrivileges bool + lowerPrivileges atomic.Bool ) // Flags for backup and restore with admin permissions @@ -46,14 +47,15 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err var sd *windows.SECURITY_DESCRIPTOR - if lowerPrivileges { + if lowerPrivileges.Load() { sd, err = getNamedSecurityInfoLow(filePath) } else { sd, err = getNamedSecurityInfoHigh(filePath) } if err != nil { - if !lowerPrivileges && isHandlePrivilegeNotHeldError(err) { - lowerPrivileges = true + if !lowerPrivileges.Load() && isHandlePrivilegeNotHeldError(err) { + // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. + lowerPrivileges.Store(true) sd, err = getNamedSecurityInfoLow(filePath) if err != nil { return nil, fmt.Errorf("get low-level named security info failed with: %w", err) @@ -104,16 +106,16 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { sacl = nil } - if lowerPrivileges { + if lowerPrivileges.Load() { err = setNamedSecurityInfoLow(filePath, dacl) } else { err = setNamedSecurityInfoHigh(filePath, owner, group, dacl, sacl) } if err != nil { - if isHandlePrivilegeNotHeldError(err) { + if !lowerPrivileges.Load() && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. - lowerPrivileges = true + lowerPrivileges.Store(true) err = setNamedSecurityInfoLow(filePath, dacl) if err != nil { return fmt.Errorf("set low-level named security info failed with: %w", err) @@ -231,7 +233,7 @@ const ( SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED //revive:disable-next-line:var-naming ALL_CAPS - ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED + ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED ) var ( @@ -287,11 +289,6 @@ func enableProcessPrivileges(names []string) error { return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) } -// DisableProcessPrivileges disables privileges globally for the process. 
-func DisableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, 0) -} - func enableDisableProcessPrivilege(names []string, action uint32) error { privileges, err := mapPrivileges(names) if err != nil { @@ -325,7 +322,7 @@ func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) e return err } if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno - return &PrivilegeError{privileges} + debug.Log("Not all requested privileges were fully set: %v. AdjustTokenPrivileges returned warning: %v", privileges, err) } return nil } @@ -349,6 +346,15 @@ func getPrivilegeName(luid uint64) string { return string(utf16.Decode(displayNameBuffer[:displayBufSize])) } +// The functions below are copied over from https://github.com/microsoft/go-winio/blob/main/zsyscall_windows.go + +// This windows api always returns an error even in case of success, warnings (partial success) and error cases. +// +// Full success - When we call this with admin permissions, it returns DNS_ERROR_RCODE_NO_ERROR (0). +// This gets translated to errErrorEinval and ultimately in adjustTokenPrivileges, it gets ignored. +// +// Partial success - If we call this api without admin privileges, privileges related to SACLs do not get set and +// though the api returns success, it returns an error - golang.org/x/sys/windows.ERROR_NOT_ALL_ASSIGNED (1300) func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { var _p0 uint32 if releaseAll { @@ -356,7 +362,7 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou } r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) success = r0 != 0 - if !success { + if true { err = errnoErr(e1) } return @@ -372,7 +378,7 @@ func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, } func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageID *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageID)), 0) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageID))) if r1 == 0 { err = errnoErr(e1) } @@ -389,7 +395,7 @@ func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size * } func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -418,6 +424,8 @@ func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err return } +// The code below was copied from 
https://github.com/microsoft/go-winio/blob/main/tools/mkwinsyscall/mkwinsyscall.go + // errnoErr returns common boxed Errno values, to prevent // allocations at runtime. func errnoErr(e syscall.Errno) error { From 0987c731ec7abf2f65fd42ea5e362c14edd96bf6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 1 May 2024 20:03:31 +0200 Subject: [PATCH 177/893] backend: configure protocol-level connection health checks This should detect a connection that is stuck for more than 2 minutes. --- internal/backend/http_transport.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go index 19b20dc6af6..354611e0756 100644 --- a/internal/backend/http_transport.go +++ b/internal/backend/http_transport.go @@ -13,6 +13,7 @@ import ( "github.com/peterbourgon/unixtransport" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "golang.org/x/net/http2" ) // TransportOptions collects various options which can be set for an HTTP based @@ -74,7 +75,6 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { KeepAlive: 30 * time.Second, DualStack: true, }).DialContext, - ForceAttemptHTTP2: true, MaxIdleConns: 100, MaxIdleConnsPerHost: 100, IdleConnTimeout: 90 * time.Second, @@ -83,6 +83,17 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { TLSClientConfig: &tls.Config{}, } + // ensure that http2 connections are closed if they are broken + h2, err := http2.ConfigureTransports(tr) + if err != nil { + panic(err) + } + if feature.Flag.Enabled(feature.HTTPTimeouts) { + h2.WriteByteTimeout = 120 * time.Second + h2.ReadIdleTimeout = 60 * time.Second + h2.PingTimeout = 60 * time.Second + } + unixtransport.Register(tr) if opts.InsecureTLS { From 877867023271c933948bf3fa8cfe1410c2d39cc8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 1 May 2024 20:06:54 +0200 Subject: [PATCH 178/893] backend: cancel stuck http requests requests that make no upload or download progress within a timeout are canceled. 
---
 internal/backend/http_transport.go       |   8 +-
 internal/backend/watchdog_roundtriper.go | 104 +++++++++++++++++++++++
 internal/feature/registry.go             |   2 +
 3 files changed, 113 insertions(+), 1 deletion(-)
 create mode 100644 internal/backend/watchdog_roundtriper.go

diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go
index 354611e0756..09eb3cf16ea 100644
--- a/internal/backend/http_transport.go
+++ b/internal/backend/http_transport.go
@@ -13,6 +13,7 @@ import (
 	"github.com/peterbourgon/unixtransport"
 	"github.com/restic/restic/internal/debug"
 	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/feature"
 	"golang.org/x/net/http2"
 )
 
@@ -130,6 +131,11 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) {
 		tr.TLSClientConfig.RootCAs = pool
 	}
 
+	rt := http.RoundTripper(tr)
+	if feature.Flag.Enabled(feature.HTTPTimeouts) {
+		rt = newWatchdogRoundtripper(rt, 120*time.Second, 128*1024)
+	}
+
 	// wrap in the debug round tripper (if active)
-	return debug.RoundTripper(tr), nil
+	return debug.RoundTripper(rt), nil
 }
diff --git a/internal/backend/watchdog_roundtriper.go b/internal/backend/watchdog_roundtriper.go
new file mode 100644
index 00000000000..fb7863002c8
--- /dev/null
+++ b/internal/backend/watchdog_roundtriper.go
@@ -0,0 +1,104 @@
+package backend
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"time"
+)
+
+// watchdogRoundtripper cancels an http request if an upload or download does not make progress
+// within the timeout. The time between fully sending the request and receiving a response is also
+// limited by this timeout. This ensures that stuck requests are cancelled after some time.
+//
+// The roundtripper assumes that the upload and download happen continuously. In particular,
+// the caller must not make long pauses between individual read requests from the response body.
+type watchdogRoundtripper struct { + rt http.RoundTripper + timeout time.Duration + chunkSize int +} + +var _ http.RoundTripper = &watchdogRoundtripper{} + +func newWatchdogRoundtripper(rt http.RoundTripper, timeout time.Duration, chunkSize int) *watchdogRoundtripper { + return &watchdogRoundtripper{ + rt: rt, + timeout: timeout, + chunkSize: chunkSize, + } +} + +func (w *watchdogRoundtripper) RoundTrip(req *http.Request) (*http.Response, error) { + timer := time.NewTimer(w.timeout) + ctx, cancel := context.WithCancel(req.Context()) + + // cancel context if timer expires + go func() { + defer timer.Stop() + select { + case <-timer.C: + cancel() + case <-ctx.Done(): + } + }() + + kick := func() { + timer.Reset(w.timeout) + } + + req = req.Clone(ctx) + if req.Body != nil { + // kick watchdog timer as long as uploading makes progress + req.Body = newWatchdogReadCloser(req.Body, w.chunkSize, kick, nil) + } + + resp, err := w.rt.RoundTrip(req) + if err != nil { + return nil, err + } + + // kick watchdog timer as long as downloading makes progress + // cancel context to stop goroutine once response body is closed + resp.Body = newWatchdogReadCloser(resp.Body, w.chunkSize, kick, cancel) + return resp, nil +} + +func newWatchdogReadCloser(rc io.ReadCloser, chunkSize int, kick func(), close func()) *watchdogReadCloser { + return &watchdogReadCloser{ + rc: rc, + chunkSize: chunkSize, + kick: kick, + close: close, + } +} + +type watchdogReadCloser struct { + rc io.ReadCloser + chunkSize int + kick func() + close func() +} + +var _ io.ReadCloser = &watchdogReadCloser{} + +func (w *watchdogReadCloser) Read(p []byte) (n int, err error) { + w.kick() + + // Read is not required to fill the whole passed in byte slice + // Thus, keep things simple and just stay within our chunkSize. + if len(p) > w.chunkSize { + p = p[:w.chunkSize] + } + n, err = w.rc.Read(p) + w.kick() + + return n, err +} + +func (w *watchdogReadCloser) Close() error { + if w.close != nil { + w.close() + } + return w.rc.Close() +} diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 2d2e45edf76..b0e4d2ed78a 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -8,6 +8,7 @@ const ( DeprecateLegacyIndex FlagName = "deprecate-legacy-index" DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout" DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" + HTTPTimeouts FlagName = "http-timeouts" ) func init() { @@ -15,5 +16,6 @@ func init() { DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."}, DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. 
Will be removed in a future restic version after repository format 3 is available"}, + HTTPTimeouts: {Type: Beta, Description: "enforce timeouts for stuck HTTP requests."}, }) } From ebd01a467599b469e486cb08a170ab04e787bc8a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 1 May 2024 21:54:21 +0200 Subject: [PATCH 179/893] backend: add tests for watchdogRoundTripper --- internal/backend/watchdog_roundtriper_test.go | 201 ++++++++++++++++++ 1 file changed, 201 insertions(+) create mode 100644 internal/backend/watchdog_roundtriper_test.go diff --git a/internal/backend/watchdog_roundtriper_test.go b/internal/backend/watchdog_roundtriper_test.go new file mode 100644 index 00000000000..a13d670e036 --- /dev/null +++ b/internal/backend/watchdog_roundtriper_test.go @@ -0,0 +1,201 @@ +package backend + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + rtest "github.com/restic/restic/internal/test" +) + +func TestRead(t *testing.T) { + data := []byte("abcdef") + var ctr int + kick := func() { + ctr++ + } + var closed bool + onClose := func() { + closed = true + } + + wd := newWatchdogReadCloser(io.NopCloser(bytes.NewReader(data)), 1, kick, onClose) + + out, err := io.ReadAll(wd) + rtest.OK(t, err) + rtest.Equals(t, data, out, "data mismatch") + // the EOF read also triggers the kick function + rtest.Equals(t, len(data)*2+2, ctr, "unexpected number of kick calls") + + rtest.Equals(t, false, closed, "close function called too early") + rtest.OK(t, wd.Close()) + rtest.Equals(t, true, closed, "close function not called") +} + +func TestRoundtrip(t *testing.T) { + t.Parallel() + + // at the higher delay values, it takes longer to transmit the request/response body + // than the roundTripper timeout + for _, delay := range []int{0, 1, 10, 20} { + t.Run(fmt.Sprintf("%v", delay), func(t *testing.T) { + msg := []byte("ping-pong-data") + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + data, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + + // slowly send the reply + for len(data) >= 2 { + _, _ = w.Write(data[:2]) + w.(http.Flusher).Flush() + data = data[2:] + time.Sleep(time.Duration(delay) * time.Millisecond) + } + _, _ = w.Write(data) + })) + defer srv.Close() + + rt := newWatchdogRoundtripper(http.DefaultTransport, 50*time.Millisecond, 2) + req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(newSlowReader(bytes.NewReader(msg), time.Duration(delay)*time.Millisecond))) + rtest.OK(t, err) + + resp, err := rt.RoundTrip(req) + rtest.OK(t, err) + rtest.Equals(t, 200, resp.StatusCode, "unexpected status code") + + response, err := io.ReadAll(resp.Body) + rtest.OK(t, err) + rtest.Equals(t, msg, response, "unexpected response") + + rtest.OK(t, resp.Body.Close()) + }) + } +} + +func TestCanceledRoundtrip(t *testing.T) { + rt := newWatchdogRoundtripper(http.DefaultTransport, time.Second, 2) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + req, err := http.NewRequestWithContext(ctx, "GET", "http://some.random.url.dfdgsfg", nil) + rtest.OK(t, err) + + resp, err := rt.RoundTrip(req) + rtest.Equals(t, context.Canceled, err) + // make linter happy + if resp != nil { + rtest.OK(t, resp.Body.Close()) + } +} + +type slowReader struct { + data io.Reader + delay time.Duration +} + +func newSlowReader(data io.Reader, delay time.Duration) *slowReader { + return &slowReader{ + data: data, + delay: delay, + } +} + 
+func (s *slowReader) Read(p []byte) (n int, err error) { + time.Sleep(s.delay) + return s.data.Read(p) +} + +func TestUploadTimeout(t *testing.T) { + t.Parallel() + + msg := []byte("ping") + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(500) + return + } + t.Error("upload should have been canceled") + })) + defer srv.Close() + + rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024) + req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(newSlowReader(bytes.NewReader(msg), 100*time.Millisecond))) + rtest.OK(t, err) + + resp, err := rt.RoundTrip(req) + rtest.Equals(t, context.Canceled, err) + // make linter happy + if resp != nil { + rtest.OK(t, resp.Body.Close()) + } +} + +func TestProcessingTimeout(t *testing.T) { + t.Parallel() + + msg := []byte("ping") + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(500) + return + } + time.Sleep(100 * time.Millisecond) + w.WriteHeader(200) + })) + defer srv.Close() + + rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024) + req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(bytes.NewReader(msg))) + rtest.OK(t, err) + + resp, err := rt.RoundTrip(req) + rtest.Equals(t, context.Canceled, err) + // make linter happy + if resp != nil { + rtest.OK(t, resp.Body.Close()) + } +} + +func TestDownloadTimeout(t *testing.T) { + t.Parallel() + + msg := []byte("ping") + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + data, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + _, _ = w.Write(data[:2]) + w.(http.Flusher).Flush() + data = data[2:] + + time.Sleep(100 * time.Millisecond) + _, _ = w.Write(data) + + })) + defer srv.Close() + + rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024) + req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(bytes.NewReader(msg))) + rtest.OK(t, err) + + resp, err := rt.RoundTrip(req) + rtest.OK(t, err) + rtest.Equals(t, 200, resp.StatusCode, "unexpected status code") + + _, err = io.ReadAll(resp.Body) + rtest.Equals(t, context.Canceled, err, "response download not canceled") + rtest.OK(t, resp.Body.Close()) +} From 3740700ddca7eaf61524462a0d56cc27d632dc4a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 1 May 2024 22:03:20 +0200 Subject: [PATCH 180/893] add http timeouts to changelog --- changelog/unreleased/issue-4627 | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/changelog/unreleased/issue-4627 b/changelog/unreleased/issue-4627 index 626b341eacf..bbc861b8e49 100644 --- a/changelog/unreleased/issue-4627 +++ b/changelog/unreleased/issue-4627 @@ -4,5 +4,14 @@ Restic now downloads pack files in large chunks instead of using a streaming download. This prevents failures due to interrupted streams. The `restore` command now also retries downloading individual blobs that cannot be retrieved. +HTTP requests that are stuck for more than two minutes while uploading or +downloading are now forcibly interrupted. This ensures that stuck requests are +retried after a short timeout. These new request timeouts can temporarily be +disabled by setting the environment variable +`RESTIC_FEATURES=http-timeouts=false`. 
Note that this feature flag will be +removed in the next minor restic version. + https://github.com/restic/restic/issues/4627 +https://github.com/restic/restic/issues/4193 https://github.com/restic/restic/pull/4605 +https://github.com/restic/restic/pull/4792 From d6708505b951bcb9c6b27c9f696a3a9684e39b96 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Fri, 17 May 2024 14:15:03 -0600 Subject: [PATCH 181/893] Add helpers for Windows Extended Attributes --- internal/fs/ea_windows.go | 284 +++++++++++++++++++++++++++++++++ internal/fs/ea_windows_test.go | 259 ++++++++++++++++++++++++++++++ 2 files changed, 543 insertions(+) create mode 100644 internal/fs/ea_windows.go create mode 100644 internal/fs/ea_windows_test.go diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go new file mode 100644 index 00000000000..e4b23d35a6c --- /dev/null +++ b/internal/fs/ea_windows.go @@ -0,0 +1,284 @@ +//go:build windows +// +build windows + +package fs + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// The code below was adapted from https://github.com/microsoft/go-winio under MIT license. + +// The MIT License (MIT) + +// Copyright (c) 2015 Microsoft + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// The code below was copied over from https://github.com/microsoft/go-winio/blob/main/ea.go under MIT license. + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. 
+type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return ea, nb, err + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return ea, nb, err + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return ea, nb, err +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. +func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return eas, err +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} + +// The code below was copied over from https://github.com/microsoft/go-winio/blob/main/pipe.go under MIT license. + +type ntStatus int32 + +func (status ntStatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} + +// The code below was copied over from https://github.com/microsoft/go-winio/blob/main/zsyscall_windows.go under MIT license. 
+
+// ioStatusBlock represents the IO_STATUS_BLOCK struct defined here:
+// https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ns-wdm-_io_status_block
+type ioStatusBlock struct {
+	Status, Information uintptr
+}
+
+var (
+	modntdll                       = windows.NewLazySystemDLL("ntdll.dll")
+	procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
+)
+
+func rtlNtStatusToDosError(status ntStatus) (winerr error) {
+	r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status))
+	if r0 != 0 {
+		winerr = syscall.Errno(r0)
+	}
+	return
+}
+
+// The code below was adapted from https://github.com/ambarve/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea.go
+// under MIT license.
+
+var (
+	procNtQueryEaFile = modntdll.NewProc("NtQueryEaFile")
+	procNtSetEaFile   = modntdll.NewProc("NtSetEaFile")
+)
+
+const (
+	// noExtendedAttribsStatus is a constant value which indicates no extended attributes were found
+	noExtendedAttribsStatus = -1073741742
+)
+
+// GetFileEA retrieves the extended attributes for the file represented by `handle`. The
+// `handle` must have been opened with file access flag FILE_READ_EA (0x8).
+// The extended file attribute names in Windows are case-insensitive and when fetching
+// the attributes the names are generally returned in UPPER case.
+func GetFileEA(handle windows.Handle) ([]ExtendedAttribute, error) {
+	// default buffer size to start with
+	bufLen := 1024
+	buf := make([]byte, bufLen)
+	var iosb ioStatusBlock
+	// keep increasing the buffer size until it is large enough
+	for {
+		status := getFileEA(handle, &iosb, &buf[0], uint32(bufLen), false, 0, 0, nil, true)
+
+		if status == noExtendedAttribsStatus {
+			// If status is -1073741742, no extended attributes were found
+			return nil, nil
+		}
+		err := status.Err()
+		if err != nil {
+			// convert ntstatus code to windows error
+			if err == windows.ERROR_INSUFFICIENT_BUFFER || err == windows.ERROR_MORE_DATA {
+				bufLen *= 2
+				buf = make([]byte, bufLen)
+				continue
+			}
+			return nil, fmt.Errorf("get file EA failed with: %w", err)
+		}
+		break
+	}
+	return DecodeExtendedAttributes(buf)
+}
+
+// SetFileEA sets the extended attributes for the file represented by `handle`. The
+// handle must have been opened with the file access flag FILE_WRITE_EA(0x10).
+func SetFileEA(handle windows.Handle, attrs []ExtendedAttribute) error {
+	encodedEA, err := EncodeExtendedAttributes(attrs)
+	if err != nil {
+		return fmt.Errorf("failed to encode extended attributes: %w", err)
+	}
+
+	var iosb ioStatusBlock
+
+	return setFileEA(handle, &iosb, &encodedEA[0], uint32(len(encodedEA))).Err()
+}
+
+// The code below was adapted from https://github.com/ambarve/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/zsyscall_windows.go
+// under MIT license.
+ +func getFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen uint32, returnSingleEntry bool, eaList uintptr, eaListLen uint32, eaIndex *uint32, restartScan bool) (status ntStatus) { + var _p0 uint32 + if returnSingleEntry { + _p0 = 1 + } + var _p1 uint32 + if restartScan { + _p1 = 1 + } + r0, _, _ := syscall.SyscallN(procNtQueryEaFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(_p0), uintptr(eaList), uintptr(eaListLen), uintptr(unsafe.Pointer(eaIndex)), uintptr(_p1)) + status = ntStatus(r0) + return +} + +func setFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen uint32) (status ntStatus) { + r0, _, _ := syscall.SyscallN(procNtSetEaFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(buf)), uintptr(bufLen)) + status = ntStatus(r0) + return +} diff --git a/internal/fs/ea_windows_test.go b/internal/fs/ea_windows_test.go new file mode 100644 index 00000000000..f9a4d9adf58 --- /dev/null +++ b/internal/fs/ea_windows_test.go @@ -0,0 +1,259 @@ +//go:build windows +// +build windows + +package fs + +import ( + "crypto/rand" + "fmt" + "math/big" + "os" + "path/filepath" + "reflect" + "syscall" + "testing" + "unsafe" + + "golang.org/x/sys/windows" +) + +// The code below was adapted from github.com/Microsoft/go-winio under MIT license. + +// The MIT License (MIT) + +// Copyright (c) 2015 Microsoft + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// The code below was adapted from https://github.com/ambarve/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea_test.go +// under MIT license. 
+ +var ( + testEas = []ExtendedAttribute{ + {Name: "foo", Value: []byte("bar")}, + {Name: "fizz", Value: []byte("buzz")}, + } + + testEasEncoded = []byte{16, 0, 0, 0, 0, 3, 3, 0, 102, 111, 111, 0, 98, 97, 114, 0, 0, + 0, 0, 0, 0, 4, 4, 0, 102, 105, 122, 122, 0, 98, 117, 122, 122, 0, 0, 0} + testEasNotPadded = testEasEncoded[0 : len(testEasEncoded)-3] + testEasTruncated = testEasEncoded[0:20] +) + +func TestRoundTripEas(t *testing.T) { + b, err := EncodeExtendedAttributes(testEas) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEasEncoded, b) { + t.Fatalf("Encoded mismatch %v %v", testEasEncoded, b) + } + eas, err := DecodeExtendedAttributes(b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEas, eas) { + t.Fatalf("mismatch %+v %+v", testEas, eas) + } +} + +func TestEasDontNeedPaddingAtEnd(t *testing.T) { + eas, err := DecodeExtendedAttributes(testEasNotPadded) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEas, eas) { + t.Fatalf("mismatch %+v %+v", testEas, eas) + } +} + +func TestTruncatedEasFailCorrectly(t *testing.T) { + _, err := DecodeExtendedAttributes(testEasTruncated) + if err == nil { + t.Fatal("expected error") + } +} + +func TestNilEasEncodeAndDecodeAsNil(t *testing.T) { + b, err := EncodeExtendedAttributes(nil) + if err != nil { + t.Fatal(err) + } + if len(b) != 0 { + t.Fatal("expected empty") + } + eas, err := DecodeExtendedAttributes(nil) + if err != nil { + t.Fatal(err) + } + if len(eas) != 0 { + t.Fatal("expected empty") + } +} + +// TestSetFileEa makes sure that the test buffer is actually parsable by NtSetEaFile. +func TestSetFileEa(t *testing.T) { + f, err := os.CreateTemp("", "testea") + if err != nil { + t.Fatal(err) + } + defer func() { + err := os.Remove(f.Name()) + if err != nil { + t.Logf("Error removing file %s: %v\n", f.Name(), err) + } + err = f.Close() + if err != nil { + t.Logf("Error closing file %s: %v\n", f.Name(), err) + } + }() + ntdll := syscall.MustLoadDLL("ntdll.dll") + ntSetEaFile := ntdll.MustFindProc("NtSetEaFile") + var iosb [2]uintptr + r, _, _ := ntSetEaFile.Call(f.Fd(), + uintptr(unsafe.Pointer(&iosb[0])), + uintptr(unsafe.Pointer(&testEasEncoded[0])), + uintptr(len(testEasEncoded))) + if r != 0 { + t.Fatalf("NtSetEaFile failed with %08x", r) + } +} + +func TestSetGetFileEA(t *testing.T) { + tempDir := t.TempDir() + testfilePath := filepath.Join(tempDir, "testfile.txt") + // create temp file + testfile, err := os.Create(testfilePath) + if err != nil { + t.Fatalf("failed to create temporary file: %s", err) + } + defer func() { + err := testfile.Close() + if err != nil { + t.Logf("Error closing file %s: %v\n", testfile.Name(), err) + } + }() + + nAttrs := 3 + testEAs := make([]ExtendedAttribute, 3) + // generate random extended attributes for test + for i := 0; i < nAttrs; i++ { + // EA name is automatically converted to upper case before storing, so + // when reading it back it returns the upper case name. To avoid test + // failures because of that keep the name upper cased. 
+ testEAs[i].Name = fmt.Sprintf("TESTEA%d", i+1) + testEAs[i].Value = make([]byte, getRandomInt()) + _, err := rand.Read(testEAs[i].Value) + if err != nil { + t.Logf("Error reading rand for file %s: %v\n", testfilePath, err) + } + } + + utf16Path := windows.StringToUTF16Ptr(testfilePath) + fileAccessRightReadWriteEA := (0x8 | 0x10) + fileHandle, err := windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) + if err != nil { + t.Fatalf("open file failed with: %s", err) + } + defer func() { + err := windows.Close(fileHandle) + if err != nil { + t.Logf("Error closing file handle %s: %v\n", testfilePath, err) + } + }() + + if err := SetFileEA(fileHandle, testEAs); err != nil { + t.Fatalf("set EA for file failed: %s", err) + } + + var readEAs []ExtendedAttribute + if readEAs, err = GetFileEA(fileHandle); err != nil { + t.Fatalf("get EA for file failed: %s", err) + } + + if !reflect.DeepEqual(readEAs, testEAs) { + t.Logf("expected: %+v, found: %+v\n", testEAs, readEAs) + t.Fatalf("EAs read from testfile don't match") + } +} + +func TestSetGetFolderEA(t *testing.T) { + tempDir := t.TempDir() + testfolderPath := filepath.Join(tempDir, "testfolder") + // create temp folder + err := os.Mkdir(testfolderPath, os.ModeDir) + if err != nil { + t.Fatalf("failed to create temporary file: %s", err) + } + + nAttrs := 3 + testEAs := make([]ExtendedAttribute, 3) + // generate random extended attributes for test + for i := 0; i < nAttrs; i++ { + // EA name is automatically converted to upper case before storing, so + // when reading it back it returns the upper case name. To avoid test + // failures because of that keep the name upper cased. + testEAs[i].Name = fmt.Sprintf("TESTEA%d", i+1) + testEAs[i].Value = make([]byte, getRandomInt()) + _, err := rand.Read(testEAs[i].Value) + if err != nil { + t.Logf("Error reading rand for file %s: %v\n", testfolderPath, err) + } + } + + utf16Path := windows.StringToUTF16Ptr(testfolderPath) + fileAccessRightReadWriteEA := (0x8 | 0x10) + fileHandle, err := windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + + if err != nil { + t.Fatalf("open folder failed with: %s", err) + } + defer func() { + err := windows.Close(fileHandle) + if err != nil { + t.Logf("Error closing file handle %s: %v\n", testfolderPath, err) + } + }() + + if err := SetFileEA(fileHandle, testEAs); err != nil { + t.Fatalf("set EA for folder failed: %s", err) + } + + var readEAs []ExtendedAttribute + if readEAs, err = GetFileEA(fileHandle); err != nil { + t.Fatalf("get EA for folder failed: %s", err) + } + + if !reflect.DeepEqual(readEAs, testEAs) { + t.Logf("expected: %+v, found: %+v\n", testEAs, readEAs) + t.Fatalf("EAs read from test folder don't match") + } +} + +func getRandomInt() int64 { + nBig, err := rand.Int(rand.Reader, big.NewInt(27)) + if err != nil { + panic(err) + } + n := nBig.Int64() + if n == 0 { + n = getRandomInt() + } + return n +} From 5cff6e084e28c5d8fcff65cf344977df36575db3 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Fri, 17 May 2024 14:18:20 -0600 Subject: [PATCH 182/893] Add support for Windows EA in node Refactor Extended Attribute related functions in node files as windows apis get and set EA in bulk --- internal/restic/node.go | 38 ---------- internal/restic/node_aix.go | 18 ++--- internal/restic/node_netbsd.go | 18 ++--- 
internal/restic/node_openbsd.go | 18 ++--- internal/restic/node_test.go | 15 +++- internal/restic/node_windows.go | 109 +++++++++++++++++++++++---- internal/restic/node_windows_test.go | 66 ++++++++++++++++ internal/restic/node_xattr.go | 52 +++++++++++-- 8 files changed, 242 insertions(+), 92 deletions(-) diff --git a/internal/restic/node.go b/internal/restic/node.go index 807ee0c0f33..5bdc5ba27c2 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -284,16 +284,6 @@ func (node Node) restoreMetadata(path string, warn func(msg string)) error { return firsterr } -func (node Node) restoreExtendedAttributes(path string) error { - for _, attr := range node.ExtendedAttributes { - err := Setxattr(path, attr.Name, attr.Value) - if err != nil { - return err - } - } - return nil -} - func (node Node) RestoreTimestamps(path string) error { var utimes = [...]syscall.Timespec{ syscall.NsecToTimespec(node.AccessTime.UnixNano()), @@ -726,34 +716,6 @@ func (node *Node) fillExtra(path string, fi os.FileInfo, ignoreXattrListError bo return err } -func (node *Node) fillExtendedAttributes(path string, ignoreListError bool) error { - xattrs, err := Listxattr(path) - debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err) - if err != nil { - if ignoreListError && IsListxattrPermissionError(err) { - return nil - } - return err - } - - node.ExtendedAttributes = make([]ExtendedAttribute, 0, len(xattrs)) - for _, attr := range xattrs { - attrVal, err := Getxattr(path, attr) - if err != nil { - fmt.Fprintf(os.Stderr, "can not obtain extended attribute %v for %v:\n", attr, path) - continue - } - attr := ExtendedAttribute{ - Name: attr, - Value: attrVal, - } - - node.ExtendedAttributes = append(node.ExtendedAttributes, attr) - } - - return nil -} - func mkfifo(path string, mode uint32) (err error) { return mknod(path, mode|syscall.S_IFIFO, 0) } diff --git a/internal/restic/node_aix.go b/internal/restic/node_aix.go index 8ee9022c92c..32f63af15f0 100644 --- a/internal/restic/node_aix.go +++ b/internal/restic/node_aix.go @@ -23,25 +23,21 @@ func (s statT) atim() syscall.Timespec { return toTimespec(s.Atim) } func (s statT) mtim() syscall.Timespec { return toTimespec(s.Mtim) } func (s statT) ctim() syscall.Timespec { return toTimespec(s.Ctim) } -// Getxattr is a no-op on AIX. -func Getxattr(path, name string) ([]byte, error) { - return nil, nil +// restoreExtendedAttributes is a no-op on AIX. +func (node Node) restoreExtendedAttributes(_ string) error { + return nil } -// Listxattr is a no-op on AIX. -func Listxattr(path string) ([]string, error) { - return nil, nil +// fillExtendedAttributes is a no-op on AIX. +func (node *Node) fillExtendedAttributes(_ string, _ bool) error { + return nil } +// IsListxattrPermissionError is a no-op on AIX. func IsListxattrPermissionError(_ error) bool { return false } -// Setxattr is a no-op on AIX. -func Setxattr(path, name string, data []byte) error { - return nil -} - // restoreGenericAttributes is no-op on AIX. 
func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { return node.handleAllUnknownGenericAttributesFound(warn) diff --git a/internal/restic/node_netbsd.go b/internal/restic/node_netbsd.go index cf1fa36bd5f..0fe46a3f28d 100644 --- a/internal/restic/node_netbsd.go +++ b/internal/restic/node_netbsd.go @@ -13,25 +13,21 @@ func (s statT) atim() syscall.Timespec { return s.Atimespec } func (s statT) mtim() syscall.Timespec { return s.Mtimespec } func (s statT) ctim() syscall.Timespec { return s.Ctimespec } -// Getxattr is a no-op on netbsd. -func Getxattr(path, name string) ([]byte, error) { - return nil, nil +// restoreExtendedAttributes is a no-op on netbsd. +func (node Node) restoreExtendedAttributes(_ string) error { + return nil } -// Listxattr is a no-op on netbsd. -func Listxattr(path string) ([]string, error) { - return nil, nil +// fillExtendedAttributes is a no-op on netbsd. +func (node *Node) fillExtendedAttributes(_ string, _ bool) error { + return nil } +// IsListxattrPermissionError is a no-op on netbsd. func IsListxattrPermissionError(_ error) bool { return false } -// Setxattr is a no-op on netbsd. -func Setxattr(path, name string, data []byte) error { - return nil -} - // restoreGenericAttributes is no-op on netbsd. func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { return node.handleAllUnknownGenericAttributesFound(warn) diff --git a/internal/restic/node_openbsd.go b/internal/restic/node_openbsd.go index 4f1c0dacb7a..71841f59f6f 100644 --- a/internal/restic/node_openbsd.go +++ b/internal/restic/node_openbsd.go @@ -13,25 +13,21 @@ func (s statT) atim() syscall.Timespec { return s.Atim } func (s statT) mtim() syscall.Timespec { return s.Mtim } func (s statT) ctim() syscall.Timespec { return s.Ctim } -// Getxattr is a no-op on openbsd. -func Getxattr(path, name string) ([]byte, error) { - return nil, nil +// restoreExtendedAttributes is a no-op on openbsd. +func (node Node) restoreExtendedAttributes(_ string) error { + return nil } -// Listxattr is a no-op on openbsd. -func Listxattr(path string) ([]string, error) { - return nil, nil +// fillExtendedAttributes is a no-op on openbsd. +func (node *Node) fillExtendedAttributes(_ string, _ bool) error { + return nil } +// IsListxattrPermissionError is a no-op on openbsd. func IsListxattrPermissionError(_ error) bool { return false } -// Setxattr is a no-op on openbsd. -func Setxattr(path, name string, data []byte) error { - return nil -} - // restoreGenericAttributes is no-op on openbsd. func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { return node.handleAllUnknownGenericAttributesFound(warn) diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index ea271faab8a..99ea48bbb00 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -8,6 +8,7 @@ import ( "path/filepath" "reflect" "runtime" + "strings" "testing" "time" @@ -205,8 +206,18 @@ func TestNodeRestoreAt(t *testing.T) { var nodePath string if test.ExtendedAttributes != nil { if runtime.GOOS == "windows" { - // restic does not support xattrs on windows - return + // In windows extended attributes are case insensitive and windows returns + // the extended attributes in UPPER case. + // Update the tests to use UPPER case xattr names for windows. 
+ extAttrArr := test.ExtendedAttributes + // Iterate through the array using pointers + for i := 0; i < len(extAttrArr); i++ { + // Get the pointer to the current element + namePtr := &extAttrArr[i].Name + + // Modify the value through the pointer + *namePtr = strings.ToUpper(*namePtr) + } } // tempdir might be backed by a filesystem that does not support diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 0c6d3775e3f..881c394be75 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -70,26 +70,109 @@ func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespe return syscall.SetFileTime(h, nil, &a, &w) } -// Getxattr retrieves extended attribute data associated with path. -func Getxattr(path, name string) ([]byte, error) { - return nil, nil +// restore extended attributes for windows +func (node Node) restoreExtendedAttributes(path string) (err error) { + eas := []fs.ExtendedAttribute{} + for _, attr := range node.ExtendedAttributes { + extr := new(fs.ExtendedAttribute) + extr.Name = attr.Name + extr.Value = attr.Value + eas = append(eas, *extr) + } + if len(eas) > 0 { + if errExt := restoreExtendedAttributes(node.Type, path, eas); errExt != nil { + return errExt + } + } + return nil } -// Listxattr retrieves a list of names of extended attributes associated with the -// given path in the file system. -func Listxattr(path string) ([]string, error) { - return nil, nil -} +// fill extended attributes in the node. This also includes the Generic attributes for windows. +func (node *Node) fillExtendedAttributes(path string, _ bool) (err error) { + var fileHandle windows.Handle -func IsListxattrPermissionError(_ error) bool { - return false -} + //Get file handle for file or dir + if node.Type == "file" { + if strings.HasSuffix(filepath.Clean(path), `\`) { + return nil + } + utf16Path := windows.StringToUTF16Ptr(path) + fileAccessRightReadWriteEA := (0x8 | 0x10) + fileHandle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) + } else if node.Type == "dir" { + utf16Path := windows.StringToUTF16Ptr(path) + fileAccessRightReadWriteEA := (0x8 | 0x10) + fileHandle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + } else { + return nil + } + if err != nil { + err = errors.Errorf("open file failed for path: %s, with: %v", path, err) + return err + } + defer func() { + err := windows.CloseHandle(fileHandle) + if err != nil { + debug.Log("Error closing file handle for %s: %v\n", path, err) + } + }() + + //Get the windows Extended Attributes using the file handle + extAtts, err := fs.GetFileEA(fileHandle) + debug.Log("fillExtendedAttributes(%v) %v", path, extAtts) + if err != nil { + debug.Log("open file failed for path: %s : %v", path, err) + return err + } else if len(extAtts) == 0 { + return nil + } + + //Fill the ExtendedAttributes in the node using the name/value pairs in the windows EA + for _, attr := range extAtts { + if err != nil { + err = errors.Errorf("can not obtain extended attribute for path %v, attr: %v, err: %v\n,", path, attr, err) + continue + } + extendedAttr := ExtendedAttribute{ + Name: attr.Name, + Value: attr.Value, + } -// Setxattr associates name and data together as an attribute of path. 
-func Setxattr(path, name string, data []byte) error { + node.ExtendedAttributes = append(node.ExtendedAttributes, extendedAttr) + } return nil } +// restoreExtendedAttributes handles restore of the Windows Extended Attributes to the specified path. +// The Windows API requires setting of all the Extended Attributes in one call. +func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute) (err error) { + var fileHandle windows.Handle + switch nodeType { + case "file": + utf16Path := windows.StringToUTF16Ptr(path) + fileAccessRightReadWriteEA := (0x8 | 0x10) + fileHandle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) + case "dir": + utf16Path := windows.StringToUTF16Ptr(path) + fileAccessRightReadWriteEA := (0x8 | 0x10) + fileHandle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + default: + return nil + } + defer func() { + err := windows.CloseHandle(fileHandle) + if err != nil { + debug.Log("Error closing file handle for %s: %v\n", path, err) + } + }() + if err != nil { + err = errors.Errorf("open file failed for path %v, with: %v:\n", path, err) + } else if err = fs.SetFileEA(fileHandle, eas); err != nil { + err = errors.Errorf("set EA failed for path %v, with: %v:\n", path, err) + } + return err +} + type statT syscall.Win32FileAttributeData func toStatT(i interface{}) (*statT, bool) { diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go index 57fc51e0732..f89e2aeee77 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/restic/node_windows_test.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "syscall" "testing" @@ -265,3 +266,68 @@ func TestNewGenericAttributeType(t *testing.T) { test.Assert(t, len(ua) == 0, "Unkown attributes: %s found for path: %s", ua, testPath) } } + +func TestRestoreExtendedAttributes(t *testing.T) { + t.Parallel() + tempDir := t.TempDir() + expectedNodes := []Node{ + { + Name: "testfile", + Type: "file", + Mode: 0644, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []ExtendedAttribute{ + {"user.foo", []byte("bar")}, + }, + }, + { + Name: "testdirectory", + Type: "dir", + Mode: 0755, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []ExtendedAttribute{ + {"user.foo", []byte("bar")}, + }, + }, + } + for _, testNode := range expectedNodes { + testPath, node := restoreAndGetNode(t, tempDir, testNode, false) + + var handle windows.Handle + var err error + utf16Path := windows.StringToUTF16Ptr(testPath) + if node.Type == "file" { + handle, err = windows.CreateFile(utf16Path, windows.FILE_READ_EA, 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) + } else if node.Type == "dir" { + handle, err = windows.CreateFile(utf16Path, windows.FILE_READ_EA, 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + } + test.OK(t, errors.Wrapf(err, "Error opening file/directory for: %s", testPath)) + defer func() { + err := windows.Close(handle) + test.OK(t, errors.Wrapf(err, "Error closing file for: %s", testPath)) + }() + + if len(node.ExtendedAttributes) > 0 { + extAttr, err 
:= fs.GetFileEA(handle) + test.OK(t, errors.Wrapf(err, "Error getting extended attributes for: %s", testPath)) + test.Equals(t, len(node.ExtendedAttributes), len(extAttr)) + + for _, expectedExtAttr := range node.ExtendedAttributes { + var foundExtAttr *fs.ExtendedAttribute + for _, ea := range extAttr { + if strings.EqualFold(ea.Name, expectedExtAttr.Name) { + foundExtAttr = &ea + break + + } + } + test.Assert(t, foundExtAttr != nil, "Expected extended attribute not found") + test.Equals(t, expectedExtAttr.Value, foundExtAttr.Value) + } + } + } +} diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go index 8b080e74f8f..a55fcb2db4a 100644 --- a/internal/restic/node_xattr.go +++ b/internal/restic/node_xattr.go @@ -4,23 +4,25 @@ package restic import ( + "fmt" "os" "syscall" + "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/pkg/xattr" ) -// Getxattr retrieves extended attribute data associated with path. -func Getxattr(path, name string) ([]byte, error) { +// getxattr retrieves extended attribute data associated with path. +func getxattr(path, name string) ([]byte, error) { b, err := xattr.LGet(path, name) return b, handleXattrErr(err) } -// Listxattr retrieves a list of names of extended attributes associated with the +// listxattr retrieves a list of names of extended attributes associated with the // given path in the file system. -func Listxattr(path string) ([]string, error) { +func listxattr(path string) ([]string, error) { l, err := xattr.LList(path) return l, handleXattrErr(err) } @@ -33,8 +35,8 @@ func IsListxattrPermissionError(err error) bool { return false } -// Setxattr associates name and data together as an attribute of path. -func Setxattr(path, name string, data []byte) error { +// setxattr associates name and data together as an attribute of path. 
+func setxattr(path, name string, data []byte) error { return handleXattrErr(xattr.LSet(path, name, data)) } @@ -66,3 +68,41 @@ func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) erro func (node *Node) fillGenericAttributes(_ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { return true, nil } + +func (node Node) restoreExtendedAttributes(path string) error { + for _, attr := range node.ExtendedAttributes { + err := setxattr(path, attr.Name, attr.Value) + if err != nil { + return err + } + } + return nil +} + +func (node *Node) fillExtendedAttributes(path string, ignoreListError bool) error { + xattrs, err := listxattr(path) + debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err) + if err != nil { + if ignoreListError && IsListxattrPermissionError(err) { + return nil + } + return err + } + + node.ExtendedAttributes = make([]ExtendedAttribute, 0, len(xattrs)) + for _, attr := range xattrs { + attrVal, err := getxattr(path, attr) + if err != nil { + fmt.Fprintf(os.Stderr, "can not obtain extended attribute %v for %v:\n", attr, path) + continue + } + attr := ExtendedAttribute{ + Name: attr, + Value: attrVal, + } + + node.ExtendedAttributes = append(node.ExtendedAttributes, attr) + } + + return nil +} From 9d1b23964d7f718af19ad906514adf8cf5812965 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Fri, 17 May 2024 14:19:05 -0600 Subject: [PATCH 183/893] Update comment for licensing --- internal/fs/sd_windows.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index cc44433c376..5d98b4ef46b 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -346,7 +346,7 @@ func getPrivilegeName(luid uint64) string { return string(utf16.Decode(displayNameBuffer[:displayBufSize])) } -// The functions below are copied over from https://github.com/microsoft/go-winio/blob/main/zsyscall_windows.go +// The functions below are copied over from https://github.com/microsoft/go-winio/blob/main/zsyscall_windows.go under MIT license. // This windows api always returns an error even in case of success, warnings (partial success) and error cases. // @@ -424,7 +424,7 @@ func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err return } -// The code below was copied from https://github.com/microsoft/go-winio/blob/main/tools/mkwinsyscall/mkwinsyscall.go +// The code below was copied from https://github.com/microsoft/go-winio/blob/main/tools/mkwinsyscall/mkwinsyscall.go under MIT license. // errnoErr returns common boxed Errno values, to prevent // allocations at runtime. From 43bc304e423341c0255b2e6172ef494657a7b15f Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Fri, 17 May 2024 14:54:25 -0600 Subject: [PATCH 184/893] Add unreleased changelog --- changelog/unreleased/pull-4807 | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 changelog/unreleased/pull-4807 diff --git a/changelog/unreleased/pull-4807 b/changelog/unreleased/pull-4807 new file mode 100644 index 00000000000..12f8391e32e --- /dev/null +++ b/changelog/unreleased/pull-4807 @@ -0,0 +1,5 @@ +Enhancement: Back up and restore Extended Attributes on Windows NTFS + +Restic now backs up and restores Extended Attributes on Windows NTFS when backing up files and folders. 
+ +https://github.com/restic/restic/pull/4807 \ No newline at end of file From c6d74458eeabb3076e0d83eda77809e5471c9975 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 23:57:52 +0200 Subject: [PATCH 185/893] sftp: improve handling of too short files --- internal/backend/sftp/sftp.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index efd66f76cb8..dd95b3cf8c3 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -43,6 +43,8 @@ type SFTP struct { var _ backend.Backend = &SFTP{} +var errTooShort = fmt.Errorf("file is too short") + func NewFactory() location.Factory { return location.NewLimitedBackendFactory("sftp", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open)) } @@ -212,6 +214,10 @@ func (r *SFTP) IsNotExist(err error) bool { return errors.Is(err, os.ErrNotExist) } +func (r *SFTP) IsPermanentError(err error) bool { + return r.IsNotExist(err) || errors.Is(err, errTooShort) || errors.Is(err, os.ErrPermission) +} + func buildSSHCommand(cfg Config) (cmd string, args []string, err error) { if cfg.Command != "" { args, err := backend.SplitShellStrings(cfg.Command) @@ -428,6 +434,18 @@ func (r *SFTP) openReader(_ context.Context, h backend.Handle, length int, offse return nil, err } + fi, err := f.Stat() + if err != nil { + _ = f.Close() + return nil, err + } + + size := fi.Size() + if size < offset+int64(length) { + _ = f.Close() + return nil, errTooShort + } + if offset > 0 { _, err = f.Seek(offset, 0) if err != nil { From eaa3f81d6b1a5d5abf520e0afa9db9d050ea56c3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 22:08:12 +0200 Subject: [PATCH 186/893] sftp: check for truncated files without an extra backend request --- internal/backend/sftp/sftp.go | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index dd95b3cf8c3..7bab25bed7e 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -425,7 +425,24 @@ func (r *SFTP) checkNoSpace(dir string, size int64, origErr error) error { // Load runs fn with a reader that yields the contents of the file at h at the // given offset. 
func (r *SFTP) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { - return util.DefaultLoad(ctx, h, length, offset, r.openReader, fn) + return util.DefaultLoad(ctx, h, length, offset, r.openReader, func(rd io.Reader) error { + if length == 0 { + return fn(rd) + } + + // there is no direct way to efficiently check whether the file is too short + // rd is already a LimitedReader which can be used to track the number of bytes read + err := fn(rd) + + // check the underlying reader to be agnostic to however fn() handles the returned error + _, rderr := rd.Read([]byte{0}) + if rderr == io.EOF && rd.(*backend.LimitedReadCloser).N != 0 { + // file is too short + return fmt.Errorf("%w: %v", errTooShort, err) + } + + return err + }) } func (r *SFTP) openReader(_ context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { @@ -434,18 +451,6 @@ func (r *SFTP) openReader(_ context.Context, h backend.Handle, length int, offse return nil, err } - fi, err := f.Stat() - if err != nil { - _ = f.Close() - return nil, err - } - - size := fi.Size() - if size < offset+int64(length) { - _ = f.Close() - return nil, errTooShort - } - if offset > 0 { _, err = f.Seek(offset, 0) if err != nil { From b4895ebd76225b41866dba121d4bde0c37376837 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 00:03:42 +0200 Subject: [PATCH 187/893] rest: rework error reporting and report too short files --- internal/backend/rest/rest.go | 68 ++++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 29 deletions(-) diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go index d8171d90e87..5b59b8e4f7c 100644 --- a/internal/backend/rest/rest.go +++ b/internal/backend/rest/rest.go @@ -30,6 +30,20 @@ type Backend struct { layout.Layout } +// restError is returned whenever the server returns a non-successful HTTP status. +type restError struct { + backend.Handle + StatusCode int + Status string +} + +func (e *restError) Error() string { + if e.StatusCode == http.StatusNotFound && e.Handle.Type.String() != "invalid" { + return fmt.Sprintf("%v does not exist", e.Handle) + } + return fmt.Sprintf("unexpected HTTP response (%v): %v", e.StatusCode, e.Status) +} + func NewFactory() location.Factory { return location.NewHTTPBackendFactory("rest", ParseConfig, StripPassword, Create, Open) } @@ -96,7 +110,7 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, er } if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode) + return nil, &restError{backend.Handle{}, resp.StatusCode, resp.Status} } return be, nil @@ -150,26 +164,31 @@ func (b *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindR } if resp.StatusCode != http.StatusOK { - return errors.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode) + return &restError{h, resp.StatusCode, resp.Status} } return nil } -// notExistError is returned whenever the requested file does not exist on the -// server. -type notExistError struct { - backend.Handle +// IsNotExist returns true if the error was caused by a non-existing file. 
+func (b *Backend) IsNotExist(err error) bool { + var e *restError + return errors.As(err, &e) && e.StatusCode == http.StatusNotFound } -func (e *notExistError) Error() string { - return fmt.Sprintf("%v does not exist", e.Handle) -} +func (b *Backend) IsPermanentError(err error) bool { + if b.IsNotExist(err) { + return true + } -// IsNotExist returns true if the error was caused by a non-existing file. -func (b *Backend) IsNotExist(err error) bool { - var e *notExistError - return errors.As(err, &e) + var rerr *restError + if errors.As(err, &rerr) { + if rerr.StatusCode == http.StatusRequestedRangeNotSatisfiable || rerr.StatusCode == http.StatusUnauthorized || rerr.StatusCode == http.StatusForbidden { + return true + } + } + + return false } // Load runs fn with a reader that yields the contents of the file at h at the @@ -221,14 +240,13 @@ func (b *Backend) openReader(ctx context.Context, h backend.Handle, length int, return nil, errors.Wrap(err, "client.Do") } - if resp.StatusCode == http.StatusNotFound { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { _ = drainAndClose(resp) - return nil, ¬ExistError{h} + return nil, &restError{h, resp.StatusCode, resp.Status} } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - _ = drainAndClose(resp) - return nil, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status) + if length > 0 && resp.ContentLength != int64(length) { + return nil, &restError{h, http.StatusRequestedRangeNotSatisfiable, "partial out of bounds read"} } return resp.Body, nil @@ -251,12 +269,8 @@ func (b *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, return backend.FileInfo{}, err } - if resp.StatusCode == http.StatusNotFound { - return backend.FileInfo{}, ¬ExistError{h} - } - if resp.StatusCode != http.StatusOK { - return backend.FileInfo{}, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status) + return backend.FileInfo{}, &restError{h, resp.StatusCode, resp.Status} } if resp.ContentLength < 0 { @@ -288,12 +302,8 @@ func (b *Backend) Remove(ctx context.Context, h backend.Handle) error { return err } - if resp.StatusCode == http.StatusNotFound { - return ¬ExistError{h} - } - if resp.StatusCode != http.StatusOK { - return errors.Errorf("blob not removed, server response: %v (%v)", resp.Status, resp.StatusCode) + return &restError{h, resp.StatusCode, resp.Status} } return nil @@ -330,7 +340,7 @@ func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(backend. if resp.StatusCode != http.StatusOK { _ = drainAndClose(resp) - return errors.Errorf("List failed, server response: %v (%v)", resp.Status, resp.StatusCode) + return &restError{backend.Handle{Type: t}, resp.StatusCode, resp.Status} } if resp.Header.Get("Content-Type") == ContentTypeV2 { From e793c002ece0a2831394bbfcb7157a270d3ebd82 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 00:07:04 +0200 Subject: [PATCH 188/893] local: stricter handling of short files --- internal/backend/local/local.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go index b89f2ff446f..ff7c4b7a5ff 100644 --- a/internal/backend/local/local.go +++ b/internal/backend/local/local.go @@ -2,6 +2,7 @@ package local import ( "context" + "fmt" "hash" "io" "os" @@ -30,6 +31,8 @@ type Local struct { // ensure statically that *Local implements backend.Backend. 
var _ backend.Backend = &Local{} +var errTooShort = fmt.Errorf("file is too short") + func NewFactory() location.Factory { return location.NewLimitedBackendFactory("local", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open)) } @@ -110,6 +113,10 @@ func (b *Local) IsNotExist(err error) bool { return errors.Is(err, os.ErrNotExist) } +func (b *Local) IsPermanentError(err error) bool { + return b.IsNotExist(err) || errors.Is(err, errTooShort) || errors.Is(err, os.ErrPermission) +} + // Save stores data in the backend at the handle. func (b *Local) Save(_ context.Context, h backend.Handle, rd backend.RewindReader) (err error) { finalname := b.Filename(h) @@ -219,6 +226,18 @@ func (b *Local) openReader(_ context.Context, h backend.Handle, length int, offs return nil, err } + fi, err := f.Stat() + if err != nil { + _ = f.Close() + return nil, err + } + + size := fi.Size() + if size < offset+int64(length) { + _ = f.Close() + return nil, errTooShort + } + if offset > 0 { _, err = f.Seek(offset, 0) if err != nil { From d40f23e71688f64ff62f80cc60b24522a0f4ad21 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 00:11:23 +0200 Subject: [PATCH 189/893] azure/b2/gs/s3/swift: adapt cloud backend --- internal/backend/azure/azure.go | 19 +++++++++++++++++++ internal/backend/b2/b2.go | 28 +++++++++++++++++++++++++++- internal/backend/gs/gs.go | 20 ++++++++++++++++++++ internal/backend/s3/s3.go | 24 +++++++++++++++++++++++- internal/backend/swift/swift.go | 28 +++++++++++++++++++++++++++- 5 files changed, 116 insertions(+), 3 deletions(-) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index adaa37d976d..e9368c268e8 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -167,6 +167,20 @@ func (be *Backend) IsNotExist(err error) bool { return bloberror.HasCode(err, bloberror.BlobNotFound) } +func (be *Backend) IsPermanentError(err error) bool { + if be.IsNotExist(err) { + return true + } + + var aerr *azcore.ResponseError + if errors.As(err, &aerr) { + if aerr.StatusCode == http.StatusRequestedRangeNotSatisfiable || aerr.StatusCode == http.StatusUnauthorized || aerr.StatusCode == http.StatusForbidden { + return true + } + } + return false +} + // Join combines path components with slashes. func (be *Backend) Join(p ...string) string { return path.Join(p...) 
@@ -313,6 +327,11 @@ func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, return nil, err } + if length > 0 && (resp.ContentLength == nil || *resp.ContentLength != int64(length)) { + _ = resp.Body.Close() + return nil, &azcore.ResponseError{ErrorCode: "restic-file-too-short", StatusCode: http.StatusRequestedRangeNotSatisfiable} + } + return resp.Body, err } diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go index bc6ef1a4d15..e3a52813d50 100644 --- a/internal/backend/b2/b2.go +++ b/internal/backend/b2/b2.go @@ -2,6 +2,7 @@ package b2 import ( "context" + "fmt" "hash" "io" "net/http" @@ -31,6 +32,8 @@ type b2Backend struct { canDelete bool } +var errTooShort = fmt.Errorf("file is too short") + // Billing happens in 1000 item granularity, but we are more interested in reducing the number of network round trips const defaultListMaxItems = 10 * 1000 @@ -186,13 +189,36 @@ func (be *b2Backend) IsNotExist(err error) bool { return false } +func (be *b2Backend) IsPermanentError(err error) bool { + // the library unfortunately endlessly retries authentication errors + return be.IsNotExist(err) || errors.Is(err, errTooShort) +} + // Load runs fn with a reader that yields the contents of the file at h at the // given offset. func (be *b2Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn) + return util.DefaultLoad(ctx, h, length, offset, be.openReader, func(rd io.Reader) error { + if length == 0 { + return fn(rd) + } + + // there is no direct way to efficiently check whether the file is too short + // use a LimitedReader to track the number of bytes read + limrd := &io.LimitedReader{R: rd, N: int64(length)} + err := fn(limrd) + + // check the underlying reader to be agnostic to however fn() handles the returned error + _, rderr := rd.Read([]byte{0}) + if rderr == io.EOF && limrd.N != 0 { + // file is too short + return fmt.Errorf("%w: %v", errTooShort, err) + } + + return err + }) } func (be *b2Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go index 77d20e05656..20da5245ac6 100644 --- a/internal/backend/gs/gs.go +++ b/internal/backend/gs/gs.go @@ -173,6 +173,21 @@ func (be *Backend) IsNotExist(err error) bool { return errors.Is(err, storage.ErrObjectNotExist) } +func (be *Backend) IsPermanentError(err error) bool { + if be.IsNotExist(err) { + return true + } + + var gerr *googleapi.Error + if errors.As(err, &gerr) { + if gerr.Code == http.StatusRequestedRangeNotSatisfiable || gerr.Code == http.StatusUnauthorized || gerr.Code == http.StatusForbidden { + return true + } + } + + return false +} + // Join combines path components with slashes. func (be *Backend) Join(p ...string) string { return path.Join(p...) 
@@ -273,6 +288,11 @@ func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, return nil, err } + if length > 0 && r.Attrs.Size < offset+int64(length) { + _ = r.Close() + return nil, &googleapi.Error{Code: http.StatusRequestedRangeNotSatisfiable, Message: "restic-file-too-short"} + } + return r, err } diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index d41f4479df6..afe1653f658 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -229,6 +229,21 @@ func (be *Backend) IsNotExist(err error) bool { return errors.As(err, &e) && e.Code == "NoSuchKey" } +func (be *Backend) IsPermanentError(err error) bool { + if be.IsNotExist(err) { + return true + } + + var merr minio.ErrorResponse + if errors.As(err, &merr) { + if merr.Code == "InvalidRange" || merr.Code == "AccessDenied" { + return true + } + } + + return false +} + // Join combines path components with slashes. func (be *Backend) Join(p ...string) string { return path.Join(p...) @@ -384,11 +399,18 @@ func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, } coreClient := minio.Core{Client: be.client} - rd, _, _, err := coreClient.GetObject(ctx, be.cfg.Bucket, objName, opts) + rd, info, _, err := coreClient.GetObject(ctx, be.cfg.Bucket, objName, opts) if err != nil { return nil, err } + if length > 0 { + if info.Size > 0 && info.Size != int64(length) { + _ = rd.Close() + return nil, minio.ErrorResponse{Code: "InvalidRange", Message: "restic-file-too-short"} + } + } + return rd, err } diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go index 6943f018079..616fcf3b7e9 100644 --- a/internal/backend/swift/swift.go +++ b/internal/backend/swift/swift.go @@ -153,7 +153,18 @@ func (be *beSwift) openReader(ctx context.Context, h backend.Handle, length int, obj, _, err := be.conn.ObjectOpen(ctx, be.container, objName, false, headers) if err != nil { - return nil, errors.Wrap(err, "conn.ObjectOpen") + return nil, fmt.Errorf("conn.ObjectOpen: %w", err) + } + + if length > 0 { + // get response length, but don't cause backend calls + cctx, cancel := context.WithCancel(context.Background()) + cancel() + objLength, e := obj.Length(cctx) + if e == nil && objLength != int64(length) { + _ = obj.Close() + return nil, &swift.Error{StatusCode: http.StatusRequestedRangeNotSatisfiable, Text: "restic-file-too-short"} + } } return obj, nil @@ -242,6 +253,21 @@ func (be *beSwift) IsNotExist(err error) bool { return errors.As(err, &e) && e.StatusCode == http.StatusNotFound } +func (be *beSwift) IsPermanentError(err error) bool { + if be.IsNotExist(err) { + return true + } + + var serr *swift.Error + if errors.As(err, &serr) { + if serr.StatusCode == http.StatusRequestedRangeNotSatisfiable || serr.StatusCode == http.StatusUnauthorized || serr.StatusCode == http.StatusForbidden { + return true + } + } + + return false +} + // Delete removes all restic objects in the container. // It will not remove the container itself. 
func (be *beSwift) Delete(ctx context.Context) error { From cfc420664aa505b7cbfb94559aae618bb859ced7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 00:12:13 +0200 Subject: [PATCH 190/893] mem: stricter handling of out of bounds requests --- internal/backend/dryrun/dry_backend_test.go | 2 +- internal/backend/mem/mem_backend.go | 11 ++++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/internal/backend/dryrun/dry_backend_test.go b/internal/backend/dryrun/dry_backend_test.go index 56962107d73..793e544dbb8 100644 --- a/internal/backend/dryrun/dry_backend_test.go +++ b/internal/backend/dryrun/dry_backend_test.go @@ -96,7 +96,7 @@ func TestDry(t *testing.T) { } case "load": data := "" - err = step.be.Load(ctx, handle, 100, 0, func(rd io.Reader) error { + err = step.be.Load(ctx, handle, 0, 0, func(rd io.Reader) error { buf, err := io.ReadAll(rd) data = string(buf) return err diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go index 8b115b18729..532380f213a 100644 --- a/internal/backend/mem/mem_backend.go +++ b/internal/backend/mem/mem_backend.go @@ -43,6 +43,7 @@ func NewFactory() location.Factory { } var errNotFound = fmt.Errorf("not found") +var errTooSmall = errors.New("access beyond end of file") const connectionCount = 2 @@ -69,6 +70,10 @@ func (be *MemoryBackend) IsNotExist(err error) bool { return errors.Is(err, errNotFound) } +func (be *MemoryBackend) IsPermanentError(err error) bool { + return be.IsNotExist(err) || errors.Is(err, errTooSmall) +} + // Save adds new Data to the backend. func (be *MemoryBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { be.m.Lock() @@ -131,12 +136,12 @@ func (be *MemoryBackend) openReader(ctx context.Context, h backend.Handle, lengt } buf := be.data[h] - if offset > int64(len(buf)) { - return nil, errors.New("offset beyond end of file") + if offset+int64(length) > int64(len(buf)) { + return nil, errTooSmall } buf = buf[offset:] - if length > 0 && len(buf) > length { + if length > 0 { buf = buf[:length] } From 6a85df729760bb6d444d54518c225817584ce7fb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 00:12:56 +0200 Subject: [PATCH 191/893] backend: add IsPermanentError() method to interface --- internal/backend/backend.go | 10 +++++++++- internal/backend/dryrun/dry_backend.go | 4 ++++ internal/backend/mock/backend.go | 9 +++++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/internal/backend/backend.go b/internal/backend/backend.go index aa9920f9b88..102322c4f1d 100644 --- a/internal/backend/backend.go +++ b/internal/backend/backend.go @@ -38,7 +38,9 @@ type Backend interface { // Load runs fn with a reader that yields the contents of the file at h at the // given offset. If length is larger than zero, only a portion of the file - // is read. + // is read. If the length is larger than zero and the file is too short to return + // the requested length bytes, then an error MUST be returned that is recognized + // by IsPermanentError(). // // The function fn may be called multiple times during the same Load invocation // and therefore must be idempotent. @@ -66,6 +68,12 @@ type Backend interface { // for unwrapping it. IsNotExist(err error) bool + // IsPermanentError returns true if the error can very likely not be resolved + // by retrying the operation. 
Backends should return true if the file is missing, + // the requested range does not (completely) exist in the file or the user is + // not authorized to perform the requested operation. + IsPermanentError(err error) bool + // Delete removes all data in the backend. Delete(ctx context.Context) error } diff --git a/internal/backend/dryrun/dry_backend.go b/internal/backend/dryrun/dry_backend.go index b3db0210f7e..c17b240fa1b 100644 --- a/internal/backend/dryrun/dry_backend.go +++ b/internal/backend/dryrun/dry_backend.go @@ -72,6 +72,10 @@ func (be *Backend) IsNotExist(err error) bool { return be.b.IsNotExist(err) } +func (be *Backend) IsPermanentError(err error) bool { + return be.b.IsPermanentError(err) +} + func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error { return be.b.List(ctx, t, fn) } diff --git a/internal/backend/mock/backend.go b/internal/backend/mock/backend.go index 57b1ede1993..bd8c6d43b92 100644 --- a/internal/backend/mock/backend.go +++ b/internal/backend/mock/backend.go @@ -13,6 +13,7 @@ import ( type Backend struct { CloseFn func() error IsNotExistFn func(err error) bool + IsPermanentErrorFn func(err error) bool SaveFn func(ctx context.Context, h backend.Handle, rd backend.RewindReader) error OpenReaderFn func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) StatFn func(ctx context.Context, h backend.Handle) (backend.FileInfo, error) @@ -83,6 +84,14 @@ func (m *Backend) IsNotExist(err error) bool { return m.IsNotExistFn(err) } +func (m *Backend) IsPermanentError(err error) bool { + if m.IsPermanentErrorFn == nil { + return false + } + + return m.IsPermanentErrorFn(err) +} + // Save data in the backend. func (m *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { if m.SaveFn == nil { From 4740528a0b774245b4a0ba907e8d26b4a87aa79f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 00:13:23 +0200 Subject: [PATCH 192/893] backend: add tests for IsPermanentError --- internal/backend/test/tests.go | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/internal/backend/test/tests.go b/internal/backend/test/tests.go index 414bf1c3bef..963659fda4a 100644 --- a/internal/backend/test/tests.go +++ b/internal/backend/test/tests.go @@ -99,6 +99,7 @@ func (s *Suite[C]) TestConfig(t *testing.T) { t.Fatalf("did not get expected error for non-existing config") } test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize error from LoadAll(): %v", err) + test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize error from LoadAll(): %v", err) err = b.Save(context.TODO(), backend.Handle{Type: backend.ConfigFile}, backend.NewByteReader([]byte(testString), b.Hasher())) if err != nil { @@ -135,6 +136,7 @@ func (s *Suite[C]) TestLoad(t *testing.T) { t.Fatalf("Load() did not return an error for non-existing blob") } test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize non-existing blob: %v", err) + test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize non-existing blob: %v", err) length := rand.Intn(1<<24) + 2000 @@ -181,8 +183,12 @@ func (s *Suite[C]) TestLoad(t *testing.T) { } getlen := l - if l >= len(d) && rand.Float32() >= 0.5 { - getlen = 0 + if l >= len(d) { + if rand.Float32() >= 0.5 { + getlen = 0 + } else { + getlen = len(d) + } } if l > 0 && l < len(d) { @@ -225,6 +231,18 @@ func (s *Suite[C]) TestLoad(t *testing.T) { } } + // test error checking 
for partial and fully out of bounds read + // only test for length > 0 as we currently do not need strict out of bounds handling for length==0 + for _, offset := range []int{length - 99, length - 50, length, length + 100} { + err = b.Load(context.TODO(), handle, 100, int64(offset), func(rd io.Reader) (ierr error) { + _, ierr = io.ReadAll(rd) + return ierr + }) + test.Assert(t, err != nil, "Load() did not return error on out of bounds read! o %v, l %v, filelength %v", offset, 100, length) + test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize out of range read: %v", err) + test.Assert(t, !b.IsNotExist(err), "IsNotExist() must not recognize out of range read: %v", err) + } + test.OK(t, b.Remove(context.TODO(), handle)) } @@ -762,6 +780,7 @@ func (s *Suite[C]) TestBackend(t *testing.T) { defer s.close(t, b) test.Assert(t, !b.IsNotExist(nil), "IsNotExist() recognized nil error") + test.Assert(t, !b.IsPermanentError(nil), "IsPermanentError() recognized nil error") for _, tpe := range []backend.FileType{ backend.PackFile, backend.KeyFile, backend.LockFile, @@ -782,11 +801,13 @@ func (s *Suite[C]) TestBackend(t *testing.T) { _, err = b.Stat(context.TODO(), h) test.Assert(t, err != nil, "blob data could be extracted before creation") test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Stat() error: %v", err) + test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize Stat() error: %v", err) // try to read not existing blob err = testLoad(b, h) test.Assert(t, err != nil, "blob could be read before creation") test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Load() error: %v", err) + test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize Load() error: %v", err) // try to get string out, should fail ret, err = beTest(context.TODO(), b, h) From bf8cc59889f05931ed0fc5a45aecf4071e55ca15 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 20:22:10 +0200 Subject: [PATCH 193/893] Use generic backend-error-redesign feature flag instead of http-timeouts An individual flag for each change of the backend error handling would be too finegrained. Thus, add a generic flag. 
--- internal/backend/http_transport.go | 4 ++-- internal/feature/registry.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go index 09eb3cf16ea..97fd521e358 100644 --- a/internal/backend/http_transport.go +++ b/internal/backend/http_transport.go @@ -89,7 +89,7 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { if err != nil { panic(err) } - if feature.Flag.Enabled(feature.HTTPTimeouts) { + if feature.Flag.Enabled(feature.BackendErrorRedesign) { h2.WriteByteTimeout = 120 * time.Second h2.ReadIdleTimeout = 60 * time.Second h2.PingTimeout = 60 * time.Second @@ -132,7 +132,7 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { } rt := http.RoundTripper(tr) - if feature.Flag.Enabled(feature.HTTPTimeouts) { + if feature.Flag.Enabled(feature.BackendErrorRedesign) { rt = newWatchdogRoundtripper(rt, 120*time.Second, 128*1024) } diff --git a/internal/feature/registry.go b/internal/feature/registry.go index b0e4d2ed78a..ac410514028 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -5,17 +5,17 @@ var Flag = New() // flag names are written in kebab-case const ( + BackendErrorRedesign FlagName = "backend-error-redesign" DeprecateLegacyIndex FlagName = "deprecate-legacy-index" DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout" DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" - HTTPTimeouts FlagName = "http-timeouts" ) func init() { Flag.SetFlags(map[FlagName]FlagDesc{ + BackendErrorRedesign: {Type: Beta, Description: "enforce timeouts for stuck HTTP requests and use new backend error handling design."}, DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."}, DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, - HTTPTimeouts: {Type: Beta, Description: "enforce timeouts for stuck HTTP requests."}, }) } From aeb7eb245c679380b2867601d50c227f2d86fc92 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 20:25:04 +0200 Subject: [PATCH 194/893] retry: do not retry permanent errors This is currently gated behind a feature flag as some unexpected interactions might show up in the wild. 
--- internal/backend/retry/backend_retry.go | 19 +++++++---- internal/backend/retry/backend_retry_test.go | 34 ++++++++++++++++++-- 2 files changed, 45 insertions(+), 8 deletions(-) diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index c63338fb664..4f25e0c7c30 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -2,6 +2,7 @@ package retry import ( "context" + "errors" "fmt" "io" "time" @@ -9,6 +10,7 @@ import ( "github.com/cenkalti/backoff/v4" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/feature" ) // Backend retries operations on the backend in case of an error with a @@ -74,7 +76,16 @@ func (be *Backend) retry(ctx context.Context, msg string, f func() error) error bo.InitialInterval = 1 * time.Millisecond } - err := retryNotifyErrorWithSuccess(f, + err := retryNotifyErrorWithSuccess( + func() error { + err := f() + // don't retry permanent errors as those very likely cannot be fixed by retrying + // TODO remove IsNotExist(err) special cases when removing the feature flag + if feature.Flag.Enabled(feature.BackendErrorRedesign) && !errors.Is(err, &backoff.PermanentError{}) && be.Backend.IsPermanentError(err) { + return backoff.Permanent(err) + } + return err + }, backoff.WithContext(backoff.WithMaxRetries(bo, uint64(be.MaxTries)), ctx), func(err error, d time.Duration) { if be.Report != nil { @@ -128,11 +139,7 @@ func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.Rewind func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) (err error) { return be.retry(ctx, fmt.Sprintf("Load(%v, %v, %v)", h, length, offset), func() error { - err := be.Backend.Load(ctx, h, length, offset, consumer) - if be.Backend.IsNotExist(err) { - return backoff.Permanent(err) - } - return err + return be.Backend.Load(ctx, h, length, offset, consumer) }) } diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go index 405cdfa5923..80964fb37f3 100644 --- a/internal/backend/retry/backend_retry_test.go +++ b/internal/backend/retry/backend_retry_test.go @@ -289,7 +289,7 @@ func TestBackendLoadNotExists(t *testing.T) { } return nil, notFound } - be.IsNotExistFn = func(err error) bool { + be.IsPermanentErrorFn = func(err error) bool { return errors.Is(err, notFound) } @@ -299,7 +299,7 @@ func TestBackendLoadNotExists(t *testing.T) { err := retryBackend.Load(context.TODO(), backend.Handle{}, 0, 0, func(rd io.Reader) (err error) { return nil }) - test.Assert(t, be.IsNotExistFn(err), "unexpected error %v", err) + test.Assert(t, be.IsPermanentErrorFn(err), "unexpected error %v", err) test.Equals(t, 1, attempt) } @@ -329,6 +329,36 @@ func TestBackendStatNotExists(t *testing.T) { test.Equals(t, 1, attempt) } +func TestBackendRetryPermanent(t *testing.T) { + // retry should not retry if the error matches IsPermanentError + notFound := errors.New("not found") + attempt := 0 + + be := mock.NewBackend() + be.IsPermanentErrorFn = func(err error) bool { + return errors.Is(err, notFound) + } + + TestFastRetries(t) + retryBackend := New(be, 2, nil, nil) + err := retryBackend.retry(context.TODO(), "test", func() error { + attempt++ + return notFound + }) + + test.Assert(t, be.IsPermanentErrorFn(err), "unexpected error %v", err) + test.Equals(t, 1, attempt) + + attempt = 0 + err = retryBackend.retry(context.TODO(), "test", func() 
error { + attempt++ + return errors.New("something") + }) + test.Assert(t, !be.IsPermanentErrorFn(err), "error unexpectedly considered permanent %v", err) + test.Equals(t, 3, attempt) + +} + func assertIsCanceled(t *testing.T, err error) { test.Assert(t, err == context.Canceled, "got unexpected err %v", err) } From 53561474d90634d4f4c234def82713c4eaad7ebe Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 20:26:34 +0200 Subject: [PATCH 195/893] update changelog with persistent backend error handling --- changelog/unreleased/issue-4515 | 8 -------- changelog/unreleased/issue-4627 | 18 +++++++++++++----- 2 files changed, 13 insertions(+), 13 deletions(-) delete mode 100644 changelog/unreleased/issue-4515 diff --git a/changelog/unreleased/issue-4515 b/changelog/unreleased/issue-4515 deleted file mode 100644 index 3832dc6050c..00000000000 --- a/changelog/unreleased/issue-4515 +++ /dev/null @@ -1,8 +0,0 @@ -Change: Don't retry to load files that don't exist - -Restic used to always retry to load files. It now only retries to load -files if they exist. - -https://github.com/restic/restic/issues/4515 -https://github.com/restic/restic/issues/1523 -https://github.com/restic/restic/pull/4520 diff --git a/changelog/unreleased/issue-4627 b/changelog/unreleased/issue-4627 index bbc861b8e49..d9705453546 100644 --- a/changelog/unreleased/issue-4627 +++ b/changelog/unreleased/issue-4627 @@ -1,4 +1,4 @@ -Enhancement: Improve reliability of backend operations +Change: Redesign backend error handling to improve reliability Restic now downloads pack files in large chunks instead of using a streaming download. This prevents failures due to interrupted streams. The `restore` @@ -6,12 +6,20 @@ command now also retries downloading individual blobs that cannot be retrieved. HTTP requests that are stuck for more than two minutes while uploading or downloading are now forcibly interrupted. This ensures that stuck requests are -retried after a short timeout. These new request timeouts can temporarily be -disabled by setting the environment variable -`RESTIC_FEATURES=http-timeouts=false`. Note that this feature flag will be -removed in the next minor restic version. +retried after a short timeout. + +Attempts to access a missing file or a truncated file will no longer be retried. +This avoids unnecessary retries in those cases. + +Most parts of the new backend error handling can temporarily be disabled by +setting the environment variable +`RESTIC_FEATURES=backend-error-redesign=false`. Note that this feature flag will +be removed in the next minor restic version. 
https://github.com/restic/restic/issues/4627 https://github.com/restic/restic/issues/4193 https://github.com/restic/restic/pull/4605 https://github.com/restic/restic/pull/4792 +https://github.com/restic/restic/issues/4515 +https://github.com/restic/restic/issues/1523 +https://github.com/restic/restic/pull/4520 From 6328b7e1f5016b3f78740e5340ad5bcae8c25446 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 21:36:16 +0200 Subject: [PATCH 196/893] replace "too small" with "too short" in error messages --- internal/cache/file.go | 2 +- internal/crypto/crypto.go | 2 +- internal/index/indexmap.go | 2 +- internal/pack/pack.go | 6 +++--- internal/repository/prune.go | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/cache/file.go b/internal/cache/file.go index 1bfe922d24d..8d8bc5e841d 100644 --- a/internal/cache/file.go +++ b/internal/cache/file.go @@ -61,7 +61,7 @@ func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, if size < offset+int64(length) { _ = f.Close() _ = c.remove(h) - return nil, errors.Errorf("cached file %v is too small, removing", h) + return nil, errors.Errorf("cached file %v is too short, removing", h) } if offset > 0 { diff --git a/internal/crypto/crypto.go b/internal/crypto/crypto.go index 0f9179207d1..58c82e78cc5 100644 --- a/internal/crypto/crypto.go +++ b/internal/crypto/crypto.go @@ -299,7 +299,7 @@ func (k *Key) Open(dst, nonce, ciphertext, _ []byte) ([]byte, error) { // check for plausible length if len(ciphertext) < k.Overhead() { - return nil, errors.Errorf("trying to decrypt invalid data: ciphertext too small") + return nil, errors.Errorf("trying to decrypt invalid data: ciphertext too short") } l := len(ciphertext) - macSize diff --git a/internal/index/indexmap.go b/internal/index/indexmap.go index 2386e01b625..4a78b9f77b4 100644 --- a/internal/index/indexmap.go +++ b/internal/index/indexmap.go @@ -204,7 +204,7 @@ func (h *hashedArrayTree) Size() uint { func (h *hashedArrayTree) grow() { idx, subIdx := h.index(h.size) if int(idx) == len(h.blockList) { - // blockList is too small -> double list and block size + // blockList is too short -> double list and block size h.blockSize *= 2 h.mask = h.mask*2 + 1 h.maskShift++ diff --git a/internal/pack/pack.go b/internal/pack/pack.go index 53631a6fb73..7d8d87e71d6 100644 --- a/internal/pack/pack.go +++ b/internal/pack/pack.go @@ -239,7 +239,7 @@ func readRecords(rd io.ReaderAt, size int64, bufsize int) ([]byte, int, error) { case hlen == 0: err = InvalidFileError{Message: "header length is zero"} case hlen < crypto.Extension: - err = InvalidFileError{Message: "header length is too small"} + err = InvalidFileError{Message: "header length is too short"} case int64(hlen) > size-int64(headerLengthSize): err = InvalidFileError{Message: "header is larger than file"} case int64(hlen) > MaxHeaderSize-int64(headerLengthSize): @@ -263,7 +263,7 @@ func readRecords(rd io.ReaderAt, size int64, bufsize int) ([]byte, int, error) { func readHeader(rd io.ReaderAt, size int64) ([]byte, error) { debug.Log("size: %v", size) if size < int64(minFileSize) { - err := InvalidFileError{Message: "file is too small"} + err := InvalidFileError{Message: "file is too short"} return nil, errors.Wrap(err, "readHeader") } @@ -305,7 +305,7 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, hdr } if len(buf) < crypto.CiphertextLength(0) { - return nil, 0, errors.New("invalid header, too small") + return nil, 0, errors.New("invalid header, too short") } hdrSize 
= headerLengthSize + uint32(len(buf)) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 77811e3214f..8ab16ab1506 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -444,7 +444,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi // This is equivalent to sorting by unused / total space. // Instead of unused[i] / used[i] > unused[j] / used[j] we use // unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64 - // Moreover packs containing trees and too small packs are sorted to the beginning + // Moreover packs containing trees and too short packs are sorted to the beginning sort.Slice(repackCandidates, func(i, j int) bool { pi := repackCandidates[i].packInfo pj := repackCandidates[j].packInfo From 394c8ca3ed6a13f608c452695df6863538a83e60 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 12 May 2024 11:55:34 +0200 Subject: [PATCH 197/893] rest/rclone/s3/sftp/swift: move short file detection behind feature gate These backends tend to use a large variety of server implementations. Some of those implementations might prove problematic with the new checks. --- internal/backend/rest/rest.go | 3 ++- internal/backend/s3/s3.go | 3 ++- internal/backend/sftp/sftp.go | 3 ++- internal/backend/swift/swift.go | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go index 5b59b8e4f7c..f743c3e5084 100644 --- a/internal/backend/rest/rest.go +++ b/internal/backend/rest/rest.go @@ -17,6 +17,7 @@ import ( "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" ) // make sure the rest backend implements backend.Backend @@ -245,7 +246,7 @@ func (b *Backend) openReader(ctx context.Context, h backend.Handle, length int, return nil, &restError{h, resp.StatusCode, resp.Status} } - if length > 0 && resp.ContentLength != int64(length) { + if feature.Flag.Enabled(feature.BackendErrorRedesign) && length > 0 && resp.ContentLength != int64(length) { return nil, &restError{h, http.StatusRequestedRangeNotSatisfiable, "partial out of bounds read"} } diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index afe1653f658..a2c95ac328f 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -17,6 +17,7 @@ import ( "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" @@ -404,7 +405,7 @@ func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, return nil, err } - if length > 0 { + if feature.Flag.Enabled(feature.BackendErrorRedesign) && length > 0 { if info.Size > 0 && info.Size != int64(length) { _ = rd.Close() return nil, minio.ErrorResponse{Code: "InvalidRange", Message: "restic-file-too-short"} diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index 7bab25bed7e..b624c50605e 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -20,6 +20,7 @@ import ( "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/cenkalti/backoff/v4" "github.com/pkg/sftp" @@ -426,7 +427,7 @@ func (r *SFTP) 
checkNoSpace(dir string, size int64, origErr error) error { // given offset. func (r *SFTP) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { return util.DefaultLoad(ctx, h, length, offset, r.openReader, func(rd io.Reader) error { - if length == 0 { + if length == 0 || !feature.Flag.Enabled(feature.BackendErrorRedesign) { return fn(rd) } diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go index 616fcf3b7e9..1643af7fcc5 100644 --- a/internal/backend/swift/swift.go +++ b/internal/backend/swift/swift.go @@ -19,6 +19,7 @@ import ( "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/ncw/swift/v2" ) @@ -156,7 +157,7 @@ func (be *beSwift) openReader(ctx context.Context, h backend.Handle, length int, return nil, fmt.Errorf("conn.ObjectOpen: %w", err) } - if length > 0 { + if feature.Flag.Enabled(feature.BackendErrorRedesign) && length > 0 { // get response length, but don't cause backend calls cctx, cancel := context.WithCancel(context.Background()) cancel() From 53d15bcd1b339ea55436c4c1b8c41a41ed5e5855 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 12 May 2024 12:34:54 +0200 Subject: [PATCH 198/893] retry: add circuit breaker to load method If a file exhausts its retry attempts, then it is likely not accessible the next time. Thus, immediately fail all load calls for that file to avoid useless retries. --- internal/backend/retry/backend_retry.go | 29 ++++++++++- internal/backend/retry/backend_retry_test.go | 52 ++++++++++++++++++++ 2 files changed, 80 insertions(+), 1 deletion(-) diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index 4f25e0c7c30..31934ec96de 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "sync" "time" "github.com/cenkalti/backoff/v4" @@ -20,6 +21,8 @@ type Backend struct { MaxTries int Report func(string, error, time.Duration) Success func(string, int) + + failedLoads sync.Map } // statically ensure that RetryBackend implements backend.Backend. @@ -132,15 +135,39 @@ func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.Rewind }) } +// Failed loads expire after an hour +var failedLoadExpiry = time.Hour + // Load returns a reader that yields the contents of the file at h at the // given offset. If length is larger than zero, only a portion of the file // is returned. rd must be closed after use. If an error is returned, the // ReadCloser must be nil. 
func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) (err error) { - return be.retry(ctx, fmt.Sprintf("Load(%v, %v, %v)", h, length, offset), + key := h + key.IsMetadata = false + + // Implement the circuit breaker pattern for files that exhausted all retries due to a non-permanent error + if v, ok := be.failedLoads.Load(key); ok { + if time.Since(v.(time.Time)) > failedLoadExpiry { + be.failedLoads.Delete(key) + } else { + // fail immediately if the file was already problematic during the last hour + return fmt.Errorf("circuit breaker open for file %v", h) + } + } + + err = be.retry(ctx, fmt.Sprintf("Load(%v, %v, %v)", h, length, offset), func() error { return be.Backend.Load(ctx, h, length, offset, consumer) }) + + if feature.Flag.Enabled(feature.BackendErrorRedesign) && err != nil && !be.IsPermanentError(err) { + // We've exhausted the retries, the file is likely inaccessible. By excluding permanent + // errors, not found or truncated files are not recorded. + be.failedLoads.LoadOrStore(key, time.Now()) + } + + return err } // Stat returns information about the File identified by h. diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go index 80964fb37f3..a515b0b7d03 100644 --- a/internal/backend/retry/backend_retry_test.go +++ b/internal/backend/retry/backend_retry_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "io" + "strings" "testing" "time" @@ -303,6 +304,57 @@ func TestBackendLoadNotExists(t *testing.T) { test.Equals(t, 1, attempt) } +func TestBackendLoadCircuitBreaker(t *testing.T) { + // retry should not retry if the error matches IsPermanentError + notFound := errors.New("not found") + otherError := errors.New("something") + attempt := 0 + + be := mock.NewBackend() + be.IsPermanentErrorFn = func(err error) bool { + return errors.Is(err, notFound) + } + be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { + attempt++ + return nil, otherError + } + nilRd := func(rd io.Reader) (err error) { + return nil + } + + TestFastRetries(t) + retryBackend := New(be, 2, nil, nil) + // trip the circuit breaker for file "other" + err := retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd) + test.Equals(t, otherError, err, "unexpected error") + test.Equals(t, 3, attempt) + + attempt = 0 + err = retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd) + test.Assert(t, strings.Contains(err.Error(), "circuit breaker open for file"), "expected circuit breaker error, got %v") + test.Equals(t, 0, attempt) + + // don't trip for permanent errors + be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { + attempt++ + return nil, notFound + } + err = retryBackend.Load(context.TODO(), backend.Handle{Name: "notfound"}, 0, 0, nilRd) + test.Equals(t, notFound, err, "expected circuit breaker to only affect other file, got %v") + err = retryBackend.Load(context.TODO(), backend.Handle{Name: "notfound"}, 0, 0, nilRd) + test.Equals(t, notFound, err, "persistent error must not trigger circuit breaker, got %v") + + // wait for circuit breaker to expire + time.Sleep(5 * time.Millisecond) + old := failedLoadExpiry + defer func() { + failedLoadExpiry = old + }() + failedLoadExpiry = 3 * time.Millisecond + err = retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd) + test.Equals(t, notFound, err, "expected circuit 
breaker to reset, got %v") +} + func TestBackendStatNotExists(t *testing.T) { // stat should not retry if the error matches IsNotExist notFound := errors.New("not found") From 47232bf8b0cb7a0777e78ab83c1ffd423e5a5e64 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 25 Apr 2024 21:20:23 +0200 Subject: [PATCH 199/893] backend: move LimitReadCloser to util package The helper is only intended for usage by backend implementations. --- internal/backend/local/local.go | 2 +- internal/backend/sftp/sftp.go | 4 ++-- internal/backend/util/limited_reader.go | 15 +++++++++++++++ internal/backend/utils.go | 12 ------------ internal/cache/file.go | 3 ++- 5 files changed, 20 insertions(+), 16 deletions(-) create mode 100644 internal/backend/util/limited_reader.go diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go index ff7c4b7a5ff..599bee0f69d 100644 --- a/internal/backend/local/local.go +++ b/internal/backend/local/local.go @@ -247,7 +247,7 @@ func (b *Local) openReader(_ context.Context, h backend.Handle, length int, offs } if length > 0 { - return backend.LimitReadCloser(f, int64(length)), nil + return util.LimitReadCloser(f, int64(length)), nil } return f, nil diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index b624c50605e..3591c153068 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -437,7 +437,7 @@ func (r *SFTP) Load(ctx context.Context, h backend.Handle, length int, offset in // check the underlying reader to be agnostic to however fn() handles the returned error _, rderr := rd.Read([]byte{0}) - if rderr == io.EOF && rd.(*backend.LimitedReadCloser).N != 0 { + if rderr == io.EOF && rd.(*util.LimitedReadCloser).N != 0 { // file is too short return fmt.Errorf("%w: %v", errTooShort, err) } @@ -463,7 +463,7 @@ func (r *SFTP) openReader(_ context.Context, h backend.Handle, length int, offse if length > 0 { // unlimited reads usually use io.Copy which needs WriteTo support at the underlying reader // limited reads are usually combined with io.ReadFull which reads all required bytes into a buffer in one go - return backend.LimitReadCloser(f, int64(length)), nil + return util.LimitReadCloser(f, int64(length)), nil } return f, nil diff --git a/internal/backend/util/limited_reader.go b/internal/backend/util/limited_reader.go new file mode 100644 index 00000000000..fdee1c06a98 --- /dev/null +++ b/internal/backend/util/limited_reader.go @@ -0,0 +1,15 @@ +package util + +import "io" + +// LimitedReadCloser wraps io.LimitedReader and exposes the Close() method. +type LimitedReadCloser struct { + io.Closer + io.LimitedReader +} + +// LimitReadCloser returns a new reader wraps r in an io.LimitedReader, but also +// exposes the Close() method. +func LimitReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser { + return &LimitedReadCloser{Closer: r, LimitedReader: io.LimitedReader{R: r, N: n}} +} diff --git a/internal/backend/utils.go b/internal/backend/utils.go index 16160829574..919a1ad9212 100644 --- a/internal/backend/utils.go +++ b/internal/backend/utils.go @@ -62,15 +62,3 @@ func LoadAll(ctx context.Context, buf []byte, be Backend, h Handle) ([]byte, err return buf, nil } - -// LimitedReadCloser wraps io.LimitedReader and exposes the Close() method. -type LimitedReadCloser struct { - io.Closer - io.LimitedReader -} - -// LimitReadCloser returns a new reader wraps r in an io.LimitedReader, but also -// exposes the Close() method. 
-func LimitReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser { - return &LimitedReadCloser{Closer: r, LimitedReader: io.LimitedReader{R: r, N: n}} -} diff --git a/internal/cache/file.go b/internal/cache/file.go index 8d8bc5e841d..59444a788fc 100644 --- a/internal/cache/file.go +++ b/internal/cache/file.go @@ -8,6 +8,7 @@ import ( "github.com/pkg/errors" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/fs" @@ -74,7 +75,7 @@ func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, if length <= 0 { return f, nil } - return backend.LimitReadCloser(f, int64(length)), nil + return util.LimitReadCloser(f, int64(length)), nil } // Save saves a file in the cache. From 1d6d3656b0bd2975da4cbd2890bfc0e1b2cd4de4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 18:59:29 +0200 Subject: [PATCH 200/893] repository: move backend.LoadAll to repository.LoadRaw LoadRaw also includes improved context cancellation handling similar to the implementation in repository.LoadUnpacked. The removed cache backend test will be added again later on. --- cmd/restic/cmd_cat.go | 7 +- cmd/restic/cmd_debug.go | 5 +- internal/backend/test/tests.go | 21 ++++- internal/backend/utils.go | 64 -------------- internal/cache/backend_test.go | 40 +-------- internal/repository/key.go | 3 +- internal/repository/raw.go | 63 ++++++++++++++ .../utils_test.go => repository/raw_test.go} | 86 +++---------------- internal/repository/repair_pack_test.go | 3 +- internal/repository/repository_test.go | 4 +- internal/restic/repository.go | 5 ++ 11 files changed, 113 insertions(+), 188 deletions(-) delete mode 100644 internal/backend/utils.go create mode 100644 internal/repository/raw.go rename internal/{backend/utils_test.go => repository/raw_test.go} (50%) diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index ccec9b5d915..8d11a9dc429 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -7,7 +7,6 @@ import ( "github.com/spf13/cobra" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -146,9 +145,9 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { return nil case "pack": - h := backend.Handle{Type: restic.PackFile, Name: id.String()} - buf, err := backend.LoadAll(ctx, nil, repo.Backend(), h) - if err != nil { + buf, err := repo.LoadRaw(ctx, restic.PackFile, id) + // allow returning broken pack files + if buf == nil { return err } diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 3abb9d7eb8a..93e627f2780 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -492,8 +492,9 @@ func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repo } Printf(" file size is %v\n", fi.Size) - buf, err := backend.LoadAll(ctx, nil, repo.Backend(), h) - if err != nil { + buf, err := repo.LoadRaw(ctx, restic.PackFile, id) + // also process damaged pack files + if buf == nil { return err } gotID := restic.Hash(buf) diff --git a/internal/backend/test/tests.go b/internal/backend/test/tests.go index 963659fda4a..4c260d264d5 100644 --- a/internal/backend/test/tests.go +++ b/internal/backend/test/tests.go @@ -36,6 +36,19 @@ func beTest(ctx context.Context, be backend.Backend, h backend.Handle) (bool, er return err == nil, err } 
+func LoadAll(ctx context.Context, be backend.Backend, h backend.Handle) ([]byte, error) { + var buf []byte + err := be.Load(ctx, h, 0, 0, func(rd io.Reader) error { + var err error + buf, err = io.ReadAll(rd) + return err + }) + if err != nil { + return nil, err + } + return buf, nil +} + // TestStripPasswordCall tests that the StripPassword method of a factory can be called without crashing. // It does not verify whether passwords are removed correctly func (s *Suite[C]) TestStripPasswordCall(_ *testing.T) { @@ -94,7 +107,7 @@ func (s *Suite[C]) TestConfig(t *testing.T) { var testString = "Config" // create config and read it back - _, err := backend.LoadAll(context.TODO(), nil, b, backend.Handle{Type: backend.ConfigFile}) + _, err := LoadAll(context.TODO(), b, backend.Handle{Type: backend.ConfigFile}) if err == nil { t.Fatalf("did not get expected error for non-existing config") } @@ -110,7 +123,7 @@ func (s *Suite[C]) TestConfig(t *testing.T) { // same config for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { h := backend.Handle{Type: backend.ConfigFile, Name: name} - buf, err := backend.LoadAll(context.TODO(), nil, b, h) + buf, err := LoadAll(context.TODO(), b, h) if err != nil { t.Fatalf("unable to read config with name %q: %+v", name, err) } @@ -519,7 +532,7 @@ func (s *Suite[C]) TestSave(t *testing.T) { err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher())) test.OK(t, err) - buf, err := backend.LoadAll(context.TODO(), nil, b, h) + buf, err := LoadAll(context.TODO(), b, h) test.OK(t, err) if len(buf) != len(data) { t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf)) @@ -821,7 +834,7 @@ func (s *Suite[C]) TestBackend(t *testing.T) { // test Load() h := backend.Handle{Type: tpe, Name: ts.id} - buf, err := backend.LoadAll(context.TODO(), nil, b, h) + buf, err := LoadAll(context.TODO(), b, h) test.OK(t, err) test.Equals(t, ts.data, string(buf)) diff --git a/internal/backend/utils.go b/internal/backend/utils.go deleted file mode 100644 index 919a1ad9212..00000000000 --- a/internal/backend/utils.go +++ /dev/null @@ -1,64 +0,0 @@ -package backend - -import ( - "bytes" - "context" - "encoding/hex" - "fmt" - "io" - - "github.com/minio/sha256-simd" - - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/errors" -) - -func verifyContentMatchesName(s string, data []byte) (bool, error) { - if len(s) != hex.EncodedLen(sha256.Size) { - return false, fmt.Errorf("invalid length for ID: %q", s) - } - - b, err := hex.DecodeString(s) - if err != nil { - return false, fmt.Errorf("invalid ID: %s", err) - } - var id [sha256.Size]byte - copy(id[:], b) - - hashed := sha256.Sum256(data) - return id == hashed, nil -} - -// LoadAll reads all data stored in the backend for the handle into the given -// buffer, which is truncated. If the buffer is not large enough or nil, a new -// one is allocated. -func LoadAll(ctx context.Context, buf []byte, be Backend, h Handle) ([]byte, error) { - retriedInvalidData := false - err := be.Load(ctx, h, 0, 0, func(rd io.Reader) error { - // make sure this is idempotent, in case an error occurs this function may be called multiple times! - wr := bytes.NewBuffer(buf[:0]) - _, cerr := io.Copy(wr, rd) - if cerr != nil { - return cerr - } - buf = wr.Bytes() - - // retry loading damaged data only once. If a file fails to download correctly - // the second time, then it is likely corrupted at the backend. 
Return the data - // to the caller in that case to let it decide what to do with the data. - if !retriedInvalidData && h.Type != ConfigFile { - if matches, err := verifyContentMatchesName(h.Name, buf); err == nil && !matches { - debug.Log("retry loading broken blob %v", h) - retriedInvalidData = true - return errors.Errorf("loadAll(%v): invalid data returned", h) - } - } - return nil - }) - - if err != nil { - return nil, err - } - - return buf, nil -} diff --git a/internal/cache/backend_test.go b/internal/cache/backend_test.go index 68fbb02b3f9..c8d6678540a 100644 --- a/internal/cache/backend_test.go +++ b/internal/cache/backend_test.go @@ -12,12 +12,13 @@ import ( "github.com/pkg/errors" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/mem" + backendtest "github.com/restic/restic/internal/backend/test" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" ) func loadAndCompare(t testing.TB, be backend.Backend, h backend.Handle, data []byte) { - buf, err := backend.LoadAll(context.TODO(), nil, be, h) + buf, err := backendtest.LoadAll(context.TODO(), be, h) if err != nil { t.Fatal(err) } @@ -140,7 +141,7 @@ func TestErrorBackend(t *testing.T) { loadTest := func(wg *sync.WaitGroup, be backend.Backend) { defer wg.Done() - buf, err := backend.LoadAll(context.TODO(), nil, be, h) + buf, err := backendtest.LoadAll(context.TODO(), be, h) if err == testErr { return } @@ -165,38 +166,3 @@ func TestErrorBackend(t *testing.T) { wg.Wait() } - -func TestBackendRemoveBroken(t *testing.T) { - be := mem.New() - c := TestNewCache(t) - - h, data := randomData(5234142) - // save directly in backend - save(t, be, h, data) - - // prime cache with broken copy - broken := append([]byte{}, data...) - broken[0] ^= 0xff - err := c.Save(h, bytes.NewReader(broken)) - test.OK(t, err) - - // loadall retries if broken data was returned - buf, err := backend.LoadAll(context.TODO(), nil, c.Wrap(be), h) - test.OK(t, err) - - if !bytes.Equal(buf, data) { - t.Fatalf("wrong data returned") - } - - // check that the cache now contains the correct data - rd, err := c.load(h, 0, 0) - defer func() { - _ = rd.Close() - }() - test.OK(t, err) - cached, err := io.ReadAll(rd) - test.OK(t, err) - if !bytes.Equal(cached, data) { - t.Fatalf("wrong data cache") - } -} diff --git a/internal/repository/key.go b/internal/repository/key.go index 0604b44dfb9..08f997544bd 100644 --- a/internal/repository/key.go +++ b/internal/repository/key.go @@ -178,8 +178,7 @@ func SearchKey(ctx context.Context, s *Repository, password string, maxKeys int, // LoadKey loads a key from the backend. func LoadKey(ctx context.Context, s *Repository, id restic.ID) (k *Key, err error) { - h := backend.Handle{Type: restic.KeyFile, Name: id.String()} - data, err := backend.LoadAll(ctx, nil, s.be, h) + data, err := s.LoadRaw(ctx, restic.KeyFile, id) if err != nil { return nil, err } diff --git a/internal/repository/raw.go b/internal/repository/raw.go new file mode 100644 index 00000000000..d173908d401 --- /dev/null +++ b/internal/repository/raw.go @@ -0,0 +1,63 @@ +package repository + +import ( + "bytes" + "context" + "fmt" + "io" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" +) + +// LoadRaw reads all data stored in the backend for the file with id and filetype t. 
+// If the backend returns data that does not match the id, then the buffer is returned +// along with an error that is a restic.ErrInvalidData error. +func (r *Repository) LoadRaw(ctx context.Context, t restic.FileType, id restic.ID) (buf []byte, err error) { + h := backend.Handle{Type: t, Name: id.String()} + + ctx, cancel := context.WithCancel(ctx) + + var dataErr error + retriedInvalidData := false + err = r.be.Load(ctx, h, 0, 0, func(rd io.Reader) error { + // make sure this is idempotent, in case an error occurs this function may be called multiple times! + wr := bytes.NewBuffer(buf[:0]) + _, cerr := io.Copy(wr, rd) + if cerr != nil { + return cerr + } + buf = wr.Bytes() + + // retry loading damaged data only once. If a file fails to download correctly + // the second time, then it is likely corrupted at the backend. + if h.Type != backend.ConfigFile { + if id != restic.Hash(buf) { + if !retriedInvalidData { + debug.Log("retry loading broken blob %v", h) + retriedInvalidData = true + } else { + // with a canceled context there is not guarantee which error will + // be returned by `be.Load`. + dataErr = fmt.Errorf("loadAll(%v): %w", h, restic.ErrInvalidData) + cancel() + } + return restic.ErrInvalidData + } + } + return nil + }) + + // Return corrupted data to the caller if it is still broken the second time to + // let the caller decide what to do with the data. + if dataErr != nil { + return buf, dataErr + } + + if err != nil { + return nil, err + } + + return buf, nil +} diff --git a/internal/backend/utils_test.go b/internal/repository/raw_test.go similarity index 50% rename from internal/backend/utils_test.go rename to internal/repository/raw_test.go index ad9540e5417..42be8827ba3 100644 --- a/internal/backend/utils_test.go +++ b/internal/repository/raw_test.go @@ -1,4 +1,4 @@ -package backend_test +package repository_test import ( "bytes" @@ -10,6 +10,8 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/backend/mock" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -19,9 +21,10 @@ const MiB = 1 << 20 func TestLoadAll(t *testing.T) { b := mem.New() - var buf []byte + repo, err := repository.New(b, repository.Options{}) + rtest.OK(t, err) - for i := 0; i < 20; i++ { + for i := 0; i < 5; i++ { data := rtest.Random(23+i, rand.Intn(MiB)+500*KiB) id := restic.Hash(data) @@ -29,7 +32,7 @@ func TestLoadAll(t *testing.T) { err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher())) rtest.OK(t, err) - buf, err := backend.LoadAll(context.TODO(), buf, b, backend.Handle{Type: backend.PackFile, Name: id.String()}) + buf, err := repo.LoadRaw(context.TODO(), backend.PackFile, id) rtest.OK(t, err) if len(buf) != len(data) { @@ -44,16 +47,6 @@ func TestLoadAll(t *testing.T) { } } -func save(t testing.TB, be backend.Backend, buf []byte) backend.Handle { - id := restic.Hash(buf) - h := backend.Handle{Name: id.String(), Type: backend.PackFile} - err := be.Save(context.TODO(), h, backend.NewByteReader(buf, be.Hasher())) - if err != nil { - t.Fatal(err) - } - return h -} - type quickRetryBackend struct { backend.Backend } @@ -69,6 +62,8 @@ func (be *quickRetryBackend) Load(ctx context.Context, h backend.Handle, length func TestLoadAllBroken(t *testing.T) { b := mock.NewBackend() + repo, err := repository.New(b, repository.Options{}) + rtest.OK(t, err) data := 
rtest.Random(23, rand.Intn(MiB)+500*KiB) id := restic.Hash(data) @@ -80,70 +75,17 @@ func TestLoadAllBroken(t *testing.T) { } // must fail on first try - _, err := backend.LoadAll(context.TODO(), nil, b, backend.Handle{Type: backend.PackFile, Name: id.String()}) - if err == nil { - t.Fatalf("missing expected error") - } + _, err = repo.LoadRaw(context.TODO(), backend.PackFile, id) + rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "missing expected ErrInvalidData error, got %v", err) // must return the broken data after a retry be := &quickRetryBackend{Backend: b} - buf, err := backend.LoadAll(context.TODO(), nil, be, backend.Handle{Type: backend.PackFile, Name: id.String()}) + repo, err = repository.New(be, repository.Options{}) rtest.OK(t, err) + buf, err := repo.LoadRaw(context.TODO(), backend.PackFile, id) + rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "missing expected ErrInvalidData error, got %v", err) if !bytes.Equal(buf, data) { t.Fatalf("wrong data returned") } } - -func TestLoadAllAppend(t *testing.T) { - b := mem.New() - - h1 := save(t, b, []byte("foobar test string")) - randomData := rtest.Random(23, rand.Intn(MiB)+500*KiB) - h2 := save(t, b, randomData) - - var tests = []struct { - handle backend.Handle - buf []byte - want []byte - }{ - { - handle: h1, - buf: nil, - want: []byte("foobar test string"), - }, - { - handle: h1, - buf: []byte("xxx"), - want: []byte("foobar test string"), - }, - { - handle: h2, - buf: nil, - want: randomData, - }, - { - handle: h2, - buf: make([]byte, 0, 200), - want: randomData, - }, - { - handle: h2, - buf: []byte("foobarbaz"), - want: randomData, - }, - } - - for _, test := range tests { - t.Run("", func(t *testing.T) { - buf, err := backend.LoadAll(context.TODO(), test.buf, b, test.handle) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(buf, test.want) { - t.Errorf("wrong data returned, want %q, got %q", test.want, buf) - } - }) - } -} diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index 078017d213f..0d16d251ff6 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/restic/restic/internal/backend" + backendtest "github.com/restic/restic/internal/backend/test" "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -24,7 +25,7 @@ func listBlobs(repo restic.Repository) restic.BlobSet { } func replaceFile(t *testing.T, repo restic.Repository, h backend.Handle, damage func([]byte) []byte) { - buf, err := backend.LoadAll(context.TODO(), nil, repo.Backend(), h) + buf, err := backendtest.LoadAll(context.TODO(), repo.Backend(), h) test.OK(t, err) buf = damage(buf) test.OK(t, repo.Backend().Remove(context.TODO(), h)) diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 48a56a1fd51..d7481117a07 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -9,13 +9,13 @@ import ( "math/rand" "os" "path/filepath" - "strings" "testing" "time" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/local" "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -259,7 +259,7 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { if err == nil { 
t.Fatal("missing expected error") } - rtest.Assert(t, strings.Contains(err.Error(), "invalid data returned"), "unexpected error: %v", err) + rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "unexpected error: %v", err) } type damageOnceBackend struct { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 7a3389e00d0..5393e07013c 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -57,6 +57,11 @@ type Repository interface { // LoadUnpacked loads and decrypts the file with the given type and ID. LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error) SaveUnpacked(context.Context, FileType, []byte) (ID, error) + + // LoadRaw reads all data stored in the backend for the file with id and filetype t. + // If the backend returns data that does not match the id, then the buffer is returned + // along with an error that is a restic.ErrInvalidData error. + LoadRaw(ctx context.Context, t FileType, id ID) (data []byte, err error) } type FileType = backend.FileType From 779c8d352702783f949bf916e915784fe846ec8a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 19:01:20 +0200 Subject: [PATCH 201/893] debug/repair packs/upgrade repo v2: use repository.LoadRaw This replaces calling the low-level backend.Load() method. --- cmd/restic/cmd_debug.go | 23 ++++++++--------------- cmd/restic/cmd_repair_packs.go | 19 ++++++++----------- internal/migrations/upgrade_repo_v2.go | 7 +------ 3 files changed, 17 insertions(+), 32 deletions(-) diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 93e627f2780..a63ac8c4c91 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -316,10 +316,11 @@ func loadBlobs(ctx context.Context, opts DebugExamineOptions, repo restic.Reposi if err != nil { panic(err) } - be := repo.Backend() - h := backend.Handle{ - Name: packID.String(), - Type: restic.PackFile, + + pack, err := repo.LoadRaw(ctx, restic.PackFile, packID) + // allow processing broken pack files + if pack == nil { + return err } wg, ctx := errgroup.WithContext(ctx) @@ -331,19 +332,11 @@ func loadBlobs(ctx context.Context, opts DebugExamineOptions, repo restic.Reposi wg.Go(func() error { for _, blob := range list { Printf(" loading blob %v at %v (length %v)\n", blob.ID, blob.Offset, blob.Length) - buf := make([]byte, blob.Length) - err := be.Load(ctx, h, int(blob.Length), int64(blob.Offset), func(rd io.Reader) error { - n, err := io.ReadFull(rd, buf) - if err != nil { - return fmt.Errorf("read error after %d bytes: %v", n, err) - } - return nil - }) - if err != nil { - Warnf("error read: %v\n", err) + if int(blob.Offset+blob.Length) > len(pack) { + Warnf("skipping truncated blob\n") continue } - + buf := pack[blob.Offset : blob.Offset+blob.Length] key := repo.Key() nonce, plaintext := buf[:key.NonceSize()], buf[key.NonceSize():] diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 636213965a7..c57a316906d 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -1,11 +1,11 @@ package main import ( + "bytes" "context" "io" "os" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -68,20 +68,17 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T printer.P("saving backup copies of pack files to current folder") for id := range ids { - f, err := os.OpenFile("pack-"+id.String(), 
os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666) - if err != nil { + buf, err := repo.LoadRaw(ctx, restic.PackFile, id) + // corrupted data is fine + if buf == nil { return err } - err = repo.Backend().Load(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}, 0, 0, func(rd io.Reader) error { - _, err := f.Seek(0, 0) - if err != nil { - return err - } - _, err = io.Copy(f, rd) - return err - }) + f, err := os.OpenFile("pack-"+id.String(), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666) if err != nil { + return err + } + if _, err := io.Copy(f, bytes.NewReader(buf)); err != nil { _ = f.Close() return err } diff --git a/internal/migrations/upgrade_repo_v2.go b/internal/migrations/upgrade_repo_v2.go index 585d9e8c745..6f4225947d6 100644 --- a/internal/migrations/upgrade_repo_v2.go +++ b/internal/migrations/upgrade_repo_v2.go @@ -3,7 +3,6 @@ package migrations import ( "context" "fmt" - "io" "os" "path/filepath" @@ -89,11 +88,7 @@ func (m *UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error h := backend.Handle{Type: restic.ConfigFile} // read raw config file and save it to a temp dir, just in case - var rawConfigFile []byte - err = repo.Backend().Load(ctx, h, 0, 0, func(rd io.Reader) (err error) { - rawConfigFile, err = io.ReadAll(rd) - return err - }) + rawConfigFile, err := repo.LoadRaw(ctx, restic.ConfigFile, restic.ID{}) if err != nil { return fmt.Errorf("load config file failed: %w", err) } From 021fb49559352e0f93befd78d592f7006184fe5b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 8 May 2024 22:42:37 +0200 Subject: [PATCH 202/893] repository: Implement repository.LoadUnpacked using LoadRaw Both functions were using a similar implementation. --- internal/repository/repository.go | 37 +------------------------------ 1 file changed, 1 insertion(+), 36 deletions(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index a922b44e3a8..9272a5e05a0 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -174,46 +174,11 @@ func (r *Repository) LoadUnpacked(ctx context.Context, t restic.FileType, id res id = restic.ID{} } - ctx, cancel := context.WithCancel(ctx) - - h := backend.Handle{Type: t, Name: id.String()} - retriedInvalidData := false - var dataErr error - wr := new(bytes.Buffer) - - err := r.be.Load(ctx, h, 0, 0, func(rd io.Reader) error { - // make sure this call is idempotent, in case an error occurs - wr.Reset() - _, cerr := io.Copy(wr, rd) - if cerr != nil { - return cerr - } - - buf := wr.Bytes() - if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) { - debug.Log("retry loading broken blob %v", h) - if !retriedInvalidData { - retriedInvalidData = true - } else { - // with a canceled context there is not guarantee which error will - // be returned by `be.Load`. 
- dataErr = fmt.Errorf("load(%v): %w", h, restic.ErrInvalidData) - cancel() - } - return restic.ErrInvalidData - - } - return nil - }) - - if dataErr != nil { - return nil, dataErr - } + buf, err := r.LoadRaw(ctx, t, id) if err != nil { return nil, err } - buf := wr.Bytes() nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():] plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil) if err != nil { From 6563f1d2ca5f9634d233381ad65143d201be2204 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 8 May 2024 22:43:58 +0200 Subject: [PATCH 203/893] repository: remove redundant debug log --- internal/repository/repository.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 9272a5e05a0..6ff20bfe67b 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -238,11 +238,6 @@ func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic. var lastError error for _, blob := range blobs { debug.Log("blob %v/%v found: %v", t, id, blob) - - if blob.Type != t { - debug.Log("blob %v has wrong block type, want %v", blob, t) - } - // load blob from pack h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: t.IsMetadata()} From 503c8140b1107314e6d6a687d01de37158bb9a86 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 8 May 2024 23:26:41 +0200 Subject: [PATCH 204/893] repository: unify blob decoding code --- internal/repository/repository.go | 34 ++++++++----------------------- 1 file changed, 9 insertions(+), 25 deletions(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 6ff20bfe67b..f4d83c1e1f8 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -248,42 +248,26 @@ func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic. 
buf = buf[:blob.Length] } - n, err := backend.ReadAt(ctx, r.be, h, int64(blob.Offset), buf) + _, err := backend.ReadAt(ctx, r.be, h, int64(blob.Offset), buf) if err != nil { debug.Log("error loading blob %v: %v", blob, err) lastError = err continue } - if uint(n) != blob.Length { - lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d", - id.Str(), blob.Length, uint(n)) - debug.Log("lastError: %v", lastError) - continue - } + it := NewPackBlobIterator(blob.PackID, newByteReader(buf), uint(blob.Offset), []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder()) + pbv, err := it.Next() - // decrypt - nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():] - plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil) - if err != nil { - lastError = errors.Errorf("decrypting blob %v failed: %v", id, err) - continue + if err == nil { + err = pbv.Err } - - if blob.IsCompressed() { - plaintext, err = r.getZstdDecoder().DecodeAll(plaintext, make([]byte, 0, blob.DataLength())) - if err != nil { - lastError = errors.Errorf("decompressing blob %v failed: %v", id, err) - continue - } - } - - // check hash - if !restic.Hash(plaintext).Equal(id) { - lastError = errors.Errorf("blob %v returned invalid hash", id) + if err != nil { + debug.Log("error decoding blob %v: %v", blob, err) + lastError = err continue } + plaintext := pbv.Plaintext if len(plaintext) > cap(buf) { return plaintext, nil } From e9390352a77e8eaf59fc7797d109317208a631dd Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 13:07:21 +0200 Subject: [PATCH 205/893] cache: code cleanups --- internal/cache/backend.go | 15 +++++---------- internal/cache/file.go | 16 ++++++++++------ internal/cache/file_test.go | 8 ++++---- 3 files changed, 19 insertions(+), 20 deletions(-) diff --git a/internal/cache/backend.go b/internal/cache/backend.go index 5cbdb544465..7a7e5cd9fe5 100644 --- a/internal/cache/backend.go +++ b/internal/cache/backend.go @@ -79,10 +79,9 @@ func (b *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindR return err } - err = b.Cache.Save(h, rd) + err = b.Cache.save(h, rd) if err != nil { debug.Log("unable to save %v to cache: %v", h, err) - _ = b.Cache.remove(h) return err } @@ -120,7 +119,7 @@ func (b *Backend) cacheFile(ctx context.Context, h backend.Handle) error { if !b.Cache.Has(h) { // nope, it's still not in the cache, pull it from the repo and save it err := b.Backend.Load(ctx, h, 0, 0, func(rd io.Reader) error { - return b.Cache.Save(h, rd) + return b.Cache.save(h, rd) }) if err != nil { // try to remove from the cache, ignore errors @@ -198,13 +197,9 @@ func (b *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, debug.Log("cache Stat(%v)", h) fi, err := b.Backend.Stat(ctx, h) - if err != nil { - if b.Backend.IsNotExist(err) { - // try to remove from the cache, ignore errors - _ = b.Cache.remove(h) - } - - return fi, err + if err != nil && b.Backend.IsNotExist(err) { + // try to remove from the cache, ignore errors + _ = b.Cache.remove(h) } return fi, err diff --git a/internal/cache/file.go b/internal/cache/file.go index 59444a788fc..b2e9ec61800 100644 --- a/internal/cache/file.go +++ b/internal/cache/file.go @@ -32,7 +32,7 @@ func (c *Cache) canBeCached(t backend.FileType) bool { return ok } -// Load returns a reader that yields the contents of the file with the +// load returns a reader that yields the contents of the file with the // given handle. rd must be closed after use. 
If an error is returned, the // ReadCloser is nil. func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, error) { @@ -78,8 +78,8 @@ func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, return util.LimitReadCloser(f, int64(length)), nil } -// Save saves a file in the cache. -func (c *Cache) Save(h backend.Handle, rd io.Reader) error { +// save saves a file in the cache. +func (c *Cache) save(h backend.Handle, rd io.Reader) error { debug.Log("Save to cache: %v", h) if rd == nil { return errors.New("Save() called with nil reader") @@ -139,13 +139,17 @@ func (c *Cache) Save(h backend.Handle, rd io.Reader) error { return errors.WithStack(err) } -// Remove deletes a file. When the file is not cache, no error is returned. +// remove deletes a file. When the file is not cached, no error is returned. func (c *Cache) remove(h backend.Handle) error { - if !c.Has(h) { + if !c.canBeCached(h.Type) { return nil } - return fs.Remove(c.filename(h)) + err := fs.Remove(c.filename(h)) + if errors.Is(err, os.ErrNotExist) { + err = nil + } + return err } // Clear removes all files of type t from the cache that are not contained in diff --git a/internal/cache/file_test.go b/internal/cache/file_test.go index 7935f9806cf..2ce238888ce 100644 --- a/internal/cache/file_test.go +++ b/internal/cache/file_test.go @@ -30,7 +30,7 @@ func generateRandomFiles(t testing.TB, tpe backend.FileType, c *Cache) restic.ID t.Errorf("index %v present before save", id) } - err := c.Save(h, bytes.NewReader(buf)) + err := c.save(h, bytes.NewReader(buf)) if err != nil { t.Fatal(err) } @@ -151,7 +151,7 @@ func TestFileLoad(t *testing.T) { Type: restic.PackFile, Name: id.String(), } - if err := c.Save(h, bytes.NewReader(data)); err != nil { + if err := c.save(h, bytes.NewReader(data)); err != nil { t.Fatalf("Save() returned error: %v", err) } @@ -237,7 +237,7 @@ func TestFileSaveConcurrent(t *testing.T) { } for i := 0; i < nproc/2; i++ { - g.Go(func() error { return c.Save(h, bytes.NewReader(data)) }) + g.Go(func() error { return c.save(h, bytes.NewReader(data)) }) // Can't use load because only the main goroutine may call t.Fatal. 
g.Go(func() error { @@ -280,7 +280,7 @@ func TestFileSaveAfterDamage(t *testing.T) { Type: restic.PackFile, Name: id.String(), } - if err := c.Save(h, bytes.NewReader(data)); err == nil { + if err := c.save(h, bytes.NewReader(data)); err == nil { t.Fatal("Missing error when saving to deleted cache directory") } } From 2ace242f366e65cf3a4fcfa138e4d7eb4e4d13f3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 15:26:41 +0200 Subject: [PATCH 206/893] repository: make reloading broken files explicit --- internal/cache/backend.go | 11 ++--- internal/cache/file.go | 10 +++-- internal/repository/raw.go | 61 ++++++++++++-------------- internal/repository/repository_test.go | 14 +++--- 4 files changed, 45 insertions(+), 51 deletions(-) diff --git a/internal/cache/backend.go b/internal/cache/backend.go index 7a7e5cd9fe5..27b37e9d9d8 100644 --- a/internal/cache/backend.go +++ b/internal/cache/backend.go @@ -161,14 +161,10 @@ func (b *Backend) Load(ctx context.Context, h backend.Handle, length int, offset // try loading from cache without checking that the handle is actually cached inCache, err := b.loadFromCache(h, length, offset, consumer) if inCache { - if err == nil { - return nil - } - - // drop from cache and retry once - _ = b.Cache.remove(h) + debug.Log("error loading %v from cache: %v", h, err) + // the caller must explicitly use cache.Forget() to remove the cache entry + return err } - debug.Log("error loading %v from cache: %v", h, err) // if we don't automatically cache this file type, fall back to the backend if !autoCacheTypes(h) { @@ -184,6 +180,7 @@ func (b *Backend) Load(ctx context.Context, h backend.Handle, length int, offset inCache, err = b.loadFromCache(h, length, offset, consumer) if inCache { + debug.Log("error loading %v from cache: %v", h, err) return err } diff --git a/internal/cache/file.go b/internal/cache/file.go index b2e9ec61800..921add24c58 100644 --- a/internal/cache/file.go +++ b/internal/cache/file.go @@ -55,14 +55,12 @@ func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, size := fi.Size() if size <= int64(crypto.CiphertextLength(0)) { _ = f.Close() - _ = c.remove(h) - return nil, errors.Errorf("cached file %v is truncated, removing", h) + return nil, errors.Errorf("cached file %v is truncated", h) } if size < offset+int64(length) { _ = f.Close() - _ = c.remove(h) - return nil, errors.Errorf("cached file %v is too short, removing", h) + return nil, errors.Errorf("cached file %v is too short", h) } if offset > 0 { @@ -139,6 +137,10 @@ func (c *Cache) save(h backend.Handle, rd io.Reader) error { return errors.WithStack(err) } +func (c *Cache) Forget(h backend.Handle) error { + return c.remove(h) +} + // remove deletes a file. When the file is not cached, no error is returned. 
func (c *Cache) remove(h backend.Handle) error { if !c.canBeCached(h.Type) { diff --git a/internal/repository/raw.go b/internal/repository/raw.go index d173908d401..31443b0107e 100644 --- a/internal/repository/raw.go +++ b/internal/repository/raw.go @@ -7,7 +7,6 @@ import ( "io" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" ) @@ -17,47 +16,41 @@ import ( func (r *Repository) LoadRaw(ctx context.Context, t restic.FileType, id restic.ID) (buf []byte, err error) { h := backend.Handle{Type: t, Name: id.String()} - ctx, cancel := context.WithCancel(ctx) + buf, err = loadRaw(ctx, r.be, h) - var dataErr error - retriedInvalidData := false - err = r.be.Load(ctx, h, 0, 0, func(rd io.Reader) error { - // make sure this is idempotent, in case an error occurs this function may be called multiple times! - wr := bytes.NewBuffer(buf[:0]) - _, cerr := io.Copy(wr, rd) - if cerr != nil { - return cerr + // retry loading damaged data only once. If a file fails to download correctly + // the second time, then it is likely corrupted at the backend. + if h.Type != backend.ConfigFile && id != restic.Hash(buf) { + if r.Cache != nil { + // Cleanup cache to make sure it's not the cached copy that is broken. + // Ignore error as there's not much we can do in that case. + _ = r.Cache.Forget(h) } - buf = wr.Bytes() - // retry loading damaged data only once. If a file fails to download correctly - // the second time, then it is likely corrupted at the backend. - if h.Type != backend.ConfigFile { - if id != restic.Hash(buf) { - if !retriedInvalidData { - debug.Log("retry loading broken blob %v", h) - retriedInvalidData = true - } else { - // with a canceled context there is not guarantee which error will - // be returned by `be.Load`. - dataErr = fmt.Errorf("loadAll(%v): %w", h, restic.ErrInvalidData) - cancel() - } - return restic.ErrInvalidData - } - } - return nil - }) + buf, err = loadRaw(ctx, r.be, h) - // Return corrupted data to the caller if it is still broken the second time to - // let the caller decide what to do with the data. - if dataErr != nil { - return buf, dataErr + if err == nil && id != restic.Hash(buf) { + // Return corrupted data to the caller if it is still broken the second time to + // let the caller decide what to do with the data. 
+ return buf, fmt.Errorf("LoadRaw(%v): %w", h, restic.ErrInvalidData) + } } if err != nil { return nil, err } - return buf, nil } + +func loadRaw(ctx context.Context, be backend.Backend, h backend.Handle) (buf []byte, err error) { + err = be.Load(ctx, h, 0, 0, func(rd io.Reader) error { + wr := new(bytes.Buffer) + _, cerr := io.Copy(wr, rd) + if cerr != nil { + return cerr + } + buf = wr.Bytes() + return cerr + }) + return buf, err +} diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index d7481117a07..28829e4cf8b 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -9,6 +9,7 @@ import ( "math/rand" "os" "path/filepath" + "sync" "testing" "time" @@ -264,6 +265,7 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { type damageOnceBackend struct { backend.Backend + m sync.Map } func (be *damageOnceBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { @@ -271,13 +273,13 @@ func (be *damageOnceBackend) Load(ctx context.Context, h backend.Handle, length if h.Type == restic.ConfigFile { return be.Backend.Load(ctx, h, length, offset, fn) } - // return broken data on the first try - err := be.Backend.Load(ctx, h, length+1, offset, fn) - if err != nil { - // retry - err = be.Backend.Load(ctx, h, length, offset, fn) + + _, retry := be.m.Swap(h, true) + if !retry { + // return broken data on the first try + length++ } - return err + return be.Backend.Load(ctx, h, length, offset, fn) } func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) { From e33ce7f408ea78392810ede89c42cd195b60a2f1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 15:44:05 +0200 Subject: [PATCH 207/893] repository: retry failed LoadBlob once --- internal/repository/repository.go | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index f4d83c1e1f8..77652278f17 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -235,11 +235,27 @@ func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic. // try cached pack files first sortCachedPacksFirst(r.Cache, blobs) + buf, err := r.loadBlob(ctx, blobs, buf) + if err != nil { + if r.Cache != nil { + for _, blob := range blobs { + h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: blob.Type.IsMetadata()} + // ignore errors as there's not much we can do here + _ = r.Cache.Forget(h) + } + } + + buf, err = r.loadBlob(ctx, blobs, buf) + } + return buf, err +} + +func (r *Repository) loadBlob(ctx context.Context, blobs []restic.PackedBlob, buf []byte) ([]byte, error) { var lastError error for _, blob := range blobs { - debug.Log("blob %v/%v found: %v", t, id, blob) + debug.Log("blob %v found: %v", blob.BlobHandle, blob) // load blob from pack - h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: t.IsMetadata()} + h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: blob.Type.IsMetadata()} switch { case cap(buf) < int(blob.Length): @@ -281,7 +297,7 @@ func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic. 
return nil, lastError } - return nil, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs)) + return nil, errors.Errorf("loading %v from %v packs failed", blobs[0].BlobHandle, len(blobs)) } // LookupBlobSize returns the size of blob id. From 7017adb7e9e40b0bb35593da4ade58f852b9e25c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 15:44:27 +0200 Subject: [PATCH 208/893] repository: retry failed ListPack once --- internal/repository/repository.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 77652278f17..1efaf454854 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -869,7 +869,17 @@ func (r *Repository) List(ctx context.Context, t restic.FileType, fn func(restic func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, uint32, error) { h := backend.Handle{Type: restic.PackFile, Name: id.String()} - return pack.List(r.Key(), backend.ReaderAt(ctx, r.Backend(), h), size) + entries, hdrSize, err := pack.List(r.Key(), backend.ReaderAt(ctx, r.Backend(), h), size) + if err != nil { + if r.Cache != nil { + // ignore error as there is not much we can do here + _ = r.Cache.Forget(h) + } + + // retry on error + entries, hdrSize, err = pack.List(r.Key(), backend.ReaderAt(ctx, r.Backend(), h), size) + } + return entries, hdrSize, err } // Delete calls backend.Delete() if implemented, and returns an error From e401af07b291828c73e7dab970d92e57e01afc7d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 17:45:58 +0200 Subject: [PATCH 209/893] check: fix error message formatting --- internal/checker/checker.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index f60a11f6296..016b9c5bff3 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -608,12 +608,12 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []r // failed to load the pack file, return as further checks cannot succeed anyways debug.Log(" error streaming pack (partial %v): %v", isPartialReadError, err) if isPartialReadError { - return &ErrPackData{PackID: id, errs: append(errs, errors.Errorf("partial download error: %w", err))} + return &ErrPackData{PackID: id, errs: append(errs, fmt.Errorf("partial download error: %w", err))} } // The check command suggests to repair files for which a `ErrPackData` is returned. However, this file // completely failed to download such that there's no point in repairing anything. - return errors.Errorf("download error: %w", err) + return fmt.Errorf("download error: %w", err) } if !hash.Equal(id) { debug.Log("pack ID does not match, want %v, got %v", id, hash) From 433a6aad292b8799a65c9cbf5a1488b1b9f06908 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 17:46:36 +0200 Subject: [PATCH 210/893] repository: remove redundant blob loading fallback from RepairPacks LoadBlobsFromPack already implements the same fallback behavior. 
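To make that contract concrete, here is a minimal, self-contained sketch of the callback pattern this change relies on. The names blobHandle and loadBlobsFromPack are simplified placeholders for restic's real restic.BlobHandle and repository method, not the actual implementation; the point is only that the loader retries internally and reports each blob's final outcome, so the callback can log a failed blob and continue.

    package main

    import (
        "errors"
        "fmt"
    )

    // blobHandle is a placeholder for restic.BlobHandle.
    type blobHandle struct{ id string }

    // loadBlobsFromPack stands in for the repository method: it performs its
    // own per-blob fallback internally and reports the final result of every
    // blob through the callback.
    func loadBlobsFromPack(blobs []blobHandle, fn func(h blobHandle, buf []byte, err error) error) error {
        for _, h := range blobs {
            var buf []byte
            var err error
            if h.id == "broken" {
                err = errors.New("blob unreadable after all attempts")
            } else {
                buf = []byte("payload of " + h.id)
            }
            if cbErr := fn(h, buf, err); cbErr != nil {
                return cbErr
            }
        }
        return nil
    }

    func main() {
        blobs := []blobHandle{{id: "good"}, {id: "broken"}}
        _ = loadBlobsFromPack(blobs, func(h blobHandle, buf []byte, err error) error {
            if err != nil {
                // The loader already exhausted its fallback, so the callback
                // only reports the blob as lost and moves on.
                fmt.Printf("failed to load blob %v: %v\n", h.id, err)
                return nil
            }
            fmt.Printf("re-saving blob %v (%d bytes)\n", h.id, len(buf))
            return nil
        })
    }

Under that contract, a second fallback inside the callback would only repeat work the loader has already done, which is why the hunk below removes it.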
--- internal/repository/repair_pack.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/internal/repository/repair_pack.go b/internal/repository/repair_pack.go index a4261517ac4..a0bd56012d9 100644 --- a/internal/repository/repair_pack.go +++ b/internal/repository/repair_pack.go @@ -31,12 +31,8 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, err := repo.LoadBlobsFromPack(wgCtx, b.PackID, blobs, func(blob restic.BlobHandle, buf []byte, err error) error { if err != nil { - // Fallback path - buf, err = repo.LoadBlob(wgCtx, blob.Type, blob.ID, nil) - if err != nil { - printer.E("failed to load blob %v: %v", blob.ID, err) - return nil - } + printer.E("failed to load blob %v: %v", blob.ID, err) + return nil } id, _, _, err := repo.SaveBlob(wgCtx, blob.Type, buf, restic.ID{}, true) if !id.Equal(blob.ID) { From 8cce06d915c60d2daa2ee24eb9dae8616db82dfe Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 18:19:48 +0200 Subject: [PATCH 211/893] repair packs: drop experimental warning This warning should already have been removed once the feature flag was dropped. --- cmd/restic/cmd_repair_packs.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index c57a316906d..ab8c7f47598 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -17,8 +17,6 @@ var cmdRepairPacks = &cobra.Command{ Use: "packs [packIDs...]", Short: "Salvage damaged pack files", Long: ` -WARNING: The CLI for this command is experimental and will likely change in the future! - The "repair packs" command extracts intact blobs from the specified pack files, rebuilds the index to remove the damaged pack files and removes the pack files from the repository. From 97a307df1ab111e9307e7eb5d341b5032b36b842 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 18:21:53 +0200 Subject: [PATCH 212/893] cache: Always use cached file if it exists A file is always cached whole. Thus, any out of bounds access will also fail when directed at the backend. To handle case in which the cached file is broken, then caller must call Cache.Forget(h) for the file in question. --- internal/cache/backend.go | 7 ++++--- internal/cache/file.go | 22 ++++++++++++---------- internal/cache/file_test.go | 24 +++++++++++++----------- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/internal/cache/backend.go b/internal/cache/backend.go index 27b37e9d9d8..b0edfcbe585 100644 --- a/internal/cache/backend.go +++ b/internal/cache/backend.go @@ -40,7 +40,8 @@ func (b *Backend) Remove(ctx context.Context, h backend.Handle) error { return err } - return b.Cache.remove(h) + err = b.Cache.remove(h) + return err } func autoCacheTypes(h backend.Handle) bool { @@ -133,9 +134,9 @@ func (b *Backend) cacheFile(ctx context.Context, h backend.Handle) error { // loadFromCache will try to load the file from the cache. 
func (b *Backend) loadFromCache(h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) (bool, error) { - rd, err := b.Cache.load(h, length, offset) + rd, inCache, err := b.Cache.load(h, length, offset) if err != nil { - return false, err + return inCache, err } err = consumer(rd) diff --git a/internal/cache/file.go b/internal/cache/file.go index 921add24c58..b54bd806d96 100644 --- a/internal/cache/file.go +++ b/internal/cache/file.go @@ -34,46 +34,48 @@ func (c *Cache) canBeCached(t backend.FileType) bool { // load returns a reader that yields the contents of the file with the // given handle. rd must be closed after use. If an error is returned, the -// ReadCloser is nil. -func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, error) { +// ReadCloser is nil. The bool return value indicates whether the requested +// file exists in the cache. It can be true even when no reader is returned +// because length or offset are out of bounds +func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, bool, error) { debug.Log("Load(%v, %v, %v) from cache", h, length, offset) if !c.canBeCached(h.Type) { - return nil, errors.New("cannot be cached") + return nil, false, errors.New("cannot be cached") } f, err := fs.Open(c.filename(h)) if err != nil { - return nil, errors.WithStack(err) + return nil, false, errors.WithStack(err) } fi, err := f.Stat() if err != nil { _ = f.Close() - return nil, errors.WithStack(err) + return nil, true, errors.WithStack(err) } size := fi.Size() if size <= int64(crypto.CiphertextLength(0)) { _ = f.Close() - return nil, errors.Errorf("cached file %v is truncated", h) + return nil, true, errors.Errorf("cached file %v is truncated", h) } if size < offset+int64(length) { _ = f.Close() - return nil, errors.Errorf("cached file %v is too short", h) + return nil, true, errors.Errorf("cached file %v is too short", h) } if offset > 0 { if _, err = f.Seek(offset, io.SeekStart); err != nil { _ = f.Close() - return nil, err + return nil, true, err } } if length <= 0 { - return f, nil + return f, true, nil } - return util.LimitReadCloser(f, int64(length)), nil + return util.LimitReadCloser(f, int64(length)), true, nil } // save saves a file in the cache. 
diff --git a/internal/cache/file_test.go b/internal/cache/file_test.go index 2ce238888ce..331e3251daa 100644 --- a/internal/cache/file_test.go +++ b/internal/cache/file_test.go @@ -14,7 +14,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" ) @@ -22,7 +22,7 @@ import ( func generateRandomFiles(t testing.TB, tpe backend.FileType, c *Cache) restic.IDSet { ids := restic.NewIDSet() for i := 0; i < rand.Intn(15)+10; i++ { - buf := test.Random(rand.Int(), 1<<19) + buf := rtest.Random(rand.Int(), 1<<19) id := restic.Hash(buf) h := backend.Handle{Type: tpe, Name: id.String()} @@ -48,10 +48,11 @@ func randomID(s restic.IDSet) restic.ID { } func load(t testing.TB, c *Cache, h backend.Handle) []byte { - rd, err := c.load(h, 0, 0) + rd, inCache, err := c.load(h, 0, 0) if err != nil { t.Fatal(err) } + rtest.Equals(t, true, inCache, "expected inCache flag to be true") if rd == nil { t.Fatalf("load() returned nil reader") @@ -144,7 +145,7 @@ func TestFileLoad(t *testing.T) { c := TestNewCache(t) // save about 5 MiB of data in the cache - data := test.Random(rand.Int(), 5234142) + data := rtest.Random(rand.Int(), 5234142) id := restic.ID{} copy(id[:], data) h := backend.Handle{ @@ -169,10 +170,11 @@ func TestFileLoad(t *testing.T) { for _, test := range tests { t.Run(fmt.Sprintf("%v/%v", test.length, test.offset), func(t *testing.T) { - rd, err := c.load(h, test.length, test.offset) + rd, inCache, err := c.load(h, test.length, test.offset) if err != nil { t.Fatal(err) } + rtest.Equals(t, true, inCache, "expected inCache flag to be true") buf, err := io.ReadAll(rd) if err != nil { @@ -225,7 +227,7 @@ func TestFileSaveConcurrent(t *testing.T) { var ( c = TestNewCache(t) - data = test.Random(1, 10000) + data = rtest.Random(1, 10000) g errgroup.Group id restic.ID ) @@ -245,7 +247,7 @@ func TestFileSaveConcurrent(t *testing.T) { // ensure is ENOENT or nil error. time.Sleep(time.Duration(100+rand.Intn(200)) * time.Millisecond) - f, err := c.load(h, 0, 0) + f, _, err := c.load(h, 0, 0) t.Logf("Load error: %v", err) switch { case err == nil: @@ -264,17 +266,17 @@ func TestFileSaveConcurrent(t *testing.T) { }) } - test.OK(t, g.Wait()) + rtest.OK(t, g.Wait()) saved := load(t, c, h) - test.Equals(t, data, saved) + rtest.Equals(t, data, saved) } func TestFileSaveAfterDamage(t *testing.T) { c := TestNewCache(t) - test.OK(t, fs.RemoveAll(c.path)) + rtest.OK(t, fs.RemoveAll(c.path)) // save a few bytes of data in the cache - data := test.Random(123456789, 42) + data := rtest.Random(123456789, 42) id := restic.Hash(data) h := backend.Handle{ Type: restic.PackFile, From e734746f752c89e27cad722640deb342695ac7b3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 18:24:28 +0200 Subject: [PATCH 213/893] cache: forget cached file at most once This is inspired by the circuit breaker pattern used for distributed systems. If too many requests fails, then it is better to immediately fail new requests for a limited time to give the backend time to recover. By only forgetting a file in the cache at most once, we can ensure that a broken file is only retrieved once again from the backend. If the file stored there is broken, previously it would be cached and deleted continuously. Now, it is retrieved only once again, all later requests just use the cached copy and either succeed or fail immediately. 
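The forget-at-most-once idea can be illustrated with a small standalone sketch (hypothetical names and plain string keys, not the actual restic cache types): a sync.Map records every key that has already been deleted once, and later deletion attempts for the same key are rejected.

package main

import (
	"fmt"
	"sync"
)

// onceCache removes a cached entry at most once per process run, mirroring
// the circuit-breaker idea described above. Names here are invented for the
// example; the real implementation lives in internal/cache.
type onceCache struct {
	forgotten sync.Map // key -> struct{}
}

func (c *onceCache) forget(key string, remove func(string) error) error {
	if _, ok := c.forgotten.Load(key); ok {
		// The entry was already deleted once; refuse to do it again so a
		// persistently broken upstream copy is not re-downloaded in a loop.
		return fmt.Errorf("circuit breaker prevents repeated deletion of %q", key)
	}
	if err := remove(key); err != nil {
		return err
	}
	c.forgotten.Store(key, struct{}{})
	return nil
}

func main() {
	c := &onceCache{}
	rm := func(k string) error { fmt.Println("deleting", k); return nil }
	fmt.Println(c.forget("pack-1234", rm)) // first deletion succeeds
	fmt.Println(c.forget("pack-1234", rm)) // second attempt is blocked
}
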
--- internal/cache/backend.go | 6 +++--- internal/cache/cache.go | 3 +++ internal/cache/file.go | 22 ++++++++++++++++++---- 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/internal/cache/backend.go b/internal/cache/backend.go index b0edfcbe585..24a44d750e6 100644 --- a/internal/cache/backend.go +++ b/internal/cache/backend.go @@ -40,7 +40,7 @@ func (b *Backend) Remove(ctx context.Context, h backend.Handle) error { return err } - err = b.Cache.remove(h) + _, err = b.Cache.remove(h) return err } @@ -124,7 +124,7 @@ func (b *Backend) cacheFile(ctx context.Context, h backend.Handle) error { }) if err != nil { // try to remove from the cache, ignore errors - _ = b.Cache.remove(h) + _, _ = b.Cache.remove(h) } return err } @@ -197,7 +197,7 @@ func (b *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, fi, err := b.Backend.Stat(ctx, h) if err != nil && b.Backend.IsNotExist(err) { // try to remove from the cache, ignore errors - _ = b.Cache.remove(h) + _, _ = b.Cache.remove(h) } return fi, err diff --git a/internal/cache/cache.go b/internal/cache/cache.go index 19b3182df5e..a55b51c700f 100644 --- a/internal/cache/cache.go +++ b/internal/cache/cache.go @@ -6,6 +6,7 @@ import ( "path/filepath" "regexp" "strconv" + "sync" "time" "github.com/pkg/errors" @@ -20,6 +21,8 @@ type Cache struct { path string Base string Created bool + + forgotten sync.Map } const dirMode = 0700 diff --git a/internal/cache/file.go b/internal/cache/file.go index b54bd806d96..12f5f23c571 100644 --- a/internal/cache/file.go +++ b/internal/cache/file.go @@ -1,6 +1,7 @@ package cache import ( + "fmt" "io" "os" "path/filepath" @@ -140,20 +141,33 @@ func (c *Cache) save(h backend.Handle, rd io.Reader) error { } func (c *Cache) Forget(h backend.Handle) error { - return c.remove(h) + h.IsMetadata = false + + if _, ok := c.forgotten.Load(h); ok { + // Delete a file at most once while restic runs. + // This prevents repeatedly caching and forgetting broken files + return fmt.Errorf("circuit breaker prevents repeated deletion of cached file %v", h) + } + + removed, err := c.remove(h) + if removed { + c.forgotten.Store(h, struct{}{}) + } + return err } // remove deletes a file. When the file is not cached, no error is returned. 
-func (c *Cache) remove(h backend.Handle) error { +func (c *Cache) remove(h backend.Handle) (bool, error) { if !c.canBeCached(h.Type) { - return nil + return false, nil } err := fs.Remove(c.filename(h)) + removed := err == nil if errors.Is(err, os.ErrNotExist) { err = nil } - return err + return removed, err } // Clear removes all files of type t from the cache that are not contained in From 385cee09dc37c73eee39f7cb89d85393559ddf6d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 18:30:04 +0200 Subject: [PATCH 214/893] repository: fix caching of tree packs in LoadBlobsFromPack --- internal/repository/repository.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 1efaf454854..bbdaa16a7a9 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -993,7 +993,7 @@ func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn } func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, dec *zstd.Decoder, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { - h := backend.Handle{Type: restic.PackFile, Name: packID.String(), IsMetadata: false} + h := backend.Handle{Type: restic.PackFile, Name: packID.String(), IsMetadata: blobs[0].Type.IsMetadata()} dataStart := blobs[0].Offset dataEnd := blobs[len(blobs)-1].Offset + blobs[len(blobs)-1].Length From 3ff063e913f992c0748ce9b7fffc5955d4096524 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 18:48:22 +0200 Subject: [PATCH 215/893] check: verify pack a second time if broken --- internal/checker/checker.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 016b9c5bff3..1ae6b23f103 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -725,6 +725,16 @@ func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *p } err := checkPack(ctx, c.repo, ps.id, ps.blobs, ps.size, bufRd, dec) + if err != nil { + // retry pack verification to detect transient errors + err2 := checkPack(ctx, c.repo, ps.id, ps.blobs, ps.size, bufRd, dec) + if err2 != nil { + err = err2 + } else { + err = fmt.Errorf("second check successful, original error %w", err) + } + } + p.Add(1) if err == nil { continue From 5214af88e2dc1ebd1c229ebfd28d78516b5d6166 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 21:20:57 +0200 Subject: [PATCH 216/893] cache: test forget behavior --- internal/cache/backend_test.go | 74 +++++++++++++++++++++++++++++++++- 1 file changed, 73 insertions(+), 1 deletion(-) diff --git a/internal/cache/backend_test.go b/internal/cache/backend_test.go index c8d6678540a..0de90471e5c 100644 --- a/internal/cache/backend_test.go +++ b/internal/cache/backend_test.go @@ -5,6 +5,7 @@ import ( "context" "io" "math/rand" + "strings" "sync" "testing" "time" @@ -91,7 +92,7 @@ func TestBackend(t *testing.T) { loadAndCompare(t, be, h, data) // load data via cache - loadAndCompare(t, be, h, data) + loadAndCompare(t, wbe, h, data) // remove directly remove(t, be, h) @@ -114,6 +115,77 @@ func TestBackend(t *testing.T) { } } +type loadCountingBackend struct { + backend.Backend + ctr int +} + +func (l *loadCountingBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + l.ctr++ + return l.Backend.Load(ctx, 
h, length, offset, fn) +} + +func TestOutOfBoundsAccess(t *testing.T) { + be := &loadCountingBackend{Backend: mem.New()} + c := TestNewCache(t) + wbe := c.Wrap(be) + + h, data := randomData(50) + save(t, be, h, data) + + // load out of bounds + err := wbe.Load(context.TODO(), h, 100, 100, func(rd io.Reader) error { + t.Error("cache returned non-existant file section") + return errors.New("broken") + }) + test.Assert(t, strings.Contains(err.Error(), " is too short"), "expected too short error, got %v", err) + test.Equals(t, 1, be.ctr, "expected file to be loaded only once") + // file must nevertheless get cached + if !c.Has(h) { + t.Errorf("cache doesn't have file after load") + } + + // start within bounds, but request too large chunk + err = wbe.Load(context.TODO(), h, 100, 0, func(rd io.Reader) error { + t.Error("cache returned non-existant file section") + return errors.New("broken") + }) + test.Assert(t, strings.Contains(err.Error(), " is too short"), "expected too short error, got %v", err) + test.Equals(t, 1, be.ctr, "expected file to be loaded only once") +} + +func TestForget(t *testing.T) { + be := &loadCountingBackend{Backend: mem.New()} + c := TestNewCache(t) + wbe := c.Wrap(be) + + h, data := randomData(50) + save(t, be, h, data) + + loadAndCompare(t, wbe, h, data) + test.Equals(t, 1, be.ctr, "expected file to be loaded once") + + // must still exist even if load returns an error + exp := errors.New("error") + err := wbe.Load(context.TODO(), h, 0, 0, func(rd io.Reader) error { + return exp + }) + test.Equals(t, exp, err, "wrong error") + test.Assert(t, c.Has(h), "missing cache entry") + + test.OK(t, c.Forget(h)) + test.Assert(t, !c.Has(h), "cache entry should have been removed") + + // cache it again + loadAndCompare(t, wbe, h, data) + test.Assert(t, c.Has(h), "missing cache entry") + + // forget must delete file only once + err = c.Forget(h) + test.Assert(t, strings.Contains(err.Error(), "circuit breaker prevents repeated deletion of cached file"), "wrong error message %q", err) + test.Assert(t, c.Has(h), "cache entry should still exist") +} + type loadErrorBackend struct { backend.Backend loadError error From ac805d6838b835ab416dabf26d08e682e1fa3c38 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 21:21:08 +0200 Subject: [PATCH 217/893] cache: cleanup debug logs --- internal/cache/backend.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/cache/backend.go b/internal/cache/backend.go index 24a44d750e6..63bb6f85f0a 100644 --- a/internal/cache/backend.go +++ b/internal/cache/backend.go @@ -181,7 +181,9 @@ func (b *Backend) Load(ctx context.Context, h backend.Handle, length int, offset inCache, err = b.loadFromCache(h, length, offset, consumer) if inCache { - debug.Log("error loading %v from cache: %v", h, err) + if err != nil { + debug.Log("error loading %v from cache: %v", h, err) + } return err } From 4f45668b7ca3903939be74c22b325066a3ea52a0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 21:21:42 +0200 Subject: [PATCH 218/893] repository: rework and extend LoadRaw tests --- internal/repository/raw_test.go | 71 ++++++++++++++++++++------------- 1 file changed, 44 insertions(+), 27 deletions(-) diff --git a/internal/repository/raw_test.go b/internal/repository/raw_test.go index 42be8827ba3..28786dbcd8f 100644 --- a/internal/repository/raw_test.go +++ b/internal/repository/raw_test.go @@ -4,12 +4,12 @@ import ( "bytes" "context" "io" - "math/rand" "testing" "github.com/restic/restic/internal/backend" 
"github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/backend/mock" + "github.com/restic/restic/internal/cache" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -19,13 +19,13 @@ import ( const KiB = 1 << 10 const MiB = 1 << 20 -func TestLoadAll(t *testing.T) { +func TestLoadRaw(t *testing.T) { b := mem.New() repo, err := repository.New(b, repository.Options{}) rtest.OK(t, err) for i := 0; i < 5; i++ { - data := rtest.Random(23+i, rand.Intn(MiB)+500*KiB) + data := rtest.Random(23+i, 500*KiB) id := restic.Hash(data) h := backend.Handle{Name: id.String(), Type: backend.PackFile} @@ -47,25 +47,12 @@ func TestLoadAll(t *testing.T) { } } -type quickRetryBackend struct { - backend.Backend -} - -func (be *quickRetryBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { - err := be.Backend.Load(ctx, h, length, offset, fn) - if err != nil { - // retry - err = be.Backend.Load(ctx, h, length, offset, fn) - } - return err -} - -func TestLoadAllBroken(t *testing.T) { +func TestLoadRawBroken(t *testing.T) { b := mock.NewBackend() repo, err := repository.New(b, repository.Options{}) rtest.OK(t, err) - data := rtest.Random(23, rand.Intn(MiB)+500*KiB) + data := rtest.Random(23, 10*KiB) id := restic.Hash(data) // damage buffer data[0] ^= 0xff @@ -74,18 +61,48 @@ func TestLoadAllBroken(t *testing.T) { return io.NopCloser(bytes.NewReader(data)), nil } - // must fail on first try - _, err = repo.LoadRaw(context.TODO(), backend.PackFile, id) + // must detect but still return corrupt data + buf, err := repo.LoadRaw(context.TODO(), backend.PackFile, id) + rtest.Assert(t, bytes.Equal(buf, data), "wrong data returned") rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "missing expected ErrInvalidData error, got %v", err) - // must return the broken data after a retry - be := &quickRetryBackend{Backend: b} - repo, err = repository.New(be, repository.Options{}) + // cause the first access to fail, but repair the data for the second access + data[0] ^= 0xff + loadCtr := 0 + b.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { + data[0] ^= 0xff + loadCtr++ + return io.NopCloser(bytes.NewReader(data)), nil + } + + // must retry load of corrupted data + buf, err = repo.LoadRaw(context.TODO(), backend.PackFile, id) rtest.OK(t, err) - buf, err := repo.LoadRaw(context.TODO(), backend.PackFile, id) - rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "missing expected ErrInvalidData error, got %v", err) + rtest.Assert(t, bytes.Equal(buf, data), "wrong data returned") + rtest.Equals(t, 2, loadCtr, "missing retry on broken data") +} + +func TestLoadRawBrokenWithCache(t *testing.T) { + b := mock.NewBackend() + c := cache.TestNewCache(t) + repo, err := repository.New(b, repository.Options{}) + rtest.OK(t, err) + repo.UseCache(c) - if !bytes.Equal(buf, data) { - t.Fatalf("wrong data returned") + data := rtest.Random(23, 10*KiB) + id := restic.Hash(data) + + loadCtr := 0 + // cause the first access to fail, but repair the data for the second access + b.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { + data[0] ^= 0xff + loadCtr++ + return io.NopCloser(bytes.NewReader(data)), nil } + + // must retry load of corrupted data + buf, err := repo.LoadRaw(context.TODO(), backend.SnapshotFile, id) + rtest.OK(t, err) + rtest.Assert(t, 
bytes.Equal(buf, data), "wrong data returned") + rtest.Equals(t, 2, loadCtr, "missing retry on broken data") } From bf160967713123277c7ed40a237142c69a75ae6f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 21:22:19 +0200 Subject: [PATCH 219/893] repository: test LoadBlob retries --- internal/repository/repository_test.go | 31 +++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 28829e4cf8b..924e5126953 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -15,11 +15,14 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/local" + "github.com/restic/restic/internal/backend/mem" + "github.com/restic/restic/internal/cache" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" ) @@ -140,6 +143,28 @@ func testLoadBlob(t *testing.T, version uint) { } } +func TestLoadBlobBroken(t *testing.T) { + be := mem.New() + repo := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}).(*repository.Repository) + buf := test.Random(42, 1000) + + var wg errgroup.Group + repo.StartPackUploader(context.TODO(), &wg) + id, _, _, err := repo.SaveBlob(context.TODO(), restic.TreeBlob, buf, restic.ID{}, false) + rtest.OK(t, err) + rtest.OK(t, repo.Flush(context.Background())) + + // setup cache after saving the blob to make sure that the damageOnceBackend damages the cached data + c := cache.TestNewCache(t) + repo.UseCache(c) + + data, err := repo.LoadBlob(context.TODO(), restic.TreeBlob, id, nil) + rtest.OK(t, err) + rtest.Assert(t, bytes.Equal(buf, data), "data mismatch") + pack := repo.Index().Lookup(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID + rtest.Assert(t, c.Has(backend.Handle{Type: restic.PackFile, Name: pack.String()}), "expected tree pack to be cached") +} + func BenchmarkLoadBlob(b *testing.B) { repository.BenchmarkAllVersions(b, benchmarkLoadBlob) } @@ -255,11 +280,7 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, repo.Backend().Hasher())) rtest.OK(t, err) - // without a retry backend this will just return an error that the file is broken _, err = repo.LoadUnpacked(context.TODO(), restic.IndexFile, id) - if err == nil { - t.Fatal("missing expected error") - } rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "unexpected error: %v", err) } @@ -277,7 +298,7 @@ func (be *damageOnceBackend) Load(ctx context.Context, h backend.Handle, length _, retry := be.m.Swap(h, true) if !retry { // return broken data on the first try - length++ + offset++ } return be.Backend.Load(ctx, h, length, offset, fn) } From 987c3b250cca0fa0251f9371b2dfca1f1f9c2edd Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 21:39:52 +0200 Subject: [PATCH 220/893] repository: test retries of ListPack --- internal/repository/repository_test.go | 36 ++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 924e5126953..6f67e2c5e48 100644 --- 
a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -295,6 +295,7 @@ func (be *damageOnceBackend) Load(ctx context.Context, h backend.Handle, length return be.Backend.Load(ctx, h, length, offset, fn) } + h.IsMetadata = false _, retry := be.m.Swap(h, true) if !retry { // return broken data on the first try @@ -421,3 +422,38 @@ func TestInvalidCompression(t *testing.T) { _, err = repository.New(nil, repository.Options{Compression: comp}) rtest.Assert(t, err != nil, "missing error") } + +func TestListPack(t *testing.T) { + be := mem.New() + repo := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}).(*repository.Repository) + buf := test.Random(42, 1000) + + var wg errgroup.Group + repo.StartPackUploader(context.TODO(), &wg) + id, _, _, err := repo.SaveBlob(context.TODO(), restic.TreeBlob, buf, restic.ID{}, false) + rtest.OK(t, err) + rtest.OK(t, repo.Flush(context.Background())) + + // setup cache after saving the blob to make sure that the damageOnceBackend damages the cached data + c := cache.TestNewCache(t) + repo.UseCache(c) + + // Forcibly cache pack file + packID := repo.Index().Lookup(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID + rtest.OK(t, repo.Backend().Load(context.TODO(), backend.Handle{Type: restic.PackFile, IsMetadata: true, Name: packID.String()}, 0, 0, func(rd io.Reader) error { return nil })) + + // Get size to list pack + var size int64 + rtest.OK(t, repo.List(context.TODO(), restic.PackFile, func(id restic.ID, sz int64) error { + if id == packID { + size = sz + } + return nil + })) + + blobs, _, err := repo.ListPack(context.TODO(), packID, size) + rtest.OK(t, err) + rtest.Assert(t, len(blobs) == 1 && blobs[0].ID == id, "unexpected blobs in pack: %v", blobs) + + rtest.Assert(t, !c.Has(backend.Handle{Type: restic.PackFile, Name: packID.String()}), "tree pack should no longer be cached as ListPack does not set IsMetadata in the backend.Handle") +} From ff0744b3af3c4697cecb6dd6c664eeae64d46839 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 22:12:53 +0200 Subject: [PATCH 221/893] check: test checkPack retries --- internal/checker/checker.go | 24 ++++--- internal/checker/checker_test.go | 112 ++++++++++++++++++++++--------- 2 files changed, 96 insertions(+), 40 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 1ae6b23f103..1cee4355cd0 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -532,6 +532,21 @@ func (e *partialReadError) Error() string { // checkPack reads a pack and checks the integrity of all blobs. 
func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { + err := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) + if err != nil { + // retry pack verification to detect transient errors + err2 := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) + if err2 != nil { + err = err2 + } else { + err = fmt.Errorf("check successful on second attempt, original error %w", err) + } + } + return err +} + +func checkPackInner(ctx context.Context, r restic.Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { + debug.Log("checking pack %v", id.String()) if len(blobs) == 0 { @@ -725,15 +740,6 @@ func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *p } err := checkPack(ctx, c.repo, ps.id, ps.blobs, ps.size, bufRd, dec) - if err != nil { - // retry pack verification to detect transient errors - err2 := checkPack(ctx, c.repo, ps.id, ps.blobs, ps.size, bufRd, dec) - if err2 != nil { - err = err2 - } else { - err = fmt.Errorf("second check successful, original error %w", err) - } - } p.Add(1) if err == nil { diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index 9746e9f5d8b..5fc82eed094 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -8,6 +8,7 @@ import ( "path/filepath" "sort" "strconv" + "strings" "sync" "testing" "time" @@ -325,42 +326,91 @@ func induceError(data []byte) { data[pos] ^= 1 } +// errorOnceBackend randomly modifies data when reading a file for the first time. +type errorOnceBackend struct { + backend.Backend + m sync.Map +} + +func (b *errorOnceBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { + _, isRetry := b.m.Swap(h, struct{}{}) + return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error { + if !isRetry && h.Type != restic.ConfigFile { + return consumer(errorReadCloser{rd}) + } + return consumer(rd) + }) +} + func TestCheckerModifiedData(t *testing.T) { repo := repository.TestRepository(t) sn := archiver.TestSnapshot(t, repo, ".", nil) t.Logf("archived as %v", sn.ID().Str()) - beError := &errorBackend{Backend: repo.Backend()} - checkRepo := repository.TestOpenBackend(t, beError) - - chkr := checker.New(checkRepo, false) - - hints, errs := chkr.LoadIndex(context.TODO(), nil) - if len(errs) > 0 { - t.Fatalf("expected no errors, got %v: %v", len(errs), errs) - } - - if len(hints) > 0 { - t.Errorf("expected no hints, got %v: %v", len(hints), hints) - } - - beError.ProduceErrors = true - errFound := false - for _, err := range checkPacks(chkr) { - t.Logf("pack error: %v", err) - } - - for _, err := range checkStruct(chkr) { - t.Logf("struct error: %v", err) - } - - for _, err := range checkData(chkr) { - t.Logf("data error: %v", err) - errFound = true - } - - if !errFound { - t.Fatal("no error found, checker is broken") + errBe := &errorBackend{Backend: repo.Backend()} + + for _, test := range []struct { + name string + be backend.Backend + damage func() + check func(t *testing.T, err error) + }{ + { + "errorBackend", + errBe, + func() { + errBe.ProduceErrors = true + }, + func(t *testing.T, err error) { + if err == nil { + t.Fatal("no error found, checker is broken") + } + }, + }, + { + "errorOnceBackend", + &errorOnceBackend{Backend: repo.Backend()}, + func() {}, + func(t *testing.T, err error) { + if !strings.Contains(err.Error(), "check successful on second 
attempt, original error pack") { + t.Fatalf("wrong error found, got %v", err) + } + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + checkRepo := repository.TestOpenBackend(t, test.be) + + chkr := checker.New(checkRepo, false) + + hints, errs := chkr.LoadIndex(context.TODO(), nil) + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } + + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } + + test.damage() + var err error + for _, err := range checkPacks(chkr) { + t.Logf("pack error: %v", err) + } + + for _, err := range checkStruct(chkr) { + t.Logf("struct error: %v", err) + } + + for _, cerr := range checkData(chkr) { + t.Logf("data error: %v", cerr) + if err == nil { + err = cerr + } + } + + test.check(t, err) + }) } } From 8f8d872a68eabf2a3f476948a4be88bb005d7c68 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 9 May 2024 23:26:21 +0200 Subject: [PATCH 222/893] fix compatibility with go 1.19 --- internal/checker/checker_test.go | 2 +- internal/repository/repository_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index 5fc82eed094..ee18f893a46 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -333,7 +333,7 @@ type errorOnceBackend struct { } func (b *errorOnceBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { - _, isRetry := b.m.Swap(h, struct{}{}) + _, isRetry := b.m.LoadOrStore(h, struct{}{}) return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error { if !isRetry && h.Type != restic.ConfigFile { return consumer(errorReadCloser{rd}) diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 6f67e2c5e48..67622fdf812 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -296,8 +296,8 @@ func (be *damageOnceBackend) Load(ctx context.Context, h backend.Handle, length } h.IsMetadata = false - _, retry := be.m.Swap(h, true) - if !retry { + _, isRetry := be.m.LoadOrStore(h, true) + if !isRetry { // return broken data on the first try offset++ } From 74d90653e0788d9e6e73a9427fa4fe6decaad843 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 11 May 2024 00:18:11 +0200 Subject: [PATCH 223/893] check: use ReadFull to load pack header in checkPack This ensures that the pack header is actually read completely. Previously, for a truncated file it was possible to only read a part of the header, as backend.Load(...) is not guaranteed to return as many bytes as requested by the length parameter. 
--- internal/checker/checker.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 1cee4355cd0..d6474f86e83 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -605,11 +605,13 @@ func checkPackInner(ctx context.Context, r restic.Repository, id restic.ID, blob if err != nil { return &partialReadError{err} } + curPos += minHdrStart - curPos } // read remainder, which should be the pack header var err error - hdrBuf, err = io.ReadAll(bufRd) + hdrBuf = make([]byte, int(size-int64(curPos))) + _, err = io.ReadFull(bufRd, hdrBuf) if err != nil { return &partialReadError{err} } From 0c1ba6d95dafe290589162ff61fe2ac0d1c8cfc8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 00:43:50 +0200 Subject: [PATCH 224/893] backend: remove unused Location method --- internal/backend/azure/azure.go | 5 ----- internal/backend/b2/b2.go | 5 ----- internal/backend/backend.go | 4 ---- internal/backend/dryrun/dry_backend.go | 5 ----- internal/backend/dryrun/dry_backend_test.go | 6 ------ internal/backend/gs/gs.go | 5 ----- internal/backend/local/local.go | 5 ----- internal/backend/mem/mem_backend.go | 5 ----- internal/backend/mock/backend.go | 10 ---------- internal/backend/rest/rest.go | 5 ----- internal/backend/s3/s3.go | 5 ----- internal/backend/sftp/sftp.go | 5 ----- internal/backend/swift/swift.go | 5 ----- internal/backend/test/tests.go | 11 ----------- 14 files changed, 81 deletions(-) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index e9368c268e8..9f879f98990 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -190,11 +190,6 @@ func (be *Backend) Connections() uint { return be.connections } -// Location returns this backend's location (the container name). -func (be *Backend) Location() string { - return be.Join(be.cfg.AccountName, be.cfg.Prefix) -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *Backend) Hasher() hash.Hash { return md5.New() diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go index e3a52813d50..1a5e72aaa71 100644 --- a/internal/backend/b2/b2.go +++ b/internal/backend/b2/b2.go @@ -162,11 +162,6 @@ func (be *b2Backend) Connections() uint { return be.cfg.Connections } -// Location returns the location for the backend. -func (be *b2Backend) Location() string { - return be.cfg.Bucket -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *b2Backend) Hasher() hash.Hash { return nil diff --git a/internal/backend/backend.go b/internal/backend/backend.go index 102322c4f1d..3b0599c3076 100644 --- a/internal/backend/backend.go +++ b/internal/backend/backend.go @@ -14,10 +14,6 @@ import ( // the context package need not be wrapped, as context cancellation is checked // separately by the retrying logic. type Backend interface { - // Location returns a string that describes the type and location of the - // repository. - Location() string - // Connections returns the maximum number of concurrent backend operations. Connections() uint diff --git a/internal/backend/dryrun/dry_backend.go b/internal/backend/dryrun/dry_backend.go index c17b240fa1b..8af0ce9adc4 100644 --- a/internal/backend/dryrun/dry_backend.go +++ b/internal/backend/dryrun/dry_backend.go @@ -46,11 +46,6 @@ func (be *Backend) Connections() uint { return be.b.Connections() } -// Location returns the location of the backend. 
-func (be *Backend) Location() string { - return "DRY:" + be.b.Location() -} - // Delete removes all data in the backend. func (be *Backend) Delete(_ context.Context) error { return nil diff --git a/internal/backend/dryrun/dry_backend_test.go b/internal/backend/dryrun/dry_backend_test.go index 793e544dbb8..be98f531099 100644 --- a/internal/backend/dryrun/dry_backend_test.go +++ b/internal/backend/dryrun/dry_backend_test.go @@ -36,7 +36,6 @@ func TestDry(t *testing.T) { content string wantErr string }{ - {d, "loc", "", "DRY:RAM", ""}, {d, "delete", "", "", ""}, {d, "stat", "a", "", "not found"}, {d, "list", "", "", ""}, @@ -76,11 +75,6 @@ func TestDry(t *testing.T) { if files != step.content { t.Errorf("%d. List = %q, want %q", i, files, step.content) } - case "loc": - loc := step.be.Location() - if loc != step.content { - t.Errorf("%d. Location = %q, want %q", i, loc, step.content) - } case "delete": err = step.be.Delete(ctx) case "remove": diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go index 20da5245ac6..305e9b9c108 100644 --- a/internal/backend/gs/gs.go +++ b/internal/backend/gs/gs.go @@ -197,11 +197,6 @@ func (be *Backend) Connections() uint { return be.connections } -// Location returns this backend's location (the bucket name). -func (be *Backend) Location() string { - return be.Join(be.bucketName, be.prefix) -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *Backend) Hasher() hash.Hash { return md5.New() diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go index 599bee0f69d..f041d608a18 100644 --- a/internal/backend/local/local.go +++ b/internal/backend/local/local.go @@ -93,11 +93,6 @@ func (b *Local) Connections() uint { return b.Config.Connections } -// Location returns this backend's location (the directory name). -func (b *Local) Location() string { - return b.Path -} - // Hasher may return a hash function for calculating a content hash for the backend func (b *Local) Hasher() hash.Hash { return nil diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go index 532380f213a..981c0a18250 100644 --- a/internal/backend/mem/mem_backend.go +++ b/internal/backend/mem/mem_backend.go @@ -222,11 +222,6 @@ func (be *MemoryBackend) Connections() uint { return connectionCount } -// Location returns the location of the backend (RAM). -func (be *MemoryBackend) Location() string { - return "RAM" -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *MemoryBackend) Hasher() hash.Hash { return xxhash.New() diff --git a/internal/backend/mock/backend.go b/internal/backend/mock/backend.go index bd8c6d43b92..a0319844333 100644 --- a/internal/backend/mock/backend.go +++ b/internal/backend/mock/backend.go @@ -21,7 +21,6 @@ type Backend struct { RemoveFn func(ctx context.Context, h backend.Handle) error DeleteFn func(ctx context.Context) error ConnectionsFn func() uint - LocationFn func() string HasherFn func() hash.Hash HasAtomicReplaceFn func() bool } @@ -49,15 +48,6 @@ func (m *Backend) Connections() uint { return m.ConnectionsFn() } -// Location returns a location string. 
-func (m *Backend) Location() string { - if m.LocationFn == nil { - return "" - } - - return m.LocationFn() -} - // Hasher may return a hash function for calculating a content hash for the backend func (m *Backend) Hasher() hash.Hash { if m.HasherFn == nil { diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go index f743c3e5084..1af88ec3f8f 100644 --- a/internal/backend/rest/rest.go +++ b/internal/backend/rest/rest.go @@ -121,11 +121,6 @@ func (b *Backend) Connections() uint { return b.connections } -// Location returns this backend's location (the server's URL). -func (b *Backend) Location() string { - return b.url.String() -} - // Hasher may return a hash function for calculating a content hash for the backend func (b *Backend) Hasher() hash.Hash { return nil diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index a2c95ac328f..bddb5774139 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -321,11 +321,6 @@ func (be *Backend) Connections() uint { return be.cfg.Connections } -// Location returns this backend's location (the bucket name). -func (be *Backend) Location() string { - return be.Join(be.cfg.Bucket, be.cfg.Prefix) -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *Backend) Hasher() hash.Hash { return nil diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index 3591c153068..70fc30a62c9 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -292,11 +292,6 @@ func (r *SFTP) Connections() uint { return r.Config.Connections } -// Location returns this backend's location (the directory name). -func (r *SFTP) Location() string { - return r.p -} - // Hasher may return a hash function for calculating a content hash for the backend func (r *SFTP) Hasher() hash.Hash { return nil diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go index 1643af7fcc5..e6412d0bff0 100644 --- a/internal/backend/swift/swift.go +++ b/internal/backend/swift/swift.go @@ -118,11 +118,6 @@ func (be *beSwift) Connections() uint { return be.connections } -// Location returns this backend's location (the container name). -func (be *beSwift) Location() string { - return be.container -} - // Hasher may return a hash function for calculating a content hash for the backend func (be *beSwift) Hasher() hash.Hash { return md5.New() diff --git a/internal/backend/test/tests.go b/internal/backend/test/tests.go index 4c260d264d5..e2879a3bdeb 100644 --- a/internal/backend/test/tests.go +++ b/internal/backend/test/tests.go @@ -88,17 +88,6 @@ func (s *Suite[C]) TestCreateWithConfig(t *testing.T) { } } -// TestLocation tests that a location string is returned. -func (s *Suite[C]) TestLocation(t *testing.T) { - b := s.open(t) - defer s.close(t, b) - - l := b.Location() - if l == "" { - t.Fatalf("invalid location string %q", l) - } -} - // TestConfig saves and loads a config from the backend. 
func (s *Suite[C]) TestConfig(t *testing.T) { b := s.open(t) From 979519818969fb8093b0df1bcf69a12e80120177 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 01:13:28 +0200 Subject: [PATCH 225/893] debug: remove Backend.Stat() usage --- cmd/restic/cmd_debug.go | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index a63ac8c4c91..9fb6969d03b 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -20,7 +20,6 @@ import ( "github.com/spf13/cobra" "golang.org/x/sync/errgroup" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/index" @@ -475,21 +474,12 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamine func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repository, id restic.ID) error { Printf("examine %v\n", id) - h := backend.Handle{ - Type: restic.PackFile, - Name: id.String(), - } - fi, err := repo.Backend().Stat(ctx, h) - if err != nil { - return err - } - Printf(" file size is %v\n", fi.Size) - buf, err := repo.LoadRaw(ctx, restic.PackFile, id) // also process damaged pack files if buf == nil { return err } + Printf(" file size is %v\n", len(buf)) gotID := restic.Hash(buf) if !id.Equal(gotID) { Printf(" wanted hash %v, got %v\n", id, gotID) @@ -508,7 +498,7 @@ func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repo continue } - checkPackSize(blobs, fi.Size) + checkPackSize(blobs, len(buf)) err = loadBlobs(ctx, opts, repo, id, blobs) if err != nil { @@ -521,11 +511,11 @@ func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repo Printf(" ========================================\n") Printf(" inspect the pack itself\n") - blobs, _, err := repo.ListPack(ctx, id, fi.Size) + blobs, _, err := repo.ListPack(ctx, id, int64(len(buf))) if err != nil { return fmt.Errorf("pack %v: %v", id.Str(), err) } - checkPackSize(blobs, fi.Size) + checkPackSize(blobs, len(buf)) if !blobsLoaded { return loadBlobs(ctx, opts, repo, id, blobs) @@ -533,7 +523,7 @@ func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repo return nil } -func checkPackSize(blobs []restic.Blob, fileSize int64) { +func checkPackSize(blobs []restic.Blob, fileSize int) { // track current size and offset var size, offset uint64 From 8274f5b101d01ade31e108b4ab32333a955d7433 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 01:14:02 +0200 Subject: [PATCH 226/893] prune: remove Backend.IsNotExist() Only handling one specific error is not particularly useful. 
--- cmd/restic/cmd_prune.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index cbec100df05..d62a7df7569 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -285,10 +285,6 @@ func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots r err = restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar) if err != nil { - if repo.Backend().IsNotExist(err) { - return nil, errors.Fatal("unable to load a tree from the repository: " + err.Error()) - } - return nil, err } return usedBlobs, nil From ab9077bc13278481d59392d78d6cdf53aba6d40b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 01:16:23 +0200 Subject: [PATCH 227/893] replace usages of backend.Remove() with repository.RemoveUnpacked() RemoveUnpacked will eventually block removal of all filetypes other than snapshots. However, getting there requires a major refactor to provide some components with privileged access. --- cmd/restic/cmd_rewrite.go | 7 ++----- cmd/restic/cmd_tag.go | 4 +--- cmd/restic/integration_helpers_test.go | 4 ++-- internal/checker/checker_test.go | 23 +++++++---------------- internal/repository/repack_test.go | 2 +- internal/repository/repository.go | 5 +++++ internal/restic/lock.go | 15 +++++++-------- internal/restic/lock_test.go | 5 ++--- internal/restic/parallel.go | 4 +--- internal/restic/repository.go | 2 ++ 10 files changed, 30 insertions(+), 41 deletions(-) diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 38a868c5c97..83ace7a11a5 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -8,7 +8,6 @@ import ( "github.com/spf13/cobra" "golang.org/x/sync/errgroup" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" @@ -181,8 +180,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r if dryRun { Verbosef("would delete empty snapshot\n") } else { - h := backend.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()} - if err = repo.Backend().Remove(ctx, h); err != nil { + if err = repo.RemoveUnpacked(ctx, restic.SnapshotFile, *sn.ID()); err != nil { return false, err } debug.Log("removed empty snapshot %v", sn.ID()) @@ -241,8 +239,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r Verbosef("saved new snapshot %v\n", id.Str()) if forget { - h := backend.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()} - if err = repo.Backend().Remove(ctx, h); err != nil { + if err = repo.RemoveUnpacked(ctx, restic.SnapshotFile, *sn.ID()); err != nil { return false, err } debug.Log("removed old snapshot %v", sn.ID()) diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 3bf386f2cd3..033dc5ebea0 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -5,7 +5,6 @@ import ( "github.com/spf13/cobra" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" @@ -86,8 +85,7 @@ func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Sna debug.Log("new snapshot saved as %v", id) // Remove the old snapshot. 
- h := backend.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()} - if err = repo.Backend().Remove(ctx, h); err != nil { + if err = repo.RemoveUnpacked(ctx, restic.SnapshotFile, *sn.ID()); err != nil { return false, err } diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index e7a90dd560a..2812eda6d7e 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -267,7 +267,7 @@ func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { defer unlock() for id := range remove { - rtest.OK(t, r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()})) + rtest.OK(t, r.RemoveUnpacked(ctx, restic.PackFile, id)) } } @@ -291,7 +291,7 @@ func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, rem if treePacks.Has(id) != removeTreePacks || keep.Has(id) { return nil } - return r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}) + return r.RemoveUnpacked(ctx, restic.PackFile, id) })) } diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index ee18f893a46..38a166000cf 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -94,11 +94,8 @@ func TestMissingPack(t *testing.T) { repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - packHandle := backend.Handle{ - Type: restic.PackFile, - Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6", - } - test.OK(t, repo.Backend().Remove(context.TODO(), packHandle)) + packID := restic.TestParseID("657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6") + test.OK(t, repo.RemoveUnpacked(context.TODO(), restic.PackFile, packID)) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) @@ -113,7 +110,7 @@ func TestMissingPack(t *testing.T) { "expected exactly one error, got %v", len(errs)) if err, ok := errs[0].(*checker.PackError); ok { - test.Equals(t, packHandle.Name, err.ID.String()) + test.Equals(t, packID, err.ID) } else { t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) } @@ -125,11 +122,8 @@ func TestUnreferencedPack(t *testing.T) { // index 3f1a only references pack 60e0 packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" - indexHandle := backend.Handle{ - Type: restic.IndexFile, - Name: "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44", - } - test.OK(t, repo.Backend().Remove(context.TODO(), indexHandle)) + indexID := restic.TestParseID("3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44") + test.OK(t, repo.RemoveUnpacked(context.TODO(), restic.IndexFile, indexID)) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) @@ -154,11 +148,8 @@ func TestUnreferencedBlobs(t *testing.T) { repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - snapshotHandle := backend.Handle{ - Type: restic.SnapshotFile, - Name: "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02", - } - test.OK(t, repo.Backend().Remove(context.TODO(), snapshotHandle)) + snapshotID := restic.TestParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02") + test.OK(t, repo.RemoveUnpacked(context.TODO(), restic.SnapshotFile, snapshotID)) unusedBlobsBySnapshot := restic.BlobHandles{ restic.TestParseHandle("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849", restic.DataBlob), diff --git 
a/internal/repository/repack_test.go b/internal/repository/repack_test.go index 2f786710135..949f607dfb9 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -167,7 +167,7 @@ func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs rest } for id := range repackedBlobs { - err = repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}) + err = repo.RemoveUnpacked(context.TODO(), restic.PackFile, id) if err != nil { t.Fatal(err) } diff --git a/internal/repository/repository.go b/internal/repository/repository.go index bbdaa16a7a9..4c06d8134f8 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -520,6 +520,11 @@ func (r *Repository) verifyUnpacked(buf []byte, t restic.FileType, expected []by return nil } +func (r *Repository) RemoveUnpacked(ctx context.Context, t restic.FileType, id restic.ID) error { + // TODO prevent everything except removing snapshots for non-repository code + return r.be.Remove(ctx, backend.Handle{Type: t, Name: id.String()}) +} + // Flush saves all remaining packs and the index func (r *Repository) Flush(ctx context.Context) error { if err := r.flushPacks(ctx); err != nil { diff --git a/internal/restic/lock.go b/internal/restic/lock.go index 182a3442d9b..127ac643ffe 100644 --- a/internal/restic/lock.go +++ b/internal/restic/lock.go @@ -12,7 +12,6 @@ import ( "testing" "time" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/debug" @@ -226,7 +225,7 @@ func (l *Lock) Unlock() error { return nil } - return l.repo.Backend().Remove(context.TODO(), backend.Handle{Type: LockFile, Name: l.lockID.String()}) + return l.repo.RemoveUnpacked(context.TODO(), LockFile, *l.lockID) } var StaleLockTimeout = 30 * time.Minute @@ -286,7 +285,7 @@ func (l *Lock) Refresh(ctx context.Context) error { oldLockID := l.lockID l.lockID = &id - return l.repo.Backend().Remove(context.TODO(), backend.Handle{Type: LockFile, Name: oldLockID.String()}) + return l.repo.RemoveUnpacked(context.TODO(), LockFile, *oldLockID) } // RefreshStaleLock is an extended variant of Refresh that can also refresh stale lock files. 
@@ -315,13 +314,13 @@ func (l *Lock) RefreshStaleLock(ctx context.Context) error { exists, err = l.checkExistence(ctx) if err != nil { // cleanup replacement lock - _ = l.repo.Backend().Remove(context.TODO(), backend.Handle{Type: LockFile, Name: id.String()}) + _ = l.repo.RemoveUnpacked(context.TODO(), LockFile, id) return err } if !exists { // cleanup replacement lock - _ = l.repo.Backend().Remove(context.TODO(), backend.Handle{Type: LockFile, Name: id.String()}) + _ = l.repo.RemoveUnpacked(context.TODO(), LockFile, id) return ErrRemovedLock } @@ -332,7 +331,7 @@ func (l *Lock) RefreshStaleLock(ctx context.Context) error { oldLockID := l.lockID l.lockID = &id - return l.repo.Backend().Remove(context.TODO(), backend.Handle{Type: LockFile, Name: oldLockID.String()}) + return l.repo.RemoveUnpacked(context.TODO(), LockFile, *oldLockID) } func (l *Lock) checkExistence(ctx context.Context) (bool, error) { @@ -400,7 +399,7 @@ func RemoveStaleLocks(ctx context.Context, repo Repository) (uint, error) { } if lock.Stale() { - err = repo.Backend().Remove(ctx, backend.Handle{Type: LockFile, Name: id.String()}) + err = repo.RemoveUnpacked(ctx, LockFile, id) if err == nil { processed++ } @@ -416,7 +415,7 @@ func RemoveStaleLocks(ctx context.Context, repo Repository) (uint, error) { func RemoveAllLocks(ctx context.Context, repo Repository) (uint, error) { var processed uint32 err := ParallelList(ctx, repo, LockFile, repo.Connections(), func(ctx context.Context, id ID, _ int64) error { - err := repo.Backend().Remove(ctx, backend.Handle{Type: LockFile, Name: id.String()}) + err := repo.RemoveUnpacked(ctx, LockFile, id) if err == nil { atomic.AddUint32(&processed, 1) } diff --git a/internal/restic/lock_test.go b/internal/restic/lock_test.go index 0d282aaf7f1..ae10f403498 100644 --- a/internal/restic/lock_test.go +++ b/internal/restic/lock_test.go @@ -131,8 +131,7 @@ func createFakeLock(repo restic.SaverUnpacked, t time.Time, pid int) (restic.ID, } func removeLock(repo restic.Repository, id restic.ID) error { - h := backend.Handle{Type: restic.LockFile, Name: id.String()} - return repo.Backend().Remove(context.TODO(), h) + return repo.RemoveUnpacked(context.TODO(), restic.LockFile, id) } var staleLockTests = []struct { @@ -318,7 +317,7 @@ func TestLockRefreshStaleMissing(t *testing.T) { lockID := checkSingleLock(t, repo) // refresh must fail if lock was removed - rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.LockFile, Name: lockID.String()})) + rtest.OK(t, repo.RemoveUnpacked(context.TODO(), restic.LockFile, lockID)) time.Sleep(time.Millisecond) err = lock.RefreshStaleLock(context.TODO()) rtest.Assert(t, err == restic.ErrRemovedLock, "unexpected error, expected %v, got %v", restic.ErrRemovedLock, err) diff --git a/internal/restic/parallel.go b/internal/restic/parallel.go index cefbf03589c..11460bbbde3 100644 --- a/internal/restic/parallel.go +++ b/internal/restic/parallel.go @@ -3,7 +3,6 @@ package restic import ( "context" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/ui/progress" "golang.org/x/sync/errgroup" @@ -77,8 +76,7 @@ func ParallelRemove(ctx context.Context, repo Repository, fileList IDSet, fileTy for i := 0; i < int(workerCount); i++ { wg.Go(func() error { for id := range fileChan { - h := backend.Handle{Type: fileType, Name: id.String()} - err := repo.Backend().Remove(ctx, h) + err := repo.RemoveUnpacked(ctx, fileType, id) if report != nil { err = report(id, err) } diff --git 
a/internal/restic/repository.go b/internal/restic/repository.go index 5393e07013c..89a6c3ca0d6 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -57,6 +57,8 @@ type Repository interface { // LoadUnpacked loads and decrypts the file with the given type and ID. LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error) SaveUnpacked(context.Context, FileType, []byte) (ID, error) + // RemoveUnpacked removes a file from the repository. This will eventually be restricted to deleting only snapshots. + RemoveUnpacked(ctx context.Context, t FileType, id ID) error // LoadRaw reads all data stored in the backend for the file with id and filetype t. // If the backend returns data that does not match the id, then the buffer is returned From 34d90aecf922cdc080be97997443e4ed08ca48ab Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 15:50:52 +0200 Subject: [PATCH 228/893] migrations: move logic of upgrade_repo_v2 to repository package The migration modifies repository internals and thus should live within the repository package. --- internal/migrations/upgrade_repo_v2.go | 89 +---------------- internal/migrations/upgrade_repo_v2_test.go | 76 --------------- internal/repository/upgrade_repo.go | 103 ++++++++++++++++++++ internal/repository/upgrade_repo_test.go | 82 ++++++++++++++++ 4 files changed, 187 insertions(+), 163 deletions(-) create mode 100644 internal/repository/upgrade_repo.go create mode 100644 internal/repository/upgrade_repo_test.go diff --git a/internal/migrations/upgrade_repo_v2.go b/internal/migrations/upgrade_repo_v2.go index 6f4225947d6..23a7f1ff0aa 100644 --- a/internal/migrations/upgrade_repo_v2.go +++ b/internal/migrations/upgrade_repo_v2.go @@ -3,10 +3,8 @@ package migrations import ( "context" "fmt" - "os" - "path/filepath" - "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) @@ -14,26 +12,6 @@ func init() { register(&UpgradeRepoV2{}) } -type UpgradeRepoV2Error struct { - UploadNewConfigError error - ReuploadOldConfigError error - - BackupFilePath string -} - -func (err *UpgradeRepoV2Error) Error() string { - if err.ReuploadOldConfigError != nil { - return fmt.Sprintf("error uploading config (%v), re-uploading old config filed failed as well (%v), but there is a backup of the config file in %v", err.UploadNewConfigError, err.ReuploadOldConfigError, err.BackupFilePath) - } - - return fmt.Sprintf("error uploading config (%v), re-uploaded old config was successful, there is a backup of the config file in %v", err.UploadNewConfigError, err.BackupFilePath) -} - -func (err *UpgradeRepoV2Error) Unwrap() error { - // consider the original upload error as the primary cause - return err.UploadNewConfigError -} - type UpgradeRepoV2 struct{} func (*UpgradeRepoV2) Name() string { @@ -56,70 +34,7 @@ func (*UpgradeRepoV2) Check(_ context.Context, repo restic.Repository) (bool, st func (*UpgradeRepoV2) RepoCheck() bool { return true } -func (*UpgradeRepoV2) upgrade(ctx context.Context, repo restic.Repository) error { - h := backend.Handle{Type: backend.ConfigFile} - - if !repo.Backend().HasAtomicReplace() { - // remove the original file for backends which do not support atomic overwriting - err := repo.Backend().Remove(ctx, h) - if err != nil { - return fmt.Errorf("remove config failed: %w", err) - } - } - - // upgrade config - cfg := repo.Config() - cfg.Version = 2 - - err := restic.SaveConfig(ctx, repo, cfg) - if err != nil { - return 
fmt.Errorf("save new config file failed: %w", err) - } - - return nil -} func (m *UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error { - tempdir, err := os.MkdirTemp("", "restic-migrate-upgrade-repo-v2-") - if err != nil { - return fmt.Errorf("create temp dir failed: %w", err) - } - - h := backend.Handle{Type: restic.ConfigFile} - - // read raw config file and save it to a temp dir, just in case - rawConfigFile, err := repo.LoadRaw(ctx, restic.ConfigFile, restic.ID{}) - if err != nil { - return fmt.Errorf("load config file failed: %w", err) - } - - backupFileName := filepath.Join(tempdir, "config") - err = os.WriteFile(backupFileName, rawConfigFile, 0600) - if err != nil { - return fmt.Errorf("write config file backup to %v failed: %w", tempdir, err) - } - - // run the upgrade - err = m.upgrade(ctx, repo) - if err != nil { - - // build an error we can return to the caller - repoError := &UpgradeRepoV2Error{ - UploadNewConfigError: err, - BackupFilePath: backupFileName, - } - - // try contingency methods, reupload the original file - _ = repo.Backend().Remove(ctx, h) - err = repo.Backend().Save(ctx, h, backend.NewByteReader(rawConfigFile, nil)) - if err != nil { - repoError.ReuploadOldConfigError = err - } - - return repoError - } - - _ = os.Remove(backupFileName) - _ = os.Remove(tempdir) - return nil + return repository.UpgradeRepo(ctx, repo.(*repository.Repository)) } diff --git a/internal/migrations/upgrade_repo_v2_test.go b/internal/migrations/upgrade_repo_v2_test.go index 845d20e92ba..59f2394e0a7 100644 --- a/internal/migrations/upgrade_repo_v2_test.go +++ b/internal/migrations/upgrade_repo_v2_test.go @@ -2,15 +2,9 @@ package migrations import ( "context" - "os" - "path/filepath" - "sync" "testing" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/test" ) func TestUpgradeRepoV2(t *testing.T) { @@ -35,73 +29,3 @@ func TestUpgradeRepoV2(t *testing.T) { t.Fatal(err) } } - -type failBackend struct { - backend.Backend - - mu sync.Mutex - ConfigFileSavesUntilError uint -} - -func (be *failBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { - if h.Type != backend.ConfigFile { - return be.Backend.Save(ctx, h, rd) - } - - be.mu.Lock() - if be.ConfigFileSavesUntilError == 0 { - be.mu.Unlock() - return errors.New("failure induced for testing") - } - - be.ConfigFileSavesUntilError-- - be.mu.Unlock() - - return be.Backend.Save(ctx, h, rd) -} - -func TestUpgradeRepoV2Failure(t *testing.T) { - be := repository.TestBackend(t) - - // wrap backend so that it fails upgrading the config after the initial write - be = &failBackend{ - ConfigFileSavesUntilError: 1, - Backend: be, - } - - repo := repository.TestRepositoryWithBackend(t, be, 1, repository.Options{}) - if repo.Config().Version != 1 { - t.Fatal("test repo has wrong version") - } - - m := &UpgradeRepoV2{} - - ok, _, err := m.Check(context.Background(), repo) - if err != nil { - t.Fatal(err) - } - - if !ok { - t.Fatal("migration check returned false") - } - - err = m.Apply(context.Background(), repo) - if err == nil { - t.Fatal("expected error returned from Apply(), got nil") - } - - upgradeErr := err.(*UpgradeRepoV2Error) - if upgradeErr.UploadNewConfigError == nil { - t.Fatal("expected upload error, got nil") - } - - if upgradeErr.ReuploadOldConfigError == nil { - t.Fatal("expected reupload error, got nil") - } - - if upgradeErr.BackupFilePath == "" { - t.Fatal("no 
backup file path found") - } - test.OK(t, os.Remove(upgradeErr.BackupFilePath)) - test.OK(t, os.Remove(filepath.Dir(upgradeErr.BackupFilePath))) -} diff --git a/internal/repository/upgrade_repo.go b/internal/repository/upgrade_repo.go new file mode 100644 index 00000000000..3e86cc37707 --- /dev/null +++ b/internal/repository/upgrade_repo.go @@ -0,0 +1,103 @@ +package repository + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/restic" +) + +type upgradeRepoV2Error struct { + UploadNewConfigError error + ReuploadOldConfigError error + + BackupFilePath string +} + +func (err *upgradeRepoV2Error) Error() string { + if err.ReuploadOldConfigError != nil { + return fmt.Sprintf("error uploading config (%v), re-uploading old config filed failed as well (%v), but there is a backup of the config file in %v", err.UploadNewConfigError, err.ReuploadOldConfigError, err.BackupFilePath) + } + + return fmt.Sprintf("error uploading config (%v), re-uploaded old config was successful, there is a backup of the config file in %v", err.UploadNewConfigError, err.BackupFilePath) +} + +func (err *upgradeRepoV2Error) Unwrap() error { + // consider the original upload error as the primary cause + return err.UploadNewConfigError +} + +func upgradeRepository(ctx context.Context, repo *Repository) error { + h := backend.Handle{Type: backend.ConfigFile} + + if !repo.be.HasAtomicReplace() { + // remove the original file for backends which do not support atomic overwriting + err := repo.be.Remove(ctx, h) + if err != nil { + return fmt.Errorf("remove config failed: %w", err) + } + } + + // upgrade config + cfg := repo.Config() + cfg.Version = 2 + + err := restic.SaveConfig(ctx, repo, cfg) + if err != nil { + return fmt.Errorf("save new config file failed: %w", err) + } + + return nil +} + +func UpgradeRepo(ctx context.Context, repo *Repository) error { + if repo.Config().Version != 1 { + return fmt.Errorf("repository has version %v, only upgrades from version 1 are supported", repo.Config().Version) + } + + tempdir, err := os.MkdirTemp("", "restic-migrate-upgrade-repo-v2-") + if err != nil { + return fmt.Errorf("create temp dir failed: %w", err) + } + + h := backend.Handle{Type: restic.ConfigFile} + + // read raw config file and save it to a temp dir, just in case + rawConfigFile, err := repo.LoadRaw(ctx, restic.ConfigFile, restic.ID{}) + if err != nil { + return fmt.Errorf("load config file failed: %w", err) + } + + backupFileName := filepath.Join(tempdir, "config") + err = os.WriteFile(backupFileName, rawConfigFile, 0600) + if err != nil { + return fmt.Errorf("write config file backup to %v failed: %w", tempdir, err) + } + + // run the upgrade + err = upgradeRepository(ctx, repo) + if err != nil { + + // build an error we can return to the caller + repoError := &upgradeRepoV2Error{ + UploadNewConfigError: err, + BackupFilePath: backupFileName, + } + + // try contingency methods, reupload the original file + _ = repo.Backend().Remove(ctx, h) + err = repo.Backend().Save(ctx, h, backend.NewByteReader(rawConfigFile, nil)) + if err != nil { + repoError.ReuploadOldConfigError = err + } + + return repoError + } + + _ = os.Remove(backupFileName) + _ = os.Remove(tempdir) + return nil +} diff --git a/internal/repository/upgrade_repo_test.go b/internal/repository/upgrade_repo_test.go new file mode 100644 index 00000000000..47c5f856ce8 --- /dev/null +++ b/internal/repository/upgrade_repo_test.go @@ -0,0 +1,82 @@ +package repository + +import ( 
+ "context" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/errors" + rtest "github.com/restic/restic/internal/test" +) + +func TestUpgradeRepoV2(t *testing.T) { + repo := TestRepositoryWithVersion(t, 1) + if repo.Config().Version != 1 { + t.Fatal("test repo has wrong version") + } + + err := UpgradeRepo(context.Background(), repo.(*Repository)) + rtest.OK(t, err) +} + +type failBackend struct { + backend.Backend + + mu sync.Mutex + ConfigFileSavesUntilError uint +} + +func (be *failBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { + if h.Type != backend.ConfigFile { + return be.Backend.Save(ctx, h, rd) + } + + be.mu.Lock() + if be.ConfigFileSavesUntilError == 0 { + be.mu.Unlock() + return errors.New("failure induced for testing") + } + + be.ConfigFileSavesUntilError-- + be.mu.Unlock() + + return be.Backend.Save(ctx, h, rd) +} + +func TestUpgradeRepoV2Failure(t *testing.T) { + be := TestBackend(t) + + // wrap backend so that it fails upgrading the config after the initial write + be = &failBackend{ + ConfigFileSavesUntilError: 1, + Backend: be, + } + + repo := TestRepositoryWithBackend(t, be, 1, Options{}) + if repo.Config().Version != 1 { + t.Fatal("test repo has wrong version") + } + + err := UpgradeRepo(context.Background(), repo.(*Repository)) + if err == nil { + t.Fatal("expected error returned from Apply(), got nil") + } + + upgradeErr := err.(*upgradeRepoV2Error) + if upgradeErr.UploadNewConfigError == nil { + t.Fatal("expected upload error, got nil") + } + + if upgradeErr.ReuploadOldConfigError == nil { + t.Fatal("expected reupload error, got nil") + } + + if upgradeErr.BackupFilePath == "" { + t.Fatal("no backup file path found") + } + rtest.OK(t, os.Remove(upgradeErr.BackupFilePath)) + rtest.OK(t, os.Remove(filepath.Dir(upgradeErr.BackupFilePath))) +} From a1ca5e15c4056d43e1e8ef4ddecf427dcee63115 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 15:55:45 +0200 Subject: [PATCH 229/893] migrations: add temporary hack for s3_layout The migration will be removed after the next restic release anyways. Thus, there's no need for a clean implementation. 
--- internal/checker/checker.go | 6 ++++-- internal/migrations/s3_layout.go | 5 +++-- internal/repository/s3_backend.go | 12 ++++++++++++ 3 files changed, 19 insertions(+), 4 deletions(-) create mode 100644 internal/repository/s3_backend.go diff --git a/internal/checker/checker.go b/internal/checker/checker.go index d6474f86e83..f19439622b4 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -256,8 +256,10 @@ func isS3Legacy(b backend.Backend) bool { func (c *Checker) Packs(ctx context.Context, errChan chan<- error) { defer close(errChan) - if isS3Legacy(c.repo.Backend()) { - errChan <- ErrLegacyLayout + if r, ok := c.repo.(*repository.Repository); ok { + if isS3Legacy(repository.AsS3Backend(r)) { + errChan <- ErrLegacyLayout + } } debug.Log("checking for %d packs", len(c.packs)) diff --git a/internal/migrations/s3_layout.go b/internal/migrations/s3_layout.go index 6b40013eec5..8b994b8fc17 100644 --- a/internal/migrations/s3_layout.go +++ b/internal/migrations/s3_layout.go @@ -11,6 +11,7 @@ import ( "github.com/restic/restic/internal/backend/s3" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) @@ -24,7 +25,7 @@ type S3Layout struct{} // Check tests whether the migration can be applied. func (m *S3Layout) Check(_ context.Context, repo restic.Repository) (bool, string, error) { - be := backend.AsBackend[*s3.Backend](repo.Backend()) + be := repository.AsS3Backend(repo.(*repository.Repository)) if be == nil { debug.Log("backend is not s3") return false, "backend is not s3", nil @@ -76,7 +77,7 @@ func (m *S3Layout) moveFiles(ctx context.Context, be *s3.Backend, l layout.Layou // Apply runs the migration. 
func (m *S3Layout) Apply(ctx context.Context, repo restic.Repository) error { - be := backend.AsBackend[*s3.Backend](repo.Backend()) + be := repository.AsS3Backend(repo.(*repository.Repository)) if be == nil { debug.Log("backend is not s3") return errors.New("backend is not s3") diff --git a/internal/repository/s3_backend.go b/internal/repository/s3_backend.go new file mode 100644 index 00000000000..4c77c69a2b9 --- /dev/null +++ b/internal/repository/s3_backend.go @@ -0,0 +1,12 @@ +package repository + +import ( + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/s3" +) + +// AsS3Backend extracts the S3 backend from a repository +// TODO remove me once restic 0.17 was released +func AsS3Backend(repo *Repository) *s3.Backend { + return backend.AsBackend[*s3.Backend](repo.be) +} From d8b184b3d36378802b1a291acf8ed70ddf0c2343 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 16:01:44 +0200 Subject: [PATCH 230/893] repository: convert test helper to return *repository.Repository --- internal/repository/lock_test.go | 4 ++-- internal/repository/prune_test.go | 4 ++-- internal/repository/repair_index_test.go | 4 ++-- internal/repository/repair_pack_test.go | 12 ++++++------ internal/repository/repository_internal_test.go | 4 ++-- internal/repository/repository_test.go | 6 +++--- internal/repository/testing.go | 12 ++++++------ internal/repository/upgrade_repo_test.go | 4 ++-- 8 files changed, 25 insertions(+), 25 deletions(-) diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index 644fc6b374a..7bc32c0fb74 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -19,7 +19,7 @@ import ( type backendWrapper func(r backend.Backend) (backend.Backend, error) -func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { +func openLockTestRepo(t *testing.T, wrapper backendWrapper) *Repository { be := backend.Backend(mem.New()) // initialize repo TestRepositoryWithBackend(t, be, 0, Options{}) @@ -34,7 +34,7 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { return TestOpenBackend(t, be) } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { +func checkedLockRepo(ctx context.Context, t *testing.T, repo *Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { lock, wrappedCtx, err := lockerInst.Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index bff221f492c..e12ba6e3dfd 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -14,7 +14,7 @@ import ( ) func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { - repo := repository.TestRepository(t).(*repository.Repository) + repo := repository.TestRepository(t) createRandomBlobs(t, repo, 4, 0.5, true) createRandomBlobs(t, repo, 5, 0.5, true) keep, _ := selectBlobs(t, repo, 0.5) @@ -37,7 +37,7 @@ func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{})) - repo = repository.TestOpenBackend(t, repo.Backend()).(*repository.Repository) + repo = repository.TestOpenBackend(t, repo.Backend()) checker.TestCheckRepo(t, repo, true) if errOnUnused { 
diff --git a/internal/repository/repair_index_test.go b/internal/repository/repair_index_test.go index adaee3832fa..5b2c9804418 100644 --- a/internal/repository/repair_index_test.go +++ b/internal/repository/repair_index_test.go @@ -17,7 +17,7 @@ func listIndex(t *testing.T, repo restic.Lister) restic.IDSet { } func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, repo *repository.Repository)) { - repo := repository.TestRepository(t).(*repository.Repository) + repo := repository.TestRepository(t) createRandomBlobs(t, repo, 4, 0.5, true) createRandomBlobs(t, repo, 5, 0.5, true) indexes := listIndex(t, repo) @@ -25,7 +25,7 @@ func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, damage(t, repo) - repo = repository.TestOpenBackend(t, repo.Backend()).(*repository.Repository) + repo = repository.TestOpenBackend(t, repo.Backend()) rtest.OK(t, repository.RepairIndex(context.TODO(), repo, repository.RepairIndexOptions{ ReadAllPacks: readAllPacks, }, &progress.NoopPrinter{})) diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index 0d16d251ff6..ccb9bd13122 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -24,7 +24,7 @@ func listBlobs(repo restic.Repository) restic.BlobSet { return blobs } -func replaceFile(t *testing.T, repo restic.Repository, h backend.Handle, damage func([]byte) []byte) { +func replaceFile(t *testing.T, repo *repository.Repository, h backend.Handle, damage func([]byte) []byte) { buf, err := backendtest.LoadAll(context.TODO(), repo.Backend(), h) test.OK(t, err) buf = damage(buf) @@ -39,17 +39,17 @@ func TestRepairBrokenPack(t *testing.T) { func testRepairBrokenPack(t *testing.T, version uint) { tests := []struct { name string - damage func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) + damage func(t *testing.T, repo *repository.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) }{ { "valid pack", - func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, repo *repository.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { return packsBefore, restic.NewBlobSet() }, }, { "broken pack", - func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, repo *repository.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { wrongBlob := createRandomWrongBlob(t, repo) damagedPacks := findPacksForBlobs(t, repo, restic.NewBlobSet(wrongBlob)) return damagedPacks, restic.NewBlobSet(wrongBlob) @@ -57,7 +57,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { }, { "partially broken pack", - func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, repo *repository.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { // damage one of the pack files damagedID := packsBefore.List()[0] replaceFile(t, repo, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, @@ -80,7 +80,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { }, }, { "truncated pack", - func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, repo *repository.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { // damage one of the pack files damagedID := 
packsBefore.List()[0] replaceFile(t, repo, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, diff --git a/internal/repository/repository_internal_test.go b/internal/repository/repository_internal_test.go index 16e6e848438..35082774c47 100644 --- a/internal/repository/repository_internal_test.go +++ b/internal/repository/repository_internal_test.go @@ -353,7 +353,7 @@ func testStreamPack(t *testing.T, version uint) { } func TestBlobVerification(t *testing.T) { - repo := TestRepository(t).(*Repository) + repo := TestRepository(t) type DamageType string const ( @@ -402,7 +402,7 @@ func TestBlobVerification(t *testing.T) { } func TestUnpackedVerification(t *testing.T) { - repo := TestRepository(t).(*Repository) + repo := TestRepository(t) type DamageType string const ( diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 67622fdf812..a4733707cd3 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -145,7 +145,7 @@ func testLoadBlob(t *testing.T, version uint) { func TestLoadBlobBroken(t *testing.T) { be := mem.New() - repo := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}).(*repository.Repository) + repo := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}) buf := test.Random(42, 1000) var wg errgroup.Group @@ -374,7 +374,7 @@ func TestRepositoryIncrementalIndex(t *testing.T) { } func testRepositoryIncrementalIndex(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version).(*repository.Repository) + repo := repository.TestRepositoryWithVersion(t, version) index.IndexFull = func(*index.Index, bool) bool { return true } @@ -425,7 +425,7 @@ func TestInvalidCompression(t *testing.T) { func TestListPack(t *testing.T) { be := mem.New() - repo := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}).(*repository.Repository) + repo := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}) buf := test.Random(42, 1000) var wg errgroup.Group diff --git a/internal/repository/testing.go b/internal/repository/testing.go index 9fb643a46e8..5d0db1a31db 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -46,7 +46,7 @@ const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test // password. If be is nil, an in-memory backend is used. A constant polynomial // is used for the chunker and low-security test parameters. -func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, opts Options) restic.Repository { +func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, opts Options) *Repository { t.Helper() TestUseLowSecurityKDFParameters(t) restic.TestDisableCheckPolynomial(t) @@ -76,12 +76,12 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o // in-memory backend. When the environment variable RESTIC_TEST_REPO is set to // a non-existing directory, a local backend is created there and this is used // instead. The directory is not removed, but left there for inspection. 
-func TestRepository(t testing.TB) restic.Repository { +func TestRepository(t testing.TB) *Repository { t.Helper() return TestRepositoryWithVersion(t, 0) } -func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository { +func TestRepositoryWithVersion(t testing.TB, version uint) *Repository { t.Helper() dir := os.Getenv("RESTIC_TEST_REPO") opts := Options{} @@ -103,7 +103,7 @@ func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository { return TestRepositoryWithBackend(t, nil, version, opts) } -func TestFromFixture(t testing.TB, repoFixture string) (restic.Repository, func()) { +func TestFromFixture(t testing.TB, repoFixture string) (*Repository, func()) { repodir, cleanup := test.Env(t, repoFixture) repo := TestOpenLocal(t, repodir) @@ -111,7 +111,7 @@ func TestFromFixture(t testing.TB, repoFixture string) (restic.Repository, func( } // TestOpenLocal opens a local repository. -func TestOpenLocal(t testing.TB, dir string) restic.Repository { +func TestOpenLocal(t testing.TB, dir string) *Repository { var be backend.Backend be, err := local.Open(context.TODO(), local.Config{Path: dir, Connections: 2}) if err != nil { @@ -123,7 +123,7 @@ func TestOpenLocal(t testing.TB, dir string) restic.Repository { return TestOpenBackend(t, be) } -func TestOpenBackend(t testing.TB, be backend.Backend) restic.Repository { +func TestOpenBackend(t testing.TB, be backend.Backend) *Repository { repo, err := New(be, Options{}) if err != nil { t.Fatal(err) diff --git a/internal/repository/upgrade_repo_test.go b/internal/repository/upgrade_repo_test.go index 47c5f856ce8..85555692a55 100644 --- a/internal/repository/upgrade_repo_test.go +++ b/internal/repository/upgrade_repo_test.go @@ -18,7 +18,7 @@ func TestUpgradeRepoV2(t *testing.T) { t.Fatal("test repo has wrong version") } - err := UpgradeRepo(context.Background(), repo.(*Repository)) + err := UpgradeRepo(context.Background(), repo) rtest.OK(t, err) } @@ -60,7 +60,7 @@ func TestUpgradeRepoV2Failure(t *testing.T) { t.Fatal("test repo has wrong version") } - err := UpgradeRepo(context.Background(), repo.(*Repository)) + err := UpgradeRepo(context.Background(), repo) if err == nil { t.Fatal("expected error returned from Apply(), got nil") } From 67e2ba0d407a45dd0ee6ae1b3faea5eb16b7aca1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 16:02:33 +0200 Subject: [PATCH 231/893] repository: Lock requires *repository.Repository This allows the Lock function to access the backend, even once the Backend method is removed from the interface. 
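As a rough usage sketch only (the wrapping helper is made up for illustration and assumes the caller already holds a *repository.Repository):

	func lockAndUse(ctx context.Context, repo *repository.Repository) error {
		unlocker, wrappedCtx, err := repository.Lock(ctx, repo, false, 0,
			func(msg string) {},                         // retry message callback, no-op here
			func(format string, args ...interface{}) {}) // logger, no-op here
		if err != nil {
			return err
		}
		defer unlocker.Unlock()
		// wrappedCtx is cancelled once the repository is unlocked, so further
		// repository operations should use it instead of ctx
		_ = wrappedCtx
		return nil
	}
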
--- internal/repository/lock.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/repository/lock.go b/internal/repository/lock.go index fd8214cd1d0..7035e3c596f 100644 --- a/internal/repository/lock.go +++ b/internal/repository/lock.go @@ -36,13 +36,13 @@ var lockerInst = &locker{ refreshabilityTimeout: restic.StaleLockTimeout - defaultRefreshInterval*3/2, } -func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { +func Lock(ctx context.Context, repo *Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { return lockerInst.Lock(ctx, repo, exclusive, retryLock, printRetry, logger) } // Lock wraps the ctx such that it is cancelled when the repository is unlocked // cancelling the original context also stops the lock refresh -func (l *locker) Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { +func (l *locker) Lock(ctx context.Context, repo *Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { lockFn := restic.NewLock if exclusive { @@ -102,7 +102,7 @@ retryLoop: refreshChan := make(chan struct{}) forceRefreshChan := make(chan refreshLockRequest) - go l.refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) + go l.refreshLocks(ctx, repo.be, lockInfo, refreshChan, forceRefreshChan, logger) go l.monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) return &Unlocker{lockInfo}, ctx, nil From e40943a75d174de4d3579583329c35d0a3a5c80b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 16:08:22 +0200 Subject: [PATCH 232/893] restic: remove backend usage from lock test --- internal/restic/lock_test.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/internal/restic/lock_test.go b/internal/restic/lock_test.go index ae10f403498..0ca5e815f24 100644 --- a/internal/restic/lock_test.go +++ b/internal/restic/lock_test.go @@ -190,13 +190,16 @@ func TestLockStale(t *testing.T) { } } -func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool { - h := backend.Handle{Type: restic.LockFile, Name: id.String()} - _, err := repo.Backend().Stat(context.TODO(), h) - if err != nil && !repo.Backend().IsNotExist(err) { - t.Fatal(err) - } - return err == nil +func lockExists(repo restic.Repository, t testing.TB, lockID restic.ID) bool { + var exists bool + rtest.OK(t, repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error { + if id == lockID { + exists = true + } + return nil + })) + + return exists } func TestLockWithStaleLock(t *testing.T) { From 94e863885ce8eb89c1ba0f5735c00f14f7f30927 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 16:28:23 +0200 Subject: [PATCH 233/893] check: move verification of individual pack file to repository --- cmd/restic/cmd_check.go | 3 +- internal/checker/checker.go | 194 +-------------------------------- internal/repository/check.go | 205 +++++++++++++++++++++++++++++++++++ 3 files changed, 208 insertions(+), 194 deletions(-) create mode 100644 
internal/repository/check.go diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index c44edae7e79..671cab0e615 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -15,6 +15,7 @@ import ( "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" ) @@ -347,7 +348,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args for err := range errChan { errorsFound = true Warnf("%v\n", err) - if err, ok := err.(*checker.ErrPackData); ok { + if err, ok := err.(*repository.ErrPackData); ok { salvagePacks = append(salvagePacks, err.PackID) } } diff --git a/internal/checker/checker.go b/internal/checker/checker.go index f19439622b4..dc83aef5b33 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -2,21 +2,16 @@ package checker import ( "bufio" - "bytes" "context" "fmt" - "io" "runtime" - "sort" "sync" "github.com/klauspost/compress/zstd" - "github.com/minio/sha256-simd" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/s3" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/hashing" "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" @@ -90,16 +85,6 @@ func (err *ErrOldIndexFormat) Error() string { return fmt.Sprintf("index %v has old format", err.ID) } -// ErrPackData is returned if errors are discovered while verifying a packfile -type ErrPackData struct { - PackID restic.ID - errs []error -} - -func (e *ErrPackData) Error() string { - return fmt.Sprintf("pack %v contains %v errors: %v", e.PackID, len(e.errs), e.errs) -} - func (c *Checker) LoadSnapshots(ctx context.Context) error { var err error c.snapshots, err = restic.MemorizeList(ctx, c.repo, restic.SnapshotFile) @@ -524,182 +509,6 @@ func (c *Checker) GetPacks() map[restic.ID]int64 { return c.packs } -type partialReadError struct { - err error -} - -func (e *partialReadError) Error() string { - return e.err.Error() -} - -// checkPack reads a pack and checks the integrity of all blobs. 
-func checkPack(ctx context.Context, r restic.Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { - err := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) - if err != nil { - // retry pack verification to detect transient errors - err2 := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) - if err2 != nil { - err = err2 - } else { - err = fmt.Errorf("check successful on second attempt, original error %w", err) - } - } - return err -} - -func checkPackInner(ctx context.Context, r restic.Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { - - debug.Log("checking pack %v", id.String()) - - if len(blobs) == 0 { - return &ErrPackData{PackID: id, errs: []error{errors.New("pack is empty or not indexed")}} - } - - // sanity check blobs in index - sort.Slice(blobs, func(i, j int) bool { - return blobs[i].Offset < blobs[j].Offset - }) - idxHdrSize := pack.CalculateHeaderSize(blobs) - lastBlobEnd := 0 - nonContinuousPack := false - for _, blob := range blobs { - if lastBlobEnd != int(blob.Offset) { - nonContinuousPack = true - } - lastBlobEnd = int(blob.Offset + blob.Length) - } - // size was calculated by masterindex.PackSize, thus there's no need to recalculate it here - - var errs []error - if nonContinuousPack { - debug.Log("Index for pack contains gaps / overlaps, blobs: %v", blobs) - errs = append(errs, errors.New("index for pack contains gaps / overlapping blobs")) - } - - // calculate hash on-the-fly while reading the pack and capture pack header - var hash restic.ID - var hdrBuf []byte - h := backend.Handle{Type: backend.PackFile, Name: id.String()} - err := r.Backend().Load(ctx, h, int(size), 0, func(rd io.Reader) error { - hrd := hashing.NewReader(rd, sha256.New()) - bufRd.Reset(hrd) - - it := repository.NewPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec) - for { - val, err := it.Next() - if err == repository.ErrPackEOF { - break - } else if err != nil { - return &partialReadError{err} - } - debug.Log(" check blob %v: %v", val.Handle.ID, val.Handle) - if val.Err != nil { - debug.Log(" error verifying blob %v: %v", val.Handle.ID, val.Err) - errs = append(errs, errors.Errorf("blob %v: %v", val.Handle.ID, val.Err)) - } - } - - // skip enough bytes until we reach the possible header start - curPos := lastBlobEnd - minHdrStart := int(size) - pack.MaxHeaderSize - if minHdrStart > curPos { - _, err := bufRd.Discard(minHdrStart - curPos) - if err != nil { - return &partialReadError{err} - } - curPos += minHdrStart - curPos - } - - // read remainder, which should be the pack header - var err error - hdrBuf = make([]byte, int(size-int64(curPos))) - _, err = io.ReadFull(bufRd, hdrBuf) - if err != nil { - return &partialReadError{err} - } - - hash = restic.IDFromHash(hrd.Sum(nil)) - return nil - }) - if err != nil { - var e *partialReadError - isPartialReadError := errors.As(err, &e) - // failed to load the pack file, return as further checks cannot succeed anyways - debug.Log(" error streaming pack (partial %v): %v", isPartialReadError, err) - if isPartialReadError { - return &ErrPackData{PackID: id, errs: append(errs, fmt.Errorf("partial download error: %w", err))} - } - - // The check command suggests to repair files for which a `ErrPackData` is returned. However, this file - // completely failed to download such that there's no point in repairing anything. 
- return fmt.Errorf("download error: %w", err) - } - if !hash.Equal(id) { - debug.Log("pack ID does not match, want %v, got %v", id, hash) - return &ErrPackData{PackID: id, errs: append(errs, errors.Errorf("unexpected pack id %v", hash))} - } - - blobs, hdrSize, err := pack.List(r.Key(), bytes.NewReader(hdrBuf), int64(len(hdrBuf))) - if err != nil { - return &ErrPackData{PackID: id, errs: append(errs, err)} - } - - if uint32(idxHdrSize) != hdrSize { - debug.Log("Pack header size does not match, want %v, got %v", idxHdrSize, hdrSize) - errs = append(errs, errors.Errorf("pack header size does not match, want %v, got %v", idxHdrSize, hdrSize)) - } - - idx := r.Index() - for _, blob := range blobs { - // Check if blob is contained in index and position is correct - idxHas := false - for _, pb := range idx.Lookup(blob.BlobHandle) { - if pb.PackID == id && pb.Blob == blob { - idxHas = true - break - } - } - if !idxHas { - errs = append(errs, errors.Errorf("blob %v is not contained in index or position is incorrect", blob.ID)) - continue - } - } - - if len(errs) > 0 { - return &ErrPackData{PackID: id, errs: errs} - } - - return nil -} - -type bufReader struct { - rd *bufio.Reader - buf []byte -} - -func newBufReader(rd *bufio.Reader) *bufReader { - return &bufReader{ - rd: rd, - } -} - -func (b *bufReader) Discard(n int) (discarded int, err error) { - return b.rd.Discard(n) -} - -func (b *bufReader) ReadFull(n int) (buf []byte, err error) { - if cap(b.buf) < n { - b.buf = make([]byte, n) - } - b.buf = b.buf[:n] - - _, err = io.ReadFull(b.rd, b.buf) - if err != nil { - return nil, err - } - return b.buf, nil -} - // ReadData loads all data from the repository and checks the integrity. func (c *Checker) ReadData(ctx context.Context, errChan chan<- error) { c.ReadPacks(ctx, c.packs, nil, errChan) @@ -743,8 +552,7 @@ func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *p } } - err := checkPack(ctx, c.repo, ps.id, ps.blobs, ps.size, bufRd, dec) - + err := repository.CheckPack(ctx, c.repo.(*repository.Repository), ps.id, ps.blobs, ps.size, bufRd, dec) p.Add(1) if err == nil { continue diff --git a/internal/repository/check.go b/internal/repository/check.go new file mode 100644 index 00000000000..17d3444518b --- /dev/null +++ b/internal/repository/check.go @@ -0,0 +1,205 @@ +package repository + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "sort" + + "github.com/klauspost/compress/zstd" + "github.com/minio/sha256-simd" + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/hashing" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" +) + +// ErrPackData is returned if errors are discovered while verifying a packfile +type ErrPackData struct { + PackID restic.ID + errs []error +} + +func (e *ErrPackData) Error() string { + return fmt.Sprintf("pack %v contains %v errors: %v", e.PackID, len(e.errs), e.errs) +} + +type partialReadError struct { + err error +} + +func (e *partialReadError) Error() string { + return e.err.Error() +} + +// CheckPack reads a pack and checks the integrity of all blobs. 
+func CheckPack(ctx context.Context, r *Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { + err := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) + if err != nil { + // retry pack verification to detect transient errors + err2 := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) + if err2 != nil { + err = err2 + } else { + err = fmt.Errorf("check successful on second attempt, original error %w", err) + } + } + return err +} + +func checkPackInner(ctx context.Context, r restic.Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { + + debug.Log("checking pack %v", id.String()) + + if len(blobs) == 0 { + return &ErrPackData{PackID: id, errs: []error{errors.New("pack is empty or not indexed")}} + } + + // sanity check blobs in index + sort.Slice(blobs, func(i, j int) bool { + return blobs[i].Offset < blobs[j].Offset + }) + idxHdrSize := pack.CalculateHeaderSize(blobs) + lastBlobEnd := 0 + nonContinuousPack := false + for _, blob := range blobs { + if lastBlobEnd != int(blob.Offset) { + nonContinuousPack = true + } + lastBlobEnd = int(blob.Offset + blob.Length) + } + // size was calculated by masterindex.PackSize, thus there's no need to recalculate it here + + var errs []error + if nonContinuousPack { + debug.Log("Index for pack contains gaps / overlaps, blobs: %v", blobs) + errs = append(errs, errors.New("index for pack contains gaps / overlapping blobs")) + } + + // calculate hash on-the-fly while reading the pack and capture pack header + var hash restic.ID + var hdrBuf []byte + h := backend.Handle{Type: backend.PackFile, Name: id.String()} + err := r.Backend().Load(ctx, h, int(size), 0, func(rd io.Reader) error { + hrd := hashing.NewReader(rd, sha256.New()) + bufRd.Reset(hrd) + + it := NewPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec) + for { + val, err := it.Next() + if err == ErrPackEOF { + break + } else if err != nil { + return &partialReadError{err} + } + debug.Log(" check blob %v: %v", val.Handle.ID, val.Handle) + if val.Err != nil { + debug.Log(" error verifying blob %v: %v", val.Handle.ID, val.Err) + errs = append(errs, errors.Errorf("blob %v: %v", val.Handle.ID, val.Err)) + } + } + + // skip enough bytes until we reach the possible header start + curPos := lastBlobEnd + minHdrStart := int(size) - pack.MaxHeaderSize + if minHdrStart > curPos { + _, err := bufRd.Discard(minHdrStart - curPos) + if err != nil { + return &partialReadError{err} + } + curPos += minHdrStart - curPos + } + + // read remainder, which should be the pack header + var err error + hdrBuf = make([]byte, int(size-int64(curPos))) + _, err = io.ReadFull(bufRd, hdrBuf) + if err != nil { + return &partialReadError{err} + } + + hash = restic.IDFromHash(hrd.Sum(nil)) + return nil + }) + if err != nil { + var e *partialReadError + isPartialReadError := errors.As(err, &e) + // failed to load the pack file, return as further checks cannot succeed anyways + debug.Log(" error streaming pack (partial %v): %v", isPartialReadError, err) + if isPartialReadError { + return &ErrPackData{PackID: id, errs: append(errs, fmt.Errorf("partial download error: %w", err))} + } + + // The check command suggests to repair files for which a `ErrPackData` is returned. However, this file + // completely failed to download such that there's no point in repairing anything. 
+ return fmt.Errorf("download error: %w", err) + } + if !hash.Equal(id) { + debug.Log("pack ID does not match, want %v, got %v", id, hash) + return &ErrPackData{PackID: id, errs: append(errs, errors.Errorf("unexpected pack id %v", hash))} + } + + blobs, hdrSize, err := pack.List(r.Key(), bytes.NewReader(hdrBuf), int64(len(hdrBuf))) + if err != nil { + return &ErrPackData{PackID: id, errs: append(errs, err)} + } + + if uint32(idxHdrSize) != hdrSize { + debug.Log("Pack header size does not match, want %v, got %v", idxHdrSize, hdrSize) + errs = append(errs, errors.Errorf("pack header size does not match, want %v, got %v", idxHdrSize, hdrSize)) + } + + idx := r.Index() + for _, blob := range blobs { + // Check if blob is contained in index and position is correct + idxHas := false + for _, pb := range idx.Lookup(blob.BlobHandle) { + if pb.PackID == id && pb.Blob == blob { + idxHas = true + break + } + } + if !idxHas { + errs = append(errs, errors.Errorf("blob %v is not contained in index or position is incorrect", blob.ID)) + continue + } + } + + if len(errs) > 0 { + return &ErrPackData{PackID: id, errs: errs} + } + + return nil +} + +type bufReader struct { + rd *bufio.Reader + buf []byte +} + +func newBufReader(rd *bufio.Reader) *bufReader { + return &bufReader{ + rd: rd, + } +} + +func (b *bufReader) Discard(n int) (discarded int, err error) { + return b.rd.Discard(n) +} + +func (b *bufReader) ReadFull(n int) (buf []byte, err error) { + if cap(b.buf) < n { + b.buf = make([]byte, n) + } + b.buf = b.buf[:n] + + _, err = io.ReadFull(b.rd, b.buf) + if err != nil { + return nil, err + } + return b.buf, nil +} From aa4647f773ea1ded434ffef07836178859fc3e8c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 16:29:48 +0200 Subject: [PATCH 234/893] repository: unexport PackBlobIterator --- internal/repository/check.go | 4 ++-- internal/repository/repository.go | 32 +++++++++++++++---------------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/internal/repository/check.go b/internal/repository/check.go index 17d3444518b..9d8c43fc667 100644 --- a/internal/repository/check.go +++ b/internal/repository/check.go @@ -88,10 +88,10 @@ func checkPackInner(ctx context.Context, r restic.Repository, id restic.ID, blob hrd := hashing.NewReader(rd, sha256.New()) bufRd.Reset(hrd) - it := NewPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec) + it := newPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec) for { val, err := it.Next() - if err == ErrPackEOF { + if err == errPackEOF { break } else if err != nil { return &partialReadError{err} diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 4c06d8134f8..f740c67639f 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -271,7 +271,7 @@ func (r *Repository) loadBlob(ctx context.Context, blobs []restic.PackedBlob, bu continue } - it := NewPackBlobIterator(blob.PackID, newByteReader(buf), uint(blob.Offset), []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder()) + it := newPackBlobIterator(blob.PackID, newByteReader(buf), uint(blob.Offset), []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder()) pbv, err := it.Next() if err == nil { @@ -1029,11 +1029,11 @@ func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBl return errors.Wrap(err, "StreamPack") } - it := NewPackBlobIterator(packID, newByteReader(data), dataStart, blobs, key, dec) + it := newPackBlobIterator(packID, newByteReader(data), dataStart, blobs, key, dec) 
for { val, err := it.Next() - if err == ErrPackEOF { + if err == errPackEOF { break } else if err != nil { return err @@ -1098,7 +1098,7 @@ func (b *byteReader) ReadFull(n int) (buf []byte, err error) { return buf, nil } -type PackBlobIterator struct { +type packBlobIterator struct { packID restic.ID rd discardReader currentOffset uint @@ -1110,17 +1110,17 @@ type PackBlobIterator struct { decode []byte } -type PackBlobValue struct { +type packBlobValue struct { Handle restic.BlobHandle Plaintext []byte Err error } -var ErrPackEOF = errors.New("reached EOF of pack file") +var errPackEOF = errors.New("reached EOF of pack file") -func NewPackBlobIterator(packID restic.ID, rd discardReader, currentOffset uint, - blobs []restic.Blob, key *crypto.Key, dec *zstd.Decoder) *PackBlobIterator { - return &PackBlobIterator{ +func newPackBlobIterator(packID restic.ID, rd discardReader, currentOffset uint, + blobs []restic.Blob, key *crypto.Key, dec *zstd.Decoder) *packBlobIterator { + return &packBlobIterator{ packID: packID, rd: rd, currentOffset: currentOffset, @@ -1131,9 +1131,9 @@ func NewPackBlobIterator(packID restic.ID, rd discardReader, currentOffset uint, } // Next returns the next blob, an error or ErrPackEOF if all blobs were read -func (b *PackBlobIterator) Next() (PackBlobValue, error) { +func (b *packBlobIterator) Next() (packBlobValue, error) { if len(b.blobs) == 0 { - return PackBlobValue{}, ErrPackEOF + return packBlobValue{}, errPackEOF } entry := b.blobs[0] @@ -1141,12 +1141,12 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) { skipBytes := int(entry.Offset - b.currentOffset) if skipBytes < 0 { - return PackBlobValue{}, fmt.Errorf("overlapping blobs in pack %v", b.packID) + return packBlobValue{}, fmt.Errorf("overlapping blobs in pack %v", b.packID) } _, err := b.rd.Discard(skipBytes) if err != nil { - return PackBlobValue{}, err + return packBlobValue{}, err } b.currentOffset = entry.Offset @@ -1156,14 +1156,14 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) { buf, err := b.rd.ReadFull(int(entry.Length)) if err != nil { debug.Log(" read error %v", err) - return PackBlobValue{}, fmt.Errorf("readFull: %w", err) + return packBlobValue{}, fmt.Errorf("readFull: %w", err) } b.currentOffset = entry.Offset + entry.Length if int(entry.Length) <= b.key.NonceSize() { debug.Log("%v", b.blobs) - return PackBlobValue{}, fmt.Errorf("invalid blob length %v", entry) + return packBlobValue{}, fmt.Errorf("invalid blob length %v", entry) } // decryption errors are likely permanent, give the caller a chance to skip them @@ -1191,7 +1191,7 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) { } } - return PackBlobValue{entry.BlobHandle, plaintext, err}, nil + return packBlobValue{entry.BlobHandle, plaintext, err}, nil } var zeroChunkOnce sync.Once From 8a425c2f0a2f0f5ff0a6992feac5f68894190fd8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 16:59:09 +0200 Subject: [PATCH 235/893] remove usages of repo.Backend() from tests --- internal/archiver/archiver_test.go | 2 +- internal/checker/checker_test.go | 30 ++++++++++----------- internal/index/index_parallel_test.go | 2 +- internal/index/master_index_test.go | 2 +- internal/migrations/upgrade_repo_v2_test.go | 2 +- internal/repository/fuzz_test.go | 2 +- internal/repository/lock_test.go | 24 ++++++++--------- internal/repository/prune_test.go | 4 +-- internal/repository/repack_test.go | 10 +++---- internal/repository/repair_index_test.go | 24 ++++++++--------- internal/repository/repair_pack_test.go | 26 
+++++++++--------- internal/repository/repository_test.go | 28 +++++++++---------- internal/repository/testing.go | 19 ++++++------- internal/repository/upgrade_repo.go | 4 +-- internal/repository/upgrade_repo_test.go | 4 +-- internal/restic/lock_test.go | 2 +- internal/restic/snapshot_test.go | 2 +- internal/restic/tree_test.go | 4 +-- 18 files changed, 96 insertions(+), 95 deletions(-) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 51a425f4efc..8d0c2c02ff1 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1970,7 +1970,7 @@ func TestArchiverContextCanceled(t *testing.T) { }) // Ensure that the archiver itself reports the canceled context and not just the backend - repo := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()}, 0, repository.Options{}) + repo, _ := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()}, 0, repository.Options{}) back := rtest.Chdir(t, tempdir) defer back() diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index 38a166000cf..62401aa1939 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -73,7 +73,7 @@ func assertOnlyMixedPackHints(t *testing.T, hints []error) { } func TestCheckRepo(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, checkerTestData) + repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() chkr := checker.New(repo, false) @@ -91,7 +91,7 @@ func TestCheckRepo(t *testing.T) { } func TestMissingPack(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, checkerTestData) + repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() packID := restic.TestParseID("657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6") @@ -117,7 +117,7 @@ func TestMissingPack(t *testing.T) { } func TestUnreferencedPack(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, checkerTestData) + repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() // index 3f1a only references pack 60e0 @@ -145,7 +145,7 @@ func TestUnreferencedPack(t *testing.T) { } func TestUnreferencedBlobs(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, checkerTestData) + repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() snapshotID := restic.TestParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02") @@ -180,7 +180,7 @@ func TestUnreferencedBlobs(t *testing.T) { } func TestModifiedIndex(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, checkerTestData) + repo, be, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() done := make(chan struct{}) @@ -208,13 +208,13 @@ func TestModifiedIndex(t *testing.T) { }() wr := io.Writer(tmpfile) var hw *hashing.Writer - if repo.Backend().Hasher() != nil { - hw = hashing.NewWriter(wr, repo.Backend().Hasher()) + if be.Hasher() != nil { + hw = hashing.NewWriter(wr, be.Hasher()) wr = hw } // read the file from the backend - err = repo.Backend().Load(context.TODO(), h, 0, 0, func(rd io.Reader) error { + err = be.Load(context.TODO(), h, 0, 0, func(rd io.Reader) error { _, err := io.Copy(wr, rd) return err }) @@ -236,7 +236,7 @@ func TestModifiedIndex(t *testing.T) { t.Fatal(err) } - err = repo.Backend().Save(context.TODO(), h2, rd) + err = be.Save(context.TODO(), h2, rd) if err != nil { t.Fatal(err) } @@ -257,7 +257,7 @@ func TestModifiedIndex(t *testing.T) { var 
checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz") func TestDuplicatePacksInIndex(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, checkerDuplicateIndexTestData) + repo, _, cleanup := repository.TestFromFixture(t, checkerDuplicateIndexTestData) defer cleanup() chkr := checker.New(repo, false) @@ -334,11 +334,11 @@ func (b *errorOnceBackend) Load(ctx context.Context, h backend.Handle, length in } func TestCheckerModifiedData(t *testing.T) { - repo := repository.TestRepository(t) + repo, be := repository.TestRepositoryWithVersion(t, 0) sn := archiver.TestSnapshot(t, repo, ".", nil) t.Logf("archived as %v", sn.ID().Str()) - errBe := &errorBackend{Backend: repo.Backend()} + errBe := &errorBackend{Backend: be} for _, test := range []struct { name string @@ -360,7 +360,7 @@ func TestCheckerModifiedData(t *testing.T) { }, { "errorOnceBackend", - &errorOnceBackend{Backend: repo.Backend()}, + &errorOnceBackend{Backend: be}, func() {}, func(t *testing.T, err error) { if !strings.Contains(err.Error(), "check successful on second attempt, original error pack") { @@ -427,7 +427,7 @@ func (r *loadTreesOnceRepository) LoadTree(ctx context.Context, id restic.ID) (* } func TestCheckerNoDuplicateTreeDecodes(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, checkerTestData) + repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() checkRepo := &loadTreesOnceRepository{ Repository: repo, @@ -575,7 +575,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) { } func loadBenchRepository(t *testing.B) (*checker.Checker, restic.Repository, func()) { - repo, cleanup := repository.TestFromFixture(t, checkerTestData) + repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) diff --git a/internal/index/index_parallel_test.go b/internal/index/index_parallel_test.go index 5cb8d299d9c..61b0aad63c3 100644 --- a/internal/index/index_parallel_test.go +++ b/internal/index/index_parallel_test.go @@ -15,7 +15,7 @@ import ( var repoFixture = filepath.Join("..", "repository", "testdata", "test-repo.tar.gz") func TestRepositoryForAllIndexes(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, repoFixture) + repo, _, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() expectedIndexIDs := restic.NewIDSet() diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go index fe0364c61dd..c3560a7fb3a 100644 --- a/internal/index/master_index_test.go +++ b/internal/index/master_index_test.go @@ -342,7 +342,7 @@ var ( ) func createFilledRepo(t testing.TB, snapshots int, version uint) restic.Repository { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _ := repository.TestRepositoryWithVersion(t, version) for i := 0; i < snapshots; i++ { restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth) diff --git a/internal/migrations/upgrade_repo_v2_test.go b/internal/migrations/upgrade_repo_v2_test.go index 59f2394e0a7..44a39b6c56c 100644 --- a/internal/migrations/upgrade_repo_v2_test.go +++ b/internal/migrations/upgrade_repo_v2_test.go @@ -8,7 +8,7 @@ import ( ) func TestUpgradeRepoV2(t *testing.T) { - repo := repository.TestRepositoryWithVersion(t, 1) + repo, _ := repository.TestRepositoryWithVersion(t, 1) if repo.Config().Version != 1 { t.Fatal("test repo has wrong version") } diff --git a/internal/repository/fuzz_test.go 
b/internal/repository/fuzz_test.go index 80372f8e099..f1fb061575a 100644 --- a/internal/repository/fuzz_test.go +++ b/internal/repository/fuzz_test.go @@ -18,7 +18,7 @@ func FuzzSaveLoadBlob(f *testing.F) { } id := restic.Hash(blob) - repo := TestRepositoryWithVersion(t, 2) + repo, _ := TestRepositoryWithVersion(t, 2) var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index 7bc32c0fb74..bd7cbd5e2ca 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -19,7 +19,7 @@ import ( type backendWrapper func(r backend.Backend) (backend.Backend, error) -func openLockTestRepo(t *testing.T, wrapper backendWrapper) *Repository { +func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*Repository, backend.Backend) { be := backend.Backend(mem.New()) // initialize repo TestRepositoryWithBackend(t, be, 0, Options{}) @@ -31,7 +31,7 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) *Repository { rtest.OK(t, err) } - return TestOpenBackend(t, be) + return TestOpenBackend(t, be), be } func checkedLockRepo(ctx context.Context, t *testing.T, repo *Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { @@ -46,7 +46,7 @@ func checkedLockRepo(ctx context.Context, t *testing.T, repo *Repository, locker func TestLock(t *testing.T) { t.Parallel() - repo := openLockTestRepo(t, nil) + repo, _ := openLockTestRepo(t, nil) lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, lockerInst, 0) lock.Unlock() @@ -57,7 +57,7 @@ func TestLock(t *testing.T) { func TestLockCancel(t *testing.T) { t.Parallel() - repo := openLockTestRepo(t, nil) + repo, _ := openLockTestRepo(t, nil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -73,8 +73,8 @@ func TestLockCancel(t *testing.T) { func TestLockConflict(t *testing.T) { t.Parallel() - repo := openLockTestRepo(t, nil) - repo2 := TestOpenBackend(t, repo.Backend()) + repo, be := openLockTestRepo(t, nil) + repo2 := TestOpenBackend(t, be) lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) @@ -101,7 +101,7 @@ func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backen func TestLockFailedRefresh(t *testing.T) { t.Parallel() - repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo, _ := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &writeOnceBackend{Backend: r}, nil }) @@ -138,7 +138,7 @@ func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend. 
func TestLockSuccessfulRefresh(t *testing.T) { t.Parallel() - repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo, _ := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &loggingBackend{ Backend: r, t: t, @@ -190,7 +190,7 @@ func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.Rew func TestLockSuccessfulStaleRefresh(t *testing.T) { t.Parallel() var sb *slowBackend - repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo, _ := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { sb = &slowBackend{Backend: r} return sb, nil }) @@ -238,7 +238,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { func TestLockWaitTimeout(t *testing.T) { t.Parallel() - repo := openLockTestRepo(t, nil) + repo, _ := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) @@ -260,7 +260,7 @@ func TestLockWaitTimeout(t *testing.T) { func TestLockWaitCancel(t *testing.T) { t.Parallel() - repo := openLockTestRepo(t, nil) + repo, _ := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) @@ -286,7 +286,7 @@ func TestLockWaitCancel(t *testing.T) { func TestLockWaitSuccess(t *testing.T) { t.Parallel() - repo := openLockTestRepo(t, nil) + repo, _ := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index e12ba6e3dfd..dbf36ffd050 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -14,7 +14,7 @@ import ( ) func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { - repo := repository.TestRepository(t) + repo, be := repository.TestRepositoryWithVersion(t, 0) createRandomBlobs(t, repo, 4, 0.5, true) createRandomBlobs(t, repo, 5, 0.5, true) keep, _ := selectBlobs(t, repo, 0.5) @@ -37,7 +37,7 @@ func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{})) - repo = repository.TestOpenBackend(t, repo.Backend()) + repo = repository.TestOpenBackend(t, be) checker.TestCheckRepo(t, repo, true) if errOnUnused { diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index 949f607dfb9..3fd56ccb120 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -215,7 +215,7 @@ func TestRepack(t *testing.T) { } func testRepack(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _ := repository.TestRepositoryWithVersion(t, version) seed := time.Now().UnixNano() rand.Seed(seed) @@ -293,8 +293,8 @@ func (r oneConnectionRepo) Connections() uint { } func testRepackCopy(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) - dstRepo := repository.TestRepositoryWithVersion(t, version) + repo, _ := repository.TestRepositoryWithVersion(t, version) + dstRepo, _ := repository.TestRepositoryWithVersion(t, version) // test with minimal possible connection count repoWrapped := &oneConnectionRepo{repo} @@ -340,7 +340,7 @@ func TestRepackWrongBlob(t *testing.T) { func testRepackWrongBlob(t *testing.T, version uint) { // disable verification to 
allow adding corrupted blobs to the repository - repo := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) + repo, _ := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) seed := time.Now().UnixNano() rand.Seed(seed) @@ -366,7 +366,7 @@ func TestRepackBlobFallback(t *testing.T) { func testRepackBlobFallback(t *testing.T, version uint) { // disable verification to allow adding corrupted blobs to the repository - repo := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) + repo, _ := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) seed := time.Now().UnixNano() rand.Seed(seed) diff --git a/internal/repository/repair_index_test.go b/internal/repository/repair_index_test.go index 5b2c9804418..79922e9ec3a 100644 --- a/internal/repository/repair_index_test.go +++ b/internal/repository/repair_index_test.go @@ -16,16 +16,16 @@ func listIndex(t *testing.T, repo restic.Lister) restic.IDSet { return listFiles(t, repo, restic.IndexFile) } -func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, repo *repository.Repository)) { - repo := repository.TestRepository(t) +func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, repo *repository.Repository, be backend.Backend)) { + repo, be := repository.TestRepositoryWithVersion(t, 0) createRandomBlobs(t, repo, 4, 0.5, true) createRandomBlobs(t, repo, 5, 0.5, true) indexes := listIndex(t, repo) t.Logf("old indexes %v", indexes) - damage(t, repo) + damage(t, repo, be) - repo = repository.TestOpenBackend(t, repo.Backend()) + repo = repository.TestOpenBackend(t, be) rtest.OK(t, repository.RepairIndex(context.TODO(), repo, repository.RepairIndexOptions{ ReadAllPacks: readAllPacks, }, &progress.NoopPrinter{})) @@ -40,17 +40,17 @@ func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, func TestRebuildIndex(t *testing.T) { for _, test := range []struct { name string - damage func(t *testing.T, repo *repository.Repository) + damage func(t *testing.T, repo *repository.Repository, be backend.Backend) }{ { "valid index", - func(t *testing.T, repo *repository.Repository) {}, + func(t *testing.T, repo *repository.Repository, be backend.Backend) {}, }, { "damaged index", - func(t *testing.T, repo *repository.Repository) { + func(t *testing.T, repo *repository.Repository, be backend.Backend) { index := listIndex(t, repo).List()[0] - replaceFile(t, repo, backend.Handle{Type: restic.IndexFile, Name: index.String()}, func(b []byte) []byte { + replaceFile(t, be, backend.Handle{Type: restic.IndexFile, Name: index.String()}, func(b []byte) []byte { b[0] ^= 0xff return b }) @@ -58,16 +58,16 @@ func TestRebuildIndex(t *testing.T) { }, { "missing index", - func(t *testing.T, repo *repository.Repository) { + func(t *testing.T, repo *repository.Repository, be backend.Backend) { index := listIndex(t, repo).List()[0] - rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: index.String()})) + rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: index.String()})) }, }, { "missing pack", - func(t *testing.T, repo *repository.Repository) { + func(t *testing.T, repo *repository.Repository, be backend.Backend) { pack := listPacks(t, repo).List()[0] - rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: pack.String()})) + rtest.OK(t, 
be.Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: pack.String()})) }, }, } { diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index ccb9bd13122..7acdc646e6f 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -24,12 +24,12 @@ func listBlobs(repo restic.Repository) restic.BlobSet { return blobs } -func replaceFile(t *testing.T, repo *repository.Repository, h backend.Handle, damage func([]byte) []byte) { - buf, err := backendtest.LoadAll(context.TODO(), repo.Backend(), h) +func replaceFile(t *testing.T, be backend.Backend, h backend.Handle, damage func([]byte) []byte) { + buf, err := backendtest.LoadAll(context.TODO(), be, h) test.OK(t, err) buf = damage(buf) - test.OK(t, repo.Backend().Remove(context.TODO(), h)) - test.OK(t, repo.Backend().Save(context.TODO(), h, backend.NewByteReader(buf, repo.Backend().Hasher()))) + test.OK(t, be.Remove(context.TODO(), h)) + test.OK(t, be.Save(context.TODO(), h, backend.NewByteReader(buf, be.Hasher()))) } func TestRepairBrokenPack(t *testing.T) { @@ -39,17 +39,17 @@ func TestRepairBrokenPack(t *testing.T) { func testRepairBrokenPack(t *testing.T, version uint) { tests := []struct { name string - damage func(t *testing.T, repo *repository.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) + damage func(t *testing.T, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) }{ { "valid pack", - func(t *testing.T, repo *repository.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { return packsBefore, restic.NewBlobSet() }, }, { "broken pack", - func(t *testing.T, repo *repository.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { wrongBlob := createRandomWrongBlob(t, repo) damagedPacks := findPacksForBlobs(t, repo, restic.NewBlobSet(wrongBlob)) return damagedPacks, restic.NewBlobSet(wrongBlob) @@ -57,10 +57,10 @@ func testRepairBrokenPack(t *testing.T, version uint) { }, { "partially broken pack", - func(t *testing.T, repo *repository.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { // damage one of the pack files damagedID := packsBefore.List()[0] - replaceFile(t, repo, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, + replaceFile(t, be, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, func(buf []byte) []byte { buf[0] ^= 0xff return buf @@ -80,10 +80,10 @@ func testRepairBrokenPack(t *testing.T, version uint) { }, }, { "truncated pack", - func(t *testing.T, repo *repository.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { // damage one of the pack files damagedID := packsBefore.List()[0] - replaceFile(t, repo, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, + replaceFile(t, be, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, func(buf []byte) []byte { buf = buf[0:10] return buf @@ -104,7 +104,7 @@ func 
testRepairBrokenPack(t *testing.T, version uint) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { // disable verification to allow adding corrupted blobs to the repository - repo := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) + repo, be := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) seed := time.Now().UnixNano() rand.Seed(seed) @@ -114,7 +114,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { packsBefore := listPacks(t, repo) blobsBefore := listBlobs(repo) - toRepair, damagedBlobs := test.damage(t, repo, packsBefore) + toRepair, damagedBlobs := test.damage(t, repo, be, packsBefore) rtest.OK(t, repository.RepairPacks(context.TODO(), repo, toRepair, &progress.NoopPrinter{})) // reload index diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index a4733707cd3..f0d3ae486e9 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -45,7 +45,7 @@ func testSaveCalculateID(t *testing.T, version uint) { } func testSave(t *testing.T, version uint, calculateID bool) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _ := repository.TestRepositoryWithVersion(t, version) for _, size := range testSizes { data := make([]byte, size) @@ -88,7 +88,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) { } func benchmarkSaveAndEncrypt(t *testing.B, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _ := repository.TestRepositoryWithVersion(t, version) size := 4 << 20 // 4MiB data := make([]byte, size) @@ -114,7 +114,7 @@ func TestLoadBlob(t *testing.T) { } func testLoadBlob(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _ := repository.TestRepositoryWithVersion(t, version) length := 1000000 buf := crypto.NewBlobBuffer(length) _, err := io.ReadFull(rnd, buf) @@ -145,7 +145,7 @@ func testLoadBlob(t *testing.T, version uint) { func TestLoadBlobBroken(t *testing.T) { be := mem.New() - repo := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}) + repo, _ := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}) buf := test.Random(42, 1000) var wg errgroup.Group @@ -170,7 +170,7 @@ func BenchmarkLoadBlob(b *testing.B) { } func benchmarkLoadBlob(b *testing.B, version uint) { - repo := repository.TestRepositoryWithVersion(b, version) + repo, _ := repository.TestRepositoryWithVersion(b, version) length := 1000000 buf := crypto.NewBlobBuffer(length) _, err := io.ReadFull(rnd, buf) @@ -211,7 +211,7 @@ func BenchmarkLoadUnpacked(b *testing.B) { } func benchmarkLoadUnpacked(b *testing.B, version uint) { - repo := repository.TestRepositoryWithVersion(b, version) + repo, _ := repository.TestRepositoryWithVersion(b, version) length := 1000000 buf := crypto.NewBlobBuffer(length) _, err := io.ReadFull(rnd, buf) @@ -247,7 +247,7 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) { var repoFixture = filepath.Join("testdata", "test-repo.tar.gz") func TestRepositoryLoadIndex(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, repoFixture) + repo, _, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) @@ -268,7 +268,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (* } func 
TestRepositoryLoadUnpackedBroken(t *testing.T) { - repo := repository.TestRepository(t) + repo, be := repository.TestRepositoryWithVersion(t, 0) data := rtest.Random(23, 12345) id := restic.Hash(data) @@ -277,7 +277,7 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { data[0] ^= 0xff // store broken file - err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, repo.Backend().Hasher())) + err := be.Save(context.TODO(), h, backend.NewByteReader(data, be.Hasher())) rtest.OK(t, err) _, err = repo.LoadUnpacked(context.TODO(), restic.IndexFile, id) @@ -322,7 +322,7 @@ func BenchmarkLoadIndex(b *testing.B) { func benchmarkLoadIndex(b *testing.B, version uint) { repository.TestUseLowSecurityKDFParameters(b) - repo := repository.TestRepositoryWithVersion(b, version) + repo, be := repository.TestRepositoryWithVersion(b, version) idx := index.NewIndex() for i := 0; i < 5000; i++ { @@ -340,7 +340,7 @@ func benchmarkLoadIndex(b *testing.B, version uint) { rtest.OK(b, err) b.Logf("index saved as %v", id.Str()) - fi, err := repo.Backend().Stat(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: id.String()}) + fi, err := be.Stat(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: id.String()}) rtest.OK(b, err) b.Logf("filesize is %v", fi.Size) @@ -374,7 +374,7 @@ func TestRepositoryIncrementalIndex(t *testing.T) { } func testRepositoryIncrementalIndex(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _ := repository.TestRepositoryWithVersion(t, version) index.IndexFull = func(*index.Index, bool) bool { return true } @@ -425,7 +425,7 @@ func TestInvalidCompression(t *testing.T) { func TestListPack(t *testing.T) { be := mem.New() - repo := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}) + repo, _ := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}) buf := test.Random(42, 1000) var wg errgroup.Group @@ -440,7 +440,7 @@ func TestListPack(t *testing.T) { // Forcibly cache pack file packID := repo.Index().Lookup(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID - rtest.OK(t, repo.Backend().Load(context.TODO(), backend.Handle{Type: restic.PackFile, IsMetadata: true, Name: packID.String()}, 0, 0, func(rd io.Reader) error { return nil })) + rtest.OK(t, be.Load(context.TODO(), backend.Handle{Type: restic.PackFile, IsMetadata: true, Name: packID.String()}, 0, 0, func(rd io.Reader) error { return nil })) // Get size to list pack var size int64 diff --git a/internal/repository/testing.go b/internal/repository/testing.go index 5d0db1a31db..2155cad166d 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -46,7 +46,7 @@ const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test // password. If be is nil, an in-memory backend is used. A constant polynomial // is used for the chunker and low-security test parameters. 
-func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, opts Options) *Repository { +func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, opts Options) (*Repository, backend.Backend) { t.Helper() TestUseLowSecurityKDFParameters(t) restic.TestDisableCheckPolynomial(t) @@ -69,7 +69,7 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o t.Fatalf("TestRepository(): initialize repo failed: %v", err) } - return repo + return repo, be } // TestRepository returns a repository initialized with a test password on an @@ -78,10 +78,11 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o // instead. The directory is not removed, but left there for inspection. func TestRepository(t testing.TB) *Repository { t.Helper() - return TestRepositoryWithVersion(t, 0) + repo, _ := TestRepositoryWithVersion(t, 0) + return repo } -func TestRepositoryWithVersion(t testing.TB, version uint) *Repository { +func TestRepositoryWithVersion(t testing.TB, version uint) (*Repository, backend.Backend) { t.Helper() dir := os.Getenv("RESTIC_TEST_REPO") opts := Options{} @@ -103,15 +104,15 @@ func TestRepositoryWithVersion(t testing.TB, version uint) *Repository { return TestRepositoryWithBackend(t, nil, version, opts) } -func TestFromFixture(t testing.TB, repoFixture string) (*Repository, func()) { +func TestFromFixture(t testing.TB, repoFixture string) (*Repository, backend.Backend, func()) { repodir, cleanup := test.Env(t, repoFixture) - repo := TestOpenLocal(t, repodir) + repo, be := TestOpenLocal(t, repodir) - return repo, cleanup + return repo, be, cleanup } // TestOpenLocal opens a local repository. -func TestOpenLocal(t testing.TB, dir string) *Repository { +func TestOpenLocal(t testing.TB, dir string) (*Repository, backend.Backend) { var be backend.Backend be, err := local.Open(context.TODO(), local.Config{Path: dir, Connections: 2}) if err != nil { @@ -120,7 +121,7 @@ func TestOpenLocal(t testing.TB, dir string) *Repository { be = retry.New(be, 3, nil, nil) - return TestOpenBackend(t, be) + return TestOpenBackend(t, be), be } func TestOpenBackend(t testing.TB, be backend.Backend) *Repository { diff --git a/internal/repository/upgrade_repo.go b/internal/repository/upgrade_repo.go index 3e86cc37707..ea3ae2c0e0f 100644 --- a/internal/repository/upgrade_repo.go +++ b/internal/repository/upgrade_repo.go @@ -88,8 +88,8 @@ func UpgradeRepo(ctx context.Context, repo *Repository) error { } // try contingency methods, reupload the original file - _ = repo.Backend().Remove(ctx, h) - err = repo.Backend().Save(ctx, h, backend.NewByteReader(rawConfigFile, nil)) + _ = repo.be.Remove(ctx, h) + err = repo.be.Save(ctx, h, backend.NewByteReader(rawConfigFile, nil)) if err != nil { repoError.ReuploadOldConfigError = err } diff --git a/internal/repository/upgrade_repo_test.go b/internal/repository/upgrade_repo_test.go index 85555692a55..61ca6ef9564 100644 --- a/internal/repository/upgrade_repo_test.go +++ b/internal/repository/upgrade_repo_test.go @@ -13,7 +13,7 @@ import ( ) func TestUpgradeRepoV2(t *testing.T) { - repo := TestRepositoryWithVersion(t, 1) + repo, _ := TestRepositoryWithVersion(t, 1) if repo.Config().Version != 1 { t.Fatal("test repo has wrong version") } @@ -55,7 +55,7 @@ func TestUpgradeRepoV2Failure(t *testing.T) { Backend: be, } - repo := TestRepositoryWithBackend(t, be, 1, Options{}) + repo, _ := TestRepositoryWithBackend(t, be, 1, Options{}) if repo.Config().Version != 1 { t.Fatal("test repo has wrong 
version") } diff --git a/internal/restic/lock_test.go b/internal/restic/lock_test.go index 0ca5e815f24..fb9345fbce9 100644 --- a/internal/restic/lock_test.go +++ b/internal/restic/lock_test.go @@ -66,7 +66,7 @@ func (be *failLockLoadingBackend) Load(ctx context.Context, h backend.Handle, le func TestMultipleLockFailure(t *testing.T) { be := &failLockLoadingBackend{Backend: mem.New()} - repo := repository.TestRepositoryWithBackend(t, be, 0, repository.Options{}) + repo, _ := repository.TestRepositoryWithBackend(t, be, 0, repository.Options{}) restic.TestSetLockTimeout(t, 5*time.Millisecond) lock1, err := restic.NewLock(context.TODO(), repo) diff --git a/internal/restic/snapshot_test.go b/internal/restic/snapshot_test.go index b32c771d4bf..9099c8b5f30 100644 --- a/internal/restic/snapshot_test.go +++ b/internal/restic/snapshot_test.go @@ -32,7 +32,7 @@ func TestLoadJSONUnpacked(t *testing.T) { } func testLoadJSONUnpacked(t *testing.T, version uint) { - repo := repository.TestRepositoryWithVersion(t, version) + repo, _ := repository.TestRepositoryWithVersion(t, version) // archive a snapshot sn := restic.Snapshot{} diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go index 67ecec897ed..8e0b3587ace 100644 --- a/internal/restic/tree_test.go +++ b/internal/restic/tree_test.go @@ -181,7 +181,7 @@ func testLoadTree(t *testing.T, version uint) { } // archive a few files - repo := repository.TestRepositoryWithVersion(t, version) + repo, _ := repository.TestRepositoryWithVersion(t, version) sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) rtest.OK(t, repo.Flush(context.Background())) @@ -199,7 +199,7 @@ func benchmarkLoadTree(t *testing.B, version uint) { } // archive a few files - repo := repository.TestRepositoryWithVersion(t, version) + repo, _ := repository.TestRepositoryWithVersion(t, version) sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) rtest.OK(t, repo.Flush(context.Background())) From d2c26e33f3fff507c5b647c4e1134df12093e82c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 17:05:16 +0200 Subject: [PATCH 236/893] repository: remove further usages of repo.Backend() --- internal/repository/check.go | 4 ++-- internal/repository/repository.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/repository/check.go b/internal/repository/check.go index 9d8c43fc667..4092cbef66a 100644 --- a/internal/repository/check.go +++ b/internal/repository/check.go @@ -51,7 +51,7 @@ func CheckPack(ctx context.Context, r *Repository, id restic.ID, blobs []restic. 
return err } -func checkPackInner(ctx context.Context, r restic.Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { +func checkPackInner(ctx context.Context, r *Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { debug.Log("checking pack %v", id.String()) @@ -84,7 +84,7 @@ func checkPackInner(ctx context.Context, r restic.Repository, id restic.ID, blob var hash restic.ID var hdrBuf []byte h := backend.Handle{Type: backend.PackFile, Name: id.String()} - err := r.Backend().Load(ctx, h, int(size), 0, func(rd io.Reader) error { + err := r.be.Load(ctx, h, int(size), 0, func(rd io.Reader) error { hrd := hashing.NewReader(rd, sha256.New()) bufRd.Reset(hrd) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index f740c67639f..f80eb9c99f7 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -874,7 +874,7 @@ func (r *Repository) List(ctx context.Context, t restic.FileType, fn func(restic func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, uint32, error) { h := backend.Handle{Type: restic.PackFile, Name: id.String()} - entries, hdrSize, err := pack.List(r.Key(), backend.ReaderAt(ctx, r.Backend(), h), size) + entries, hdrSize, err := pack.List(r.Key(), backend.ReaderAt(ctx, r.be, h), size) if err != nil { if r.Cache != nil { // ignore error as there is not much we can do here @@ -882,7 +882,7 @@ func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([] } // retry on error - entries, hdrSize, err = pack.List(r.Key(), backend.ReaderAt(ctx, r.Backend(), h), size) + entries, hdrSize, err = pack.List(r.Key(), backend.ReaderAt(ctx, r.be, h), size) } return entries, hdrSize, err } @@ -948,7 +948,7 @@ const maxUnusedRange = 1 * 1024 * 1024 // then LoadBlobsFromPack will abort and not retry it. The buf passed to the callback is only valid within // this specific call. The callback must not keep a reference to buf. func (r *Repository) LoadBlobsFromPack(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { - return streamPack(ctx, r.Backend().Load, r.LoadBlob, r.getZstdDecoder(), r.key, packID, blobs, handleBlobFn) + return streamPack(ctx, r.be.Load, r.LoadBlob, r.getZstdDecoder(), r.key, packID, blobs, handleBlobFn) } func streamPack(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBlobFn, dec *zstd.Decoder, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { From 3d2410ed505d9699e9a27e24a43c4643d839e1b1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 17:11:08 +0200 Subject: [PATCH 237/893] Replace some repo.RemoveUnpacked usages These will eventually be blocked as they do not delete Snapshots. 
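The substitution itself is mechanical; a minimal sketch, taken from the test changes that follow, where `repo` and `be` are the repository and backend returned together by the test helpers:

    // before: removal went through the repository abstraction
    // test.OK(t, repo.RemoveUnpacked(context.TODO(), restic.PackFile, packID))

    // after: remove the file via the backend handle directly, since these
    // RemoveUnpacked usages are expected to be blocked eventually
    test.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: packID.String()}))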
--- internal/checker/checker_test.go | 8 ++++---- internal/restic/lock_test.go | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index 62401aa1939..baec8862890 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -91,11 +91,11 @@ func TestCheckRepo(t *testing.T) { } func TestMissingPack(t *testing.T) { - repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) + repo, be, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() packID := restic.TestParseID("657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6") - test.OK(t, repo.RemoveUnpacked(context.TODO(), restic.PackFile, packID)) + test.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: packID.String()})) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) @@ -117,13 +117,13 @@ func TestMissingPack(t *testing.T) { } func TestUnreferencedPack(t *testing.T) { - repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) + repo, be, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() // index 3f1a only references pack 60e0 packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" indexID := restic.TestParseID("3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44") - test.OK(t, repo.RemoveUnpacked(context.TODO(), restic.IndexFile, indexID)) + test.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: indexID.String()})) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) diff --git a/internal/restic/lock_test.go b/internal/restic/lock_test.go index fb9345fbce9..01234182107 100644 --- a/internal/restic/lock_test.go +++ b/internal/restic/lock_test.go @@ -312,7 +312,7 @@ func TestLockRefreshStale(t *testing.T) { } func TestLockRefreshStaleMissing(t *testing.T) { - repo := repository.TestRepository(t) + repo, be := repository.TestRepositoryWithVersion(t, 0) restic.TestSetLockTimeout(t, 5*time.Millisecond) lock, err := restic.NewLock(context.TODO(), repo) @@ -320,7 +320,7 @@ func TestLockRefreshStaleMissing(t *testing.T) { lockID := checkSingleLock(t, repo) // refresh must fail if lock was removed - rtest.OK(t, repo.RemoveUnpacked(context.TODO(), restic.LockFile, lockID)) + rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.LockFile, Name: lockID.String()})) time.Sleep(time.Millisecond) err = lock.RefreshStaleLock(context.TODO()) rtest.Assert(t, err == restic.ErrRemovedLock, "unexpected error, expected %v, got %v", restic.ErrRemovedLock, err) From 673496b09190180d90d1b147c05f4831d84ac4c1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 17:49:32 +0200 Subject: [PATCH 238/893] repository: clean cache between CheckPack retries The cache cleanup pattern is also used in ListPack etc. 
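The pattern referred to here, condensed from the CheckPack hunk that follows (simplified sketch; the actual code keeps the first error and wraps the result of the retry separately):

    err := checkPackInner(ctx, r, id, blobs, size, bufRd, dec)
    if err != nil {
        if r.Cache != nil {
            // ignore error as there's not much we can do here
            _ = r.Cache.Forget(backend.Handle{Type: restic.PackFile, Name: id.String()})
        }
        // retry pack verification to detect transient errors
        err = checkPackInner(ctx, r, id, blobs, size, bufRd, dec)
    }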
--- internal/repository/check.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/repository/check.go b/internal/repository/check.go index 4092cbef66a..8018f490238 100644 --- a/internal/repository/check.go +++ b/internal/repository/check.go @@ -40,6 +40,11 @@ func (e *partialReadError) Error() string { func CheckPack(ctx context.Context, r *Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { err := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) if err != nil { + if r.Cache != nil { + // ignore error as there's not much we can do here + _ = r.Cache.Forget(backend.Handle{Type: restic.PackFile, Name: id.String()}) + } + // retry pack verification to detect transient errors err2 := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) if err2 != nil { From 291c9677de886d5333ed20dcc05af6907f8bce2f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 17:50:54 +0200 Subject: [PATCH 239/893] restic/repository: remove Backend() method --- internal/repository/repository.go | 5 ----- internal/restic/repository.go | 3 --- 2 files changed, 8 deletions(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index f80eb9c99f7..534edc9fdb9 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -579,11 +579,6 @@ func (r *Repository) flushPacks(ctx context.Context) error { return err } -// Backend returns the backend for the repository. -func (r *Repository) Backend() backend.Backend { - return r.be -} - func (r *Repository) Connections() uint { return r.be.Connections() } diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 89a6c3ca0d6..e386630a35a 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -16,9 +16,6 @@ var ErrInvalidData = errors.New("invalid data returned") // Repository stores data in a backend. It provides high-level functions and // transparently encrypts/decrypts data. type Repository interface { - - // Backend returns the backend used by the repository - Backend() backend.Backend // Connections returns the maximum number of concurrent backend operations Connections() uint From 223aa22cb01a57c87a3962bd1f78f35659b80881 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 10 May 2024 18:19:14 +0200 Subject: [PATCH 240/893] replace some uses of restic.Repository with finegrained interfaces --- internal/index/master_index.go | 2 +- internal/repository/prune.go | 2 +- internal/restic/lock.go | 12 ++++++------ internal/restic/lock_test.go | 4 ++-- internal/restic/parallel.go | 2 +- internal/restic/repository.go | 20 +++++++++++++++++++- internal/restic/testing.go | 2 +- 7 files changed, 31 insertions(+), 13 deletions(-) diff --git a/internal/index/master_index.go b/internal/index/master_index.go index d99a3434df1..21ab344d673 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -270,7 +270,7 @@ func (mi *MasterIndex) MergeFinalIndexes() error { // Save saves all known indexes to index files, leaving out any // packs whose ID is contained in packBlacklist from finalized indexes. // It also removes the old index files and those listed in extraObsolete. 
-func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, excludePacks restic.IDSet, extraObsolete restic.IDs, opts restic.MasterIndexSaveOpts) error { +func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacked, excludePacks restic.IDSet, extraObsolete restic.IDs, opts restic.MasterIndexSaveOpts) error { p := opts.SaveProgress p.SetMax(uint64(len(mi.Packs(excludePacks)))) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 8ab16ab1506..479439e6abf 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -621,7 +621,7 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e // deleteFiles deletes the given fileList of fileType in parallel // if ignoreError=true, it will print a warning if there was an error, else it will abort. -func deleteFiles(ctx context.Context, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { +func deleteFiles(ctx context.Context, ignoreError bool, repo restic.RemoverUnpacked, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { bar := printer.NewCounter("files deleted") defer bar.Done() diff --git a/internal/restic/lock.go b/internal/restic/lock.go index 127ac643ffe..1e393c7eda8 100644 --- a/internal/restic/lock.go +++ b/internal/restic/lock.go @@ -35,7 +35,7 @@ type Lock struct { UID uint32 `json:"uid,omitempty"` GID uint32 `json:"gid,omitempty"` - repo Repository + repo Unpacked lockID *ID } @@ -86,14 +86,14 @@ var ErrRemovedLock = errors.New("lock file was removed in the meantime") // NewLock returns a new, non-exclusive lock for the repository. If an // exclusive lock is already held by another process, it returns an error // that satisfies IsAlreadyLocked. -func NewLock(ctx context.Context, repo Repository) (*Lock, error) { +func NewLock(ctx context.Context, repo Unpacked) (*Lock, error) { return newLock(ctx, repo, false) } // NewExclusiveLock returns a new, exclusive lock for the repository. If // another lock (normal and exclusive) is already held by another process, // it returns an error that satisfies IsAlreadyLocked. -func NewExclusiveLock(ctx context.Context, repo Repository) (*Lock, error) { +func NewExclusiveLock(ctx context.Context, repo Unpacked) (*Lock, error) { return newLock(ctx, repo, true) } @@ -105,7 +105,7 @@ func TestSetLockTimeout(t testing.TB, d time.Duration) { waitBeforeLockCheck = d } -func newLock(ctx context.Context, repo Repository, excl bool) (*Lock, error) { +func newLock(ctx context.Context, repo Unpacked, excl bool) (*Lock, error) { lock := &Lock{ Time: time.Now(), PID: os.Getpid(), @@ -389,7 +389,7 @@ func LoadLock(ctx context.Context, repo LoaderUnpacked, id ID) (*Lock, error) { } // RemoveStaleLocks deletes all locks detected as stale from the repository. -func RemoveStaleLocks(ctx context.Context, repo Repository) (uint, error) { +func RemoveStaleLocks(ctx context.Context, repo Unpacked) (uint, error) { var processed uint err := ForAllLocks(ctx, repo, nil, func(id ID, lock *Lock, err error) error { if err != nil { @@ -412,7 +412,7 @@ func RemoveStaleLocks(ctx context.Context, repo Repository) (uint, error) { } // RemoveAllLocks removes all locks forcefully. 
-func RemoveAllLocks(ctx context.Context, repo Repository) (uint, error) { +func RemoveAllLocks(ctx context.Context, repo Unpacked) (uint, error) { var processed uint32 err := ParallelList(ctx, repo, LockFile, repo.Connections(), func(ctx context.Context, id ID, _ int64) error { err := repo.RemoveUnpacked(ctx, LockFile, id) diff --git a/internal/restic/lock_test.go b/internal/restic/lock_test.go index 01234182107..b96b11e35ee 100644 --- a/internal/restic/lock_test.go +++ b/internal/restic/lock_test.go @@ -130,7 +130,7 @@ func createFakeLock(repo restic.SaverUnpacked, t time.Time, pid int) (restic.ID, return restic.SaveJSONUnpacked(context.TODO(), repo, restic.LockFile, &newLock) } -func removeLock(repo restic.Repository, id restic.ID) error { +func removeLock(repo restic.RemoverUnpacked, id restic.ID) error { return repo.RemoveUnpacked(context.TODO(), restic.LockFile, id) } @@ -190,7 +190,7 @@ func TestLockStale(t *testing.T) { } } -func lockExists(repo restic.Repository, t testing.TB, lockID restic.ID) bool { +func lockExists(repo restic.Lister, t testing.TB, lockID restic.ID) bool { var exists bool rtest.OK(t, repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error { if id == lockID { diff --git a/internal/restic/parallel.go b/internal/restic/parallel.go index 11460bbbde3..0c2215325bd 100644 --- a/internal/restic/parallel.go +++ b/internal/restic/parallel.go @@ -54,7 +54,7 @@ func ParallelList(ctx context.Context, r Lister, t FileType, parallelism uint, f // ParallelRemove deletes the given fileList of fileType in parallel // if callback returns an error, then it will abort. -func ParallelRemove(ctx context.Context, repo Repository, fileList IDSet, fileType FileType, report func(id ID, err error) error, bar *progress.Counter) error { +func ParallelRemove(ctx context.Context, repo RemoverUnpacked, fileList IDSet, fileType FileType, report func(id ID, err error) error, bar *progress.Counter) error { fileChan := make(chan ID) wg, ctx := errgroup.WithContext(ctx) wg.Go(func() error { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index e386630a35a..bc0ec2d43dd 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -89,6 +89,18 @@ type SaverUnpacked interface { SaveUnpacked(context.Context, FileType, []byte) (ID, error) } +// RemoverUnpacked allows removing an unpacked blob +type RemoverUnpacked interface { + // Connections returns the maximum number of concurrent backend operations + Connections() uint + RemoveUnpacked(ctx context.Context, t FileType, id ID) error +} + +type SaverRemoverUnpacked interface { + SaverUnpacked + RemoverUnpacked +} + type PackBlobs struct { PackID ID Blobs []Blob @@ -111,7 +123,7 @@ type MasterIndex interface { Each(ctx context.Context, fn func(PackedBlob)) error ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs - Save(ctx context.Context, repo Repository, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error + Save(ctx context.Context, repo SaverRemoverUnpacked, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error } // Lister allows listing files in a backend. 
@@ -123,3 +135,9 @@ type ListerLoaderUnpacked interface { Lister LoaderUnpacked } + +type Unpacked interface { + ListerLoaderUnpacked + SaverUnpacked + RemoverUnpacked +} diff --git a/internal/restic/testing.go b/internal/restic/testing.go index d2acd3ee9ab..8f86a7b2c42 100644 --- a/internal/restic/testing.go +++ b/internal/restic/testing.go @@ -190,7 +190,7 @@ func ParseDurationOrPanic(s string) Duration { // TestLoadAllSnapshots returns a list of all snapshots in the repo. // If a snapshot ID is in excludeIDs, it will not be included in the result. -func TestLoadAllSnapshots(ctx context.Context, repo Repository, excludeIDs IDSet) (snapshots Snapshots, err error) { +func TestLoadAllSnapshots(ctx context.Context, repo ListerLoaderUnpacked, excludeIDs IDSet) (snapshots Snapshots, err error) { err = ForAllSnapshots(ctx, repo, repo, excludeIDs, func(id ID, sn *Snapshot, err error) error { if err != nil { return err From 355f520936b60987678fac2d5d6b92be5b20638f Mon Sep 17 00:00:00 2001 From: Maik Riechert Date: Wed, 15 May 2024 16:54:28 +0000 Subject: [PATCH 241/893] Azure: add option to force use of CLI credential --- changelog/unreleased/pull-4799 | 5 +++++ doc/030_preparing_a_new_repo.rst | 14 ++++++++++---- doc/040_backup.rst | 1 + internal/backend/azure/azure.go | 12 ++++++++++++ internal/backend/azure/config.go | 19 +++++++++++++------ 5 files changed, 41 insertions(+), 10 deletions(-) create mode 100644 changelog/unreleased/pull-4799 diff --git a/changelog/unreleased/pull-4799 b/changelog/unreleased/pull-4799 new file mode 100644 index 00000000000..0179bc51a91 --- /dev/null +++ b/changelog/unreleased/pull-4799 @@ -0,0 +1,5 @@ +Enhancement: Add option to force use of Azure CLI credential + +A new environment variable `AZURE_FORCE_CLI_CREDENTIAL=true` allows forcing the use of Azure CLI credential, ignoring other credentials like managed identity. + +https://github.com/restic/restic/pull/4799 diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 0c50b65be31..ee0a0df5e8d 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -550,17 +550,23 @@ For authentication export one of the following variables: # For SAS $ export AZURE_ACCOUNT_SAS= -For authentication using ``az login`` set the resource group name and ensure the user has -the minimum permissions of the role assignment ``Storage Blob Data Contributor`` on Azure RBAC. +For authentication using ``az login`` ensure the user has +the minimum permissions of the role assignment ``Storage Blob Data Contributor`` on Azure RBAC +for the storage account. .. code-block:: console - $ export AZURE_RESOURCE_GROUP= $ az login -Alternatively, if run on Azure, restic will automatically uses service accounts configured +Alternatively, if run on Azure, restic will automatically use service accounts configured via the standard environment variables or Workload / Managed Identities. +To enforce the use of the Azure CLI credential when other credentials are present, set the following environment variable: + +.. code-block:: console + + $ export AZURE_FORCE_CLI_CREDENTIAL=true + Restic will by default use Azure's global domain ``core.windows.net`` as endpoint suffix. You can specify other suffixes as follows: diff --git a/doc/040_backup.rst b/doc/040_backup.rst index e125d2c656c..157296dc22a 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -673,6 +673,7 @@ environment variables. 
The following lists these environment variables: AZURE_ACCOUNT_KEY Account key for Azure AZURE_ACCOUNT_SAS Shared access signatures (SAS) for Azure AZURE_ENDPOINT_SUFFIX Endpoint suffix for Azure Storage (default: core.windows.net) + AZURE_FORCE_CLI_CREDENTIAL Force the use of Azure CLI credentials for authentication B2_ACCOUNT_ID Account ID or applicationKeyId for Backblaze B2 B2_ACCOUNT_KEY Account Key or applicationKey for Backblaze B2 diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 9f879f98990..46be3335785 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -101,6 +101,18 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { if err != nil { return nil, errors.Wrap(err, "NewAccountSASClientFromEndpointToken") } + } else if cfg.ForceCliCredential { + debug.Log(" - using AzureCLICredential") + + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return nil, errors.Wrap(err, "NewAzureCLICredential") + } + + client, err = azContainer.NewClient(url, cred, opts) + if err != nil { + return nil, errors.Wrap(err, "NewClient") + } } else { debug.Log(" - using DefaultAzureCredential") cred, err := azidentity.NewDefaultAzureCredential(nil) diff --git a/internal/backend/azure/config.go b/internal/backend/azure/config.go index 6ae431f65ea..7d69719eff3 100644 --- a/internal/backend/azure/config.go +++ b/internal/backend/azure/config.go @@ -3,6 +3,7 @@ package azure import ( "os" "path" + "strconv" "strings" "github.com/restic/restic/internal/backend" @@ -13,12 +14,13 @@ import ( // Config contains all configuration necessary to connect to an azure compatible // server. type Config struct { - AccountName string - AccountSAS options.SecretString - AccountKey options.SecretString - EndpointSuffix string - Container string - Prefix string + AccountName string + AccountSAS options.SecretString + AccountKey options.SecretString + ForceCliCredential bool + EndpointSuffix string + Container string + Prefix string Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` } @@ -73,6 +75,11 @@ func (cfg *Config) ApplyEnvironment(prefix string) { cfg.AccountSAS = options.NewSecretString(os.Getenv(prefix + "AZURE_ACCOUNT_SAS")) } + var forceCliCred, err = strconv.ParseBool(os.Getenv(prefix + "AZURE_FORCE_CLI_CREDENTIAL")) + if err == nil { + cfg.ForceCliCredential = forceCliCred + } + if cfg.EndpointSuffix == "" { cfg.EndpointSuffix = os.Getenv(prefix + "AZURE_ENDPOINT_SUFFIX") } From c56ecec9aa103a706963fa2fd71a4bc7e39ffc53 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 18 May 2024 22:15:38 +0200 Subject: [PATCH 242/893] azure: deduplicate cli and default credentials case --- internal/backend/azure/azure.go | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 46be3335785..92f64243bbb 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -101,23 +101,21 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { if err != nil { return nil, errors.Wrap(err, "NewAccountSASClientFromEndpointToken") } - } else if cfg.ForceCliCredential { - debug.Log(" - using AzureCLICredential") - - cred, err := azidentity.NewAzureCLICredential(nil) - if err != nil { - return nil, errors.Wrap(err, "NewAzureCLICredential") - } - - client, err = azContainer.NewClient(url, cred, opts) - if err != nil { - return nil, 
errors.Wrap(err, "NewClient") - } } else { - debug.Log(" - using DefaultAzureCredential") - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - return nil, errors.Wrap(err, "NewDefaultAzureCredential") + var cred azcore.TokenCredential + + if cfg.ForceCliCredential { + debug.Log(" - using AzureCLICredential") + cred, err = azidentity.NewAzureCLICredential(nil) + if err != nil { + return nil, errors.Wrap(err, "NewAzureCLICredential") + } + } else { + debug.Log(" - using DefaultAzureCredential") + cred, err = azidentity.NewDefaultAzureCredential(nil) + if err != nil { + return nil, errors.Wrap(err, "NewDefaultAzureCredential") + } } client, err = azContainer.NewClient(url, cred, opts) From 5f23baabccb561d663db5e82e396447d2a0b3766 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 18 May 2024 23:03:24 +0200 Subject: [PATCH 243/893] add retries for corrupted blobs to changelog --- changelog/unreleased/issue-4627 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/changelog/unreleased/issue-4627 b/changelog/unreleased/issue-4627 index d9705453546..95c9d4bc401 100644 --- a/changelog/unreleased/issue-4627 +++ b/changelog/unreleased/issue-4627 @@ -11,6 +11,8 @@ retried after a short timeout. Attempts to access a missing file or a truncated file will no longer be retried. This avoids unnecessary retries in those cases. +If a download yields a corrupt file or blob, then the download will be retried once. + Most parts of the new backend error handling can temporarily be disabled by setting the environment variable `RESTIC_FEATURES=backend-error-redesign=false`. Note that this feature flag will @@ -23,3 +25,4 @@ https://github.com/restic/restic/pull/4792 https://github.com/restic/restic/issues/4515 https://github.com/restic/restic/issues/1523 https://github.com/restic/restic/pull/4520 +https://github.com/restic/restic/pull/4800 From 4021e67d97b0c41e84cccd771057efe517b62219 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 20 May 2024 20:48:29 +0200 Subject: [PATCH 244/893] doc: fix tmpdir documentation for windows --- doc/040_backup.rst | 3 ++- doc/047_tuning_backup_parameters.rst | 3 ++- doc/manual_rest.rst | 9 ++++++--- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 157296dc22a..782aa703060 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -654,7 +654,8 @@ environment variables. The following lists these environment variables: RESTIC_PACK_SIZE Target size for pack files RESTIC_READ_CONCURRENCY Concurrency for file reads - TMPDIR Location for temporary files + TMPDIR Location for temporary files (except Windows) + TMP Location for temporary files (only Windows) AWS_ACCESS_KEY_ID Amazon S3 access key ID AWS_SECRET_ACCESS_KEY Amazon S3 secret access key diff --git a/doc/047_tuning_backup_parameters.rst b/doc/047_tuning_backup_parameters.rst index a6b998cfeda..650f111be8f 100644 --- a/doc/047_tuning_backup_parameters.rst +++ b/doc/047_tuning_backup_parameters.rst @@ -99,7 +99,8 @@ to a 16 MiB pack size. The side effect of increasing the pack size is requiring more disk space for temporary pack files created before uploading. The space must be available in the system default temp -directory, unless overwritten by setting the ``$TMPDIR`` environment variable. In addition, +directory, unless overwritten by setting the ``$TMPDIR`` (except Windows) environment +variable (on Windows use ``$TMP`` or ``$TEMP``). 
In addition, depending on the backend the memory usage can also increase by a similar amount. Restic requires temporary space according to the pack size, multiplied by the number of backend connections plus one. For example, if the backend uses 5 connections (the default diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index bf9554e046a..34116fe86b5 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -408,9 +408,12 @@ Temporary files During some operations (e.g. ``backup`` and ``prune``) restic uses temporary files to store data. These files will, by default, be saved to the system's temporary directory, on Linux this is usually located in -``/tmp/``. The environment variable ``TMPDIR`` can be used to specify a -different directory, e.g. to use the directory ``/var/tmp/restic-tmp`` -instead of the default, set the environment variable like this: +``/tmp/``. To specify a different directory for temporary files, set +the appropriate environment variable. On non-Windows operating systems, +use the ``TMPDIR`` environment variable. On Windows, use either the +``TMP`` or ``TEMP`` environment variable. For example, to use the +directory ``/var/tmp/restic-tmp`` instead of the default, set the +environment variable as follows: .. code-block:: console From 7f439a9c346fe175db1b02aeef2b19c5c63d73a4 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Tue, 21 May 2024 09:54:42 +0200 Subject: [PATCH 245/893] ui: Inline lineWriter into StdioWrapper --- internal/ui/stdio_wrapper.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/ui/stdio_wrapper.go b/internal/ui/stdio_wrapper.go index 42f4cc54523..6566ab67c67 100644 --- a/internal/ui/stdio_wrapper.go +++ b/internal/ui/stdio_wrapper.go @@ -37,14 +37,14 @@ func (w *StdioWrapper) Stderr() io.WriteCloser { } type lineWriter struct { - buf *bytes.Buffer + buf bytes.Buffer print func(string) } var _ io.WriteCloser = &lineWriter{} func newLineWriter(print func(string)) *lineWriter { - return &lineWriter{buf: bytes.NewBuffer(nil), print: print} + return &lineWriter{print: print} } func (w *lineWriter) Write(data []byte) (n int, err error) { From 0b56214473759b8f820b7d7ff7ab0e1ac64dd80f Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Tue, 21 May 2024 10:19:14 +0200 Subject: [PATCH 246/893] ui: Simplify stdio wrapper The StdioWrapper type is really just a pair of io.WriteClosers, so remove it in favor of a function that returns two of those. Test coverage increases because the removed code was not tested. 
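With the wrapper type gone, callers obtain the two line-buffered writers from a single call, roughly as in the cmd/restic/termstatus.go hunk below:

    // stdout and stderr are io.WriteClosers that buffer per line and print
    // through the termstatus terminal
    stdout, stderr := ui.WrapStdio(term)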
--- cmd/restic/termstatus.go | 3 +-- internal/ui/stdio_wrapper.go | 31 ++++--------------------------- 2 files changed, 5 insertions(+), 29 deletions(-) diff --git a/cmd/restic/termstatus.go b/cmd/restic/termstatus.go index cf3cd82ee9a..633e4521cf5 100644 --- a/cmd/restic/termstatus.go +++ b/cmd/restic/termstatus.go @@ -31,8 +31,7 @@ func setupTermstatus() (*termstatus.Terminal, func()) { // use the termstatus for stdout/stderr prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr - stdioWrapper := ui.NewStdioWrapper(term) - globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr() + globalOptions.stdout, globalOptions.stderr = ui.WrapStdio(term) return term, func() { // shutdown termstatus diff --git a/internal/ui/stdio_wrapper.go b/internal/ui/stdio_wrapper.go index 6566ab67c67..f7853360159 100644 --- a/internal/ui/stdio_wrapper.go +++ b/internal/ui/stdio_wrapper.go @@ -7,33 +7,10 @@ import ( "github.com/restic/restic/internal/ui/termstatus" ) -// StdioWrapper provides stdout and stderr integration with termstatus. -type StdioWrapper struct { - stdout *lineWriter - stderr *lineWriter -} - -// NewStdioWrapper initializes a new stdio wrapper that can be used in place of -// os.Stdout or os.Stderr. -func NewStdioWrapper(term *termstatus.Terminal) *StdioWrapper { - return &StdioWrapper{ - stdout: newLineWriter(term.Print), - stderr: newLineWriter(term.Error), - } -} - -// Stdout returns a writer that is line buffered and can be used in place of -// os.Stdout. On Close(), the remaining bytes are written, followed by a line -// break. -func (w *StdioWrapper) Stdout() io.WriteCloser { - return w.stdout -} - -// Stderr returns a writer that is line buffered and can be used in place of -// os.Stderr. On Close(), the remaining bytes are written, followed by a line -// break. -func (w *StdioWrapper) Stderr() io.WriteCloser { - return w.stderr +// WrapStdio returns line-buffering replacements for os.Stdout and os.Stderr. +// On Close, the remaining bytes are written, followed by a line break. +func WrapStdio(term *termstatus.Terminal) (stdout, stderr io.WriteCloser) { + return newLineWriter(term.Print), newLineWriter(term.Error) } type lineWriter struct { From 66d03c797e44a8885b40e6790168348ba4d36cc6 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Tue, 21 May 2024 10:51:05 +0200 Subject: [PATCH 247/893] ui, termstatus: Move WrapStdio Saves some imports. ui still needs to import ui/termstatus from message.go. 
--- cmd/restic/termstatus.go | 3 +-- internal/ui/{ => termstatus}/stdio_wrapper.go | 6 ++---- internal/ui/{ => termstatus}/stdio_wrapper_test.go | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) rename internal/ui/{ => termstatus}/stdio_wrapper.go (86%) rename internal/ui/{ => termstatus}/stdio_wrapper_test.go (98%) diff --git a/cmd/restic/termstatus.go b/cmd/restic/termstatus.go index 633e4521cf5..c0e9a045bfe 100644 --- a/cmd/restic/termstatus.go +++ b/cmd/restic/termstatus.go @@ -4,7 +4,6 @@ import ( "context" "sync" - "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/termstatus" ) @@ -31,7 +30,7 @@ func setupTermstatus() (*termstatus.Terminal, func()) { // use the termstatus for stdout/stderr prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr - globalOptions.stdout, globalOptions.stderr = ui.WrapStdio(term) + globalOptions.stdout, globalOptions.stderr = termstatus.WrapStdio(term) return term, func() { // shutdown termstatus diff --git a/internal/ui/stdio_wrapper.go b/internal/ui/termstatus/stdio_wrapper.go similarity index 86% rename from internal/ui/stdio_wrapper.go rename to internal/ui/termstatus/stdio_wrapper.go index f7853360159..233610ba397 100644 --- a/internal/ui/stdio_wrapper.go +++ b/internal/ui/termstatus/stdio_wrapper.go @@ -1,15 +1,13 @@ -package ui +package termstatus import ( "bytes" "io" - - "github.com/restic/restic/internal/ui/termstatus" ) // WrapStdio returns line-buffering replacements for os.Stdout and os.Stderr. // On Close, the remaining bytes are written, followed by a line break. -func WrapStdio(term *termstatus.Terminal) (stdout, stderr io.WriteCloser) { +func WrapStdio(term *Terminal) (stdout, stderr io.WriteCloser) { return newLineWriter(term.Print), newLineWriter(term.Error) } diff --git a/internal/ui/stdio_wrapper_test.go b/internal/ui/termstatus/stdio_wrapper_test.go similarity index 98% rename from internal/ui/stdio_wrapper_test.go rename to internal/ui/termstatus/stdio_wrapper_test.go index b95d9180da0..1e214f1f4f4 100644 --- a/internal/ui/stdio_wrapper_test.go +++ b/internal/ui/termstatus/stdio_wrapper_test.go @@ -1,4 +1,4 @@ -package ui +package termstatus import ( "strings" From a3633cad9e23d0bd936a394837874c7accbe97ce Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 29 Apr 2024 21:07:17 +0200 Subject: [PATCH 248/893] retry: explicitly log failed requests This simplifies finding the request in the log output that cause an operation to fail. 
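The mechanism, condensed from the hunks that follow: retryNotifyErrorWithSuccess passes the final error to the same notify callback, but with a negative delay, and the report function in global.go distinguishes the two cases on that sign:

    report := func(msg string, err error, d time.Duration) {
        if d >= 0 {
            // intermediate failure, another attempt follows after d
            Warnf("%v returned error, retrying after %v: %v\n", msg, d, err)
        } else {
            // d < 0 marks the final, unrecoverable failure
            Warnf("%v failed: %v\n", msg, err)
        }
    }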
--- cmd/restic/global.go | 6 +++- internal/backend/retry/backend_retry.go | 30 +++++++++++++------- internal/backend/retry/backend_retry_test.go | 21 ++++++++++++++ 3 files changed, 45 insertions(+), 12 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 6920caa8d82..d0facc67498 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -416,7 +416,11 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } report := func(msg string, err error, d time.Duration) { - Warnf("%v returned error, retrying after %v: %v\n", msg, d, err) + if d >= 0 { + Warnf("%v returned error, retrying after %v: %v\n", msg, d, err) + } else { + Warnf("%v failed: %v\n", msg, err) + } } success := func(msg string, retries int) { Warnf("%v operation successful after %d retries\n", msg, retries) diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index 31934ec96de..c6815413cd5 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -44,20 +44,28 @@ func New(be backend.Backend, maxTries int, report func(string, error, time.Durat // retryNotifyErrorWithSuccess is an extension of backoff.RetryNotify with notification of success after an error. // success is NOT notified on the first run of operation (only after an error). func retryNotifyErrorWithSuccess(operation backoff.Operation, b backoff.BackOff, notify backoff.Notify, success func(retries int)) error { + var operationWrapper backoff.Operation if success == nil { - return backoff.RetryNotify(operation, b, notify) - } - retries := 0 - operationWrapper := func() error { - err := operation() - if err != nil { - retries++ - } else if retries > 0 { - success(retries) + operationWrapper = operation + } else { + retries := 0 + operationWrapper = func() error { + err := operation() + if err != nil { + retries++ + } else if retries > 0 { + success(retries) + } + return err } - return err } - return backoff.RetryNotify(operationWrapper, b, notify) + err := backoff.RetryNotify(operationWrapper, b, notify) + + if err != nil && notify != nil { + // log final error + notify(err, -1) + } + return err } var fastRetries = false diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go index a515b0b7d03..de86e6cf6af 100644 --- a/internal/backend/retry/backend_retry_test.go +++ b/internal/backend/retry/backend_retry_test.go @@ -497,3 +497,24 @@ func TestNotifyWithSuccessIsCalled(t *testing.T) { t.Fatalf("Success should have been called only once, but was called %d times instead", successCalled) } } + +func TestNotifyWithSuccessFinalError(t *testing.T) { + operation := func() error { + return errors.New("expected error in test") + } + + notifyCalled := 0 + notify := func(error, time.Duration) { + notifyCalled++ + } + + successCalled := 0 + success := func(retries int) { + successCalled++ + } + + err := retryNotifyErrorWithSuccess(operation, backoff.WithMaxRetries(&backoff.ZeroBackOff{}, 5), notify, success) + test.Assert(t, err.Error() == "expected error in test", "wrong error message %v", err) + test.Equals(t, 6, notifyCalled, "notify should have been called 6 times") + test.Equals(t, 0, successCalled, "success should not have been called") +} From a60ee9b764fe25e1f923e4faeab7f1e3bfb9f057 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 29 Apr 2024 21:12:21 +0200 Subject: [PATCH 249/893] retry: limit retries based on elapsed time not count Depending on how long an operation takes to 
fail, the total retry duration can currently vary between 1.5 and 15 minutes. In particular for temporarily interrupted network connections, the former timeout is too short. Thus always use a limit of 15 minutes. --- cmd/restic/global.go | 2 +- internal/backend/retry/backend_retry.go | 24 ++++++++++++-------- internal/backend/retry/backend_retry_test.go | 14 +++++++----- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index d0facc67498..c954a42707c 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -425,7 +425,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi success := func(msg string, retries int) { Warnf("%v operation successful after %d retries\n", msg, retries) } - be = retry.New(be, 10, report, success) + be = retry.New(be, 15*time.Minute, report, success) // wrap backend if a test specified a hook if opts.backendTestHook != nil { diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index c6815413cd5..fb2e6cf98ff 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -18,9 +18,9 @@ import ( // backoff. type Backend struct { backend.Backend - MaxTries int - Report func(string, error, time.Duration) - Success func(string, int) + MaxElapsedTime time.Duration + Report func(string, error, time.Duration) + Success func(string, int) failedLoads sync.Map } @@ -32,12 +32,12 @@ var _ backend.Backend = &Backend{} // backoff. report is called with a description and the error, if one occurred. // success is called with the number of retries before a successful operation // (it is not called if it succeeded on the first try) -func New(be backend.Backend, maxTries int, report func(string, error, time.Duration), success func(string, int)) *Backend { +func New(be backend.Backend, maxElapsedTime time.Duration, report func(string, error, time.Duration), success func(string, int)) *Backend { return &Backend{ - Backend: be, - MaxTries: maxTries, - Report: report, - Success: success, + Backend: be, + MaxElapsedTime: maxElapsedTime, + Report: report, + Success: success, } } @@ -82,9 +82,15 @@ func (be *Backend) retry(ctx context.Context, msg string, f func() error) error } bo := backoff.NewExponentialBackOff() + bo.MaxElapsedTime = be.MaxElapsedTime + if fastRetries { // speed up integration tests bo.InitialInterval = 1 * time.Millisecond + maxElapsedTime := 200 * time.Millisecond + if bo.MaxElapsedTime > maxElapsedTime { + bo.MaxElapsedTime = maxElapsedTime + } } err := retryNotifyErrorWithSuccess( @@ -97,7 +103,7 @@ func (be *Backend) retry(ctx context.Context, msg string, f func() error) error } return err }, - backoff.WithContext(backoff.WithMaxRetries(bo, uint64(be.MaxTries)), ctx), + backoff.WithContext(bo, ctx), func(err error, d time.Duration) { if be.Report != nil { be.Report(msg, err, d) diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go index de86e6cf6af..ce0b99637cc 100644 --- a/internal/backend/retry/backend_retry_test.go +++ b/internal/backend/retry/backend_retry_test.go @@ -193,8 +193,9 @@ func TestBackendListRetryErrorBackend(t *testing.T) { } TestFastRetries(t) - const maxRetries = 2 - retryBackend := New(be, maxRetries, nil, nil) + const maxElapsedTime = 10 * time.Millisecond + now := time.Now() + retryBackend := New(be, maxElapsedTime, nil, nil) var listed []string err := retryBackend.List(context.TODO(), backend.PackFile, func(fi 
backend.FileInfo) error { @@ -207,8 +208,9 @@ func TestBackendListRetryErrorBackend(t *testing.T) { t.Fatalf("wrong error returned, want %v, got %v", ErrBackendTest, err) } - if retries != maxRetries+1 { - t.Fatalf("List was called %d times, wanted %v", retries, maxRetries+1) + duration := time.Since(now) + if duration > 100*time.Millisecond { + t.Fatalf("list retries took %v, expected at most 10ms", duration) } test.Equals(t, names[:2], listed) @@ -327,7 +329,7 @@ func TestBackendLoadCircuitBreaker(t *testing.T) { // trip the circuit breaker for file "other" err := retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd) test.Equals(t, otherError, err, "unexpected error") - test.Equals(t, 3, attempt) + test.Equals(t, 2, attempt) attempt = 0 err = retryBackend.Load(context.TODO(), backend.Handle{Name: "other"}, 0, 0, nilRd) @@ -407,7 +409,7 @@ func TestBackendRetryPermanent(t *testing.T) { return errors.New("something") }) test.Assert(t, !be.IsPermanentErrorFn(err), "error unexpectedly considered permanent %v", err) - test.Equals(t, 3, attempt) + test.Equals(t, 2, attempt) } From 512cd6ef07ee7ee78b36149dd78cba80e6a1aac4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 29 Apr 2024 21:16:24 +0200 Subject: [PATCH 250/893] retry: ensure that there's always at least one retry Previously, if an operation failed after 15 minutes, then it would never be retried. This means that large backend requests are more unreliable than smaller ones. --- internal/backend/retry/backend_retry.go | 26 ++++++++++++- internal/backend/retry/backend_retry_test.go | 40 ++++++++++++++++++++ 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index fb2e6cf98ff..e40cce1222b 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -68,6 +68,30 @@ func retryNotifyErrorWithSuccess(operation backoff.Operation, b backoff.BackOff, return err } +func withRetryAtLeastOnce(delegate *backoff.ExponentialBackOff) *retryAtLeastOnce { + return &retryAtLeastOnce{delegate: delegate} +} + +type retryAtLeastOnce struct { + delegate *backoff.ExponentialBackOff + numTries uint64 +} + +func (b *retryAtLeastOnce) NextBackOff() time.Duration { + delay := b.delegate.NextBackOff() + + b.numTries++ + if b.numTries == 1 && b.delegate.Stop == delay { + return b.delegate.InitialInterval + } + return delay +} + +func (b *retryAtLeastOnce) Reset() { + b.numTries = 0 + b.delegate.Reset() +} + var fastRetries = false func (be *Backend) retry(ctx context.Context, msg string, f func() error) error { @@ -103,7 +127,7 @@ func (be *Backend) retry(ctx context.Context, msg string, f func() error) error } return err }, - backoff.WithContext(bo, ctx), + backoff.WithContext(withRetryAtLeastOnce(bo), ctx), func(err error, d time.Duration) { if be.Report != nil { be.Report(msg, err, d) diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go index ce0b99637cc..cd0c4d48b8a 100644 --- a/internal/backend/retry/backend_retry_test.go +++ b/internal/backend/retry/backend_retry_test.go @@ -520,3 +520,43 @@ func TestNotifyWithSuccessFinalError(t *testing.T) { test.Equals(t, 6, notifyCalled, "notify should have been called 6 times") test.Equals(t, 0, successCalled, "success should not have been called") } + +type testClock struct { + Time time.Time +} + +func (c *testClock) Now() time.Time { + return c.Time +} + +func TestRetryAtLeastOnce(t *testing.T) { + 
expBackOff := backoff.NewExponentialBackOff() + expBackOff.InitialInterval = 500 * time.Millisecond + expBackOff.RandomizationFactor = 0 + expBackOff.MaxElapsedTime = 5 * time.Second + expBackOff.Multiplier = 2 // guarantee numerical stability + clock := &testClock{Time: time.Now()} + expBackOff.Clock = clock + expBackOff.Reset() + + retry := withRetryAtLeastOnce(expBackOff) + + // expire backoff + clock.Time = clock.Time.Add(10 * time.Second) + delay := retry.NextBackOff() + test.Equals(t, expBackOff.InitialInterval, delay, "must retry at least once") + + delay = retry.NextBackOff() + test.Equals(t, expBackOff.Stop, delay, "must not retry more than once") + + // test reset behavior + retry.Reset() + test.Equals(t, uint64(0), retry.numTries, "numTries should be reset to 0") + + // Verify that after reset, NextBackOff returns the initial interval again + delay = retry.NextBackOff() + test.Equals(t, expBackOff.InitialInterval, delay, "retries must work after reset") + + delay = retry.NextBackOff() + test.Equals(t, expBackOff.InitialInterval*time.Duration(expBackOff.Multiplier), delay, "retries must work after reset") +} From 98709a437250b9da0d15bf5d579c71deac295a5f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 29 Apr 2024 21:19:15 +0200 Subject: [PATCH 251/893] retry: reduce total number of retries Retries in restic try to solve two main problems: - retry a temporarily failed operation - tolerate temporary network interruptions The first problem only requires a few retries, whereas the last one benefits primarily from spreading the requests over a longer duration. Increasing the default multiplier and the initial interval works for both cases. The first few retries only take a few seconds, while later retries quickly reach the maximum interval of one minute. This ensures that the total number of retries issued by restic will remain at around 21 retries for a 15 minute period. As the concurrency in restic is bounded, retries drastically reduce the number of requests sent to a backend. This helps to prevent overloading the backend. --- internal/backend/retry/backend_retry.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index e40cce1222b..7f2b4f745b7 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -108,6 +108,9 @@ func (be *Backend) retry(ctx context.Context, msg string, f func() error) error bo := backoff.NewExponentialBackOff() bo.MaxElapsedTime = be.MaxElapsedTime + bo.InitialInterval = 1 * time.Second + bo.Multiplier = 2 + if fastRetries { // speed up integration tests bo.InitialInterval = 1 * time.Millisecond From b1266867d22fc6a424f1c3d0e31777bdfdd53de4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 29 Apr 2024 21:43:28 +0200 Subject: [PATCH 252/893] repository: wait max 1 minutes for lock removal if context is canceled The toplevel context in restic only canceled if the user interrupts a restic operation. If the network connection has failed this can require waiting the full retry duration of 15 minutes which is a bad user experience for interactive usage. Thus limit the delay to one minute in this case. 
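The delay is implemented by deriving a context that outlives its parent's cancellation by a fixed amount of time. Below is a self-contained sketch of that idea; the helper name and the short delay are illustrative only, the real delayedCancelContext helper and the one-minute UnlockCancelDelay are introduced in the diff that follows.

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // delayedCancel mirrors the helper added below: the returned context is
    // canceled only a fixed delay after the parent context was canceled, so
    // cleanup work such as lock removal gets a bounded amount of extra time.
    func delayedCancel(parent context.Context, delay time.Duration) (context.Context, context.CancelFunc) {
        ctx, cancel := context.WithCancel(context.Background())
        go func() {
            select {
            case <-parent.Done():
                time.Sleep(delay)
                cancel()
            case <-ctx.Done():
            }
        }()
        return ctx, cancel
    }

    func main() {
        parent, interrupt := context.WithCancel(context.Background())
        // 100ms is used here only to keep the demonstration short.
        ctx, cancel := delayedCancel(parent, 100*time.Millisecond)
        defer cancel()

        interrupt() // simulate the user pressing Ctrl-C

        select {
        case <-ctx.Done():
            fmt.Println("cleanup context canceled immediately (not expected)")
        case <-time.After(50 * time.Millisecond):
            fmt.Println("cleanup context is still usable shortly after the interrupt")
        }

        <-ctx.Done() // once the delay has passed, the cleanup context is canceled too
        fmt.Println("cleanup context canceled after the delay")
    }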
--- internal/repository/lock.go | 2 +- internal/restic/lock.go | 45 ++++++++++++++++++++++++++++++------ internal/restic/lock_test.go | 24 +++++++++---------- 3 files changed, 51 insertions(+), 20 deletions(-) diff --git a/internal/repository/lock.go b/internal/repository/lock.go index 7035e3c596f..fd46066d1f4 100644 --- a/internal/repository/lock.go +++ b/internal/repository/lock.go @@ -132,7 +132,7 @@ func (l *locker) refreshLocks(ctx context.Context, backend backend.Backend, lock // remove the lock from the repo debug.Log("unlocking repository with lock %v", lock) - if err := lock.Unlock(); err != nil { + if err := lock.Unlock(ctx); err != nil { debug.Log("error while unlocking: %v", err) logger("error while unlocking: %v", err) } diff --git a/internal/restic/lock.go b/internal/restic/lock.go index 1e393c7eda8..49c7cedf22d 100644 --- a/internal/restic/lock.go +++ b/internal/restic/lock.go @@ -17,6 +17,10 @@ import ( "github.com/restic/restic/internal/debug" ) +// UnlockCancelDelay bounds the duration how long lock cleanup operations will wait +// if the passed in context was canceled. +const UnlockCancelDelay time.Duration = 1 * time.Minute + // Lock represents a process locking the repository for an operation. // // There are two types of locks: exclusive and non-exclusive. There may be many @@ -136,7 +140,7 @@ func newLock(ctx context.Context, repo Unpacked, excl bool) (*Lock, error) { time.Sleep(waitBeforeLockCheck) if err = lock.checkForOtherLocks(ctx); err != nil { - _ = lock.Unlock() + _ = lock.Unlock(ctx) return nil, err } @@ -220,12 +224,15 @@ func (l *Lock) createLock(ctx context.Context) (ID, error) { } // Unlock removes the lock from the repository. -func (l *Lock) Unlock() error { +func (l *Lock) Unlock(ctx context.Context) error { if l == nil || l.lockID == nil { return nil } - return l.repo.RemoveUnpacked(context.TODO(), LockFile, *l.lockID) + ctx, cancel := delayedCancelContext(ctx, UnlockCancelDelay) + defer cancel() + + return l.repo.RemoveUnpacked(ctx, LockFile, *l.lockID) } var StaleLockTimeout = 30 * time.Minute @@ -266,6 +273,23 @@ func (l *Lock) Stale() bool { return false } +func delayedCancelContext(parentCtx context.Context, delay time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + select { + case <-parentCtx.Done(): + case <-ctx.Done(): + return + } + + time.Sleep(delay) + cancel() + }() + + return ctx, cancel +} + // Refresh refreshes the lock by creating a new file in the backend with a new // timestamp. Afterwards the old lock is removed. func (l *Lock) Refresh(ctx context.Context) error { @@ -285,7 +309,10 @@ func (l *Lock) Refresh(ctx context.Context) error { oldLockID := l.lockID l.lockID = &id - return l.repo.RemoveUnpacked(context.TODO(), LockFile, *oldLockID) + ctx, cancel := delayedCancelContext(ctx, UnlockCancelDelay) + defer cancel() + + return l.repo.RemoveUnpacked(ctx, LockFile, *oldLockID) } // RefreshStaleLock is an extended variant of Refresh that can also refresh stale lock files. 
@@ -312,15 +339,19 @@ func (l *Lock) RefreshStaleLock(ctx context.Context) error { time.Sleep(waitBeforeLockCheck) exists, err = l.checkExistence(ctx) + + ctx, cancel := delayedCancelContext(ctx, UnlockCancelDelay) + defer cancel() + if err != nil { // cleanup replacement lock - _ = l.repo.RemoveUnpacked(context.TODO(), LockFile, id) + _ = l.repo.RemoveUnpacked(ctx, LockFile, id) return err } if !exists { // cleanup replacement lock - _ = l.repo.RemoveUnpacked(context.TODO(), LockFile, id) + _ = l.repo.RemoveUnpacked(ctx, LockFile, id) return ErrRemovedLock } @@ -331,7 +362,7 @@ func (l *Lock) RefreshStaleLock(ctx context.Context) error { oldLockID := l.lockID l.lockID = &id - return l.repo.RemoveUnpacked(context.TODO(), LockFile, *oldLockID) + return l.repo.RemoveUnpacked(ctx, LockFile, *oldLockID) } func (l *Lock) checkExistence(ctx context.Context) (bool, error) { diff --git a/internal/restic/lock_test.go b/internal/restic/lock_test.go index b96b11e35ee..606ed210de6 100644 --- a/internal/restic/lock_test.go +++ b/internal/restic/lock_test.go @@ -22,7 +22,7 @@ func TestLock(t *testing.T) { lock, err := restic.NewLock(context.TODO(), repo) rtest.OK(t, err) - rtest.OK(t, lock.Unlock()) + rtest.OK(t, lock.Unlock(context.TODO())) } func TestDoubleUnlock(t *testing.T) { @@ -32,9 +32,9 @@ func TestDoubleUnlock(t *testing.T) { lock, err := restic.NewLock(context.TODO(), repo) rtest.OK(t, err) - rtest.OK(t, lock.Unlock()) + rtest.OK(t, lock.Unlock(context.TODO())) - err = lock.Unlock() + err = lock.Unlock(context.TODO()) rtest.Assert(t, err != nil, "double unlock didn't return an error, got %v", err) } @@ -49,8 +49,8 @@ func TestMultipleLock(t *testing.T) { lock2, err := restic.NewLock(context.TODO(), repo) rtest.OK(t, err) - rtest.OK(t, lock1.Unlock()) - rtest.OK(t, lock2.Unlock()) + rtest.OK(t, lock1.Unlock(context.TODO())) + rtest.OK(t, lock2.Unlock(context.TODO())) } type failLockLoadingBackend struct { @@ -75,7 +75,7 @@ func TestMultipleLockFailure(t *testing.T) { _, err = restic.NewLock(context.TODO(), repo) rtest.Assert(t, err != nil, "unreadable lock file did not result in an error") - rtest.OK(t, lock1.Unlock()) + rtest.OK(t, lock1.Unlock(context.TODO())) } func TestLockExclusive(t *testing.T) { @@ -83,7 +83,7 @@ func TestLockExclusive(t *testing.T) { elock, err := restic.NewExclusiveLock(context.TODO(), repo) rtest.OK(t, err) - rtest.OK(t, elock.Unlock()) + rtest.OK(t, elock.Unlock(context.TODO())) } func TestLockOnExclusiveLockedRepo(t *testing.T) { @@ -99,8 +99,8 @@ func TestLockOnExclusiveLockedRepo(t *testing.T) { rtest.Assert(t, restic.IsAlreadyLocked(err), "create normal lock with exclusively locked repo didn't return the correct error") - rtest.OK(t, lock.Unlock()) - rtest.OK(t, elock.Unlock()) + rtest.OK(t, lock.Unlock(context.TODO())) + rtest.OK(t, elock.Unlock(context.TODO())) } func TestExclusiveLockOnLockedRepo(t *testing.T) { @@ -116,8 +116,8 @@ func TestExclusiveLockOnLockedRepo(t *testing.T) { rtest.Assert(t, restic.IsAlreadyLocked(err), "create normal lock with exclusively locked repo didn't return the correct error") - rtest.OK(t, lock.Unlock()) - rtest.OK(t, elock.Unlock()) + rtest.OK(t, lock.Unlock(context.TODO())) + rtest.OK(t, elock.Unlock(context.TODO())) } func createFakeLock(repo restic.SaverUnpacked, t time.Time, pid int) (restic.ID, error) { @@ -296,7 +296,7 @@ func testLockRefresh(t *testing.T, refresh func(lock *restic.Lock) error) { rtest.OK(t, err) rtest.Assert(t, lock2.Time.After(time0), "expected a later timestamp after lock refresh") - rtest.OK(t, 
lock.Unlock()) + rtest.OK(t, lock.Unlock(context.TODO())) } func TestLockRefresh(t *testing.T) { From 723247c8e50f92dde4389564956d784ae6760460 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 20 May 2024 20:37:28 +0200 Subject: [PATCH 253/893] add changelog for longer retries --- changelog/unreleased/issue-4627 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/changelog/unreleased/issue-4627 b/changelog/unreleased/issue-4627 index 95c9d4bc401..6f6a00098e6 100644 --- a/changelog/unreleased/issue-4627 +++ b/changelog/unreleased/issue-4627 @@ -9,7 +9,9 @@ downloading are now forcibly interrupted. This ensures that stuck requests are retried after a short timeout. Attempts to access a missing file or a truncated file will no longer be retried. -This avoids unnecessary retries in those cases. +This avoids unnecessary retries in those cases. All other backend requests are +retried for up to 15 minutes. This ensures that a temporarily interrupted network +connections can be tolerated. If a download yields a corrupt file or blob, then the download will be retried once. @@ -26,3 +28,4 @@ https://github.com/restic/restic/issues/4515 https://github.com/restic/restic/issues/1523 https://github.com/restic/restic/pull/4520 https://github.com/restic/restic/pull/4800 +https://github.com/restic/restic/pull/4784 From e4a48085aee84712c23f8357445f6c6c9ded68a1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 24 May 2024 20:16:58 +0200 Subject: [PATCH 254/893] backend/retry: feature flag new retry behavior --- internal/backend/retry/backend_retry.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index 7f2b4f745b7..d5134d43338 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -108,9 +108,10 @@ func (be *Backend) retry(ctx context.Context, msg string, f func() error) error bo := backoff.NewExponentialBackOff() bo.MaxElapsedTime = be.MaxElapsedTime - bo.InitialInterval = 1 * time.Second - bo.Multiplier = 2 - + if feature.Flag.Enabled(feature.BackendErrorRedesign) { + bo.InitialInterval = 1 * time.Second + bo.Multiplier = 2 + } if fastRetries { // speed up integration tests bo.InitialInterval = 1 * time.Millisecond @@ -120,6 +121,12 @@ func (be *Backend) retry(ctx context.Context, msg string, f func() error) error } } + var b backoff.BackOff = withRetryAtLeastOnce(bo) + if !feature.Flag.Enabled(feature.BackendErrorRedesign) { + // deprecated behavior + b = backoff.WithMaxRetries(b, 10) + } + err := retryNotifyErrorWithSuccess( func() error { err := f() @@ -130,7 +137,7 @@ func (be *Backend) retry(ctx context.Context, msg string, f func() error) error } return err }, - backoff.WithContext(withRetryAtLeastOnce(bo), ctx), + backoff.WithContext(b, ctx), func(err error, d time.Duration) { if be.Report != nil { be.Report(msg, err, d) From d106ad6921823fd3bc2e6dcf9ae9b853f6ab68c8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 18 May 2024 23:28:35 +0200 Subject: [PATCH 255/893] restic: regenerate snapshot keep policy golden test files --- .../restic/testdata/policy_keep_snapshots_36 | 412 ++++++++++---- .../restic/testdata/policy_keep_snapshots_37 | 515 ++++++++++++++---- .../restic/testdata/policy_keep_snapshots_38 | 362 ++++++++---- .../restic/testdata/policy_keep_snapshots_39 | 67 ++- 4 files changed, 1045 insertions(+), 311 deletions(-) diff --git a/internal/restic/testdata/policy_keep_snapshots_36 
b/internal/restic/testdata/policy_keep_snapshots_36 index 75a3a5b4645..cce4cf537fb 100644 --- a/internal/restic/testdata/policy_keep_snapshots_36 +++ b/internal/restic/testdata/policy_keep_snapshots_36 @@ -590,7 +590,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -601,7 +603,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -612,7 +616,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -623,7 +629,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -634,7 +642,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -645,7 +655,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -656,7 +668,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -667,7 +681,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -678,7 +694,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -689,7 +707,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -700,7 +720,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -711,7 +733,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -722,7 +746,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -733,7 +759,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -744,7 +772,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -755,7 +785,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -766,7 +798,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -777,7 +811,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -788,7 +824,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -799,7 +837,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -810,7 +850,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -821,7 +863,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -832,7 +876,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -843,7 +889,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -854,7 +902,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -865,7 +915,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": 
-1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -876,7 +928,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -887,7 +941,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -898,7 +954,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -909,7 +967,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -920,7 +980,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -935,7 +997,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -950,7 +1014,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -968,7 +1034,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -979,7 +1047,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -990,7 +1060,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1001,7 +1073,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1012,7 +1086,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1023,7 +1099,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1034,7 +1112,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1045,7 +1125,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1056,7 +1138,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1067,7 +1151,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1078,7 +1164,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1089,7 +1177,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1100,7 +1190,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1111,7 +1203,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1122,7 +1216,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1133,7 +1229,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1144,7 +1242,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1155,7 +1255,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1166,7 +1268,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1177,7 +1281,9 @@ "matches": [ "last snapshot" ], - 
"counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1188,7 +1294,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1199,7 +1307,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1210,7 +1320,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1221,7 +1333,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1232,7 +1346,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1243,7 +1359,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1254,7 +1372,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1265,7 +1385,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1276,7 +1398,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1287,7 +1411,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1298,7 +1424,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1309,7 +1437,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1320,7 +1450,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1331,7 +1463,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1346,7 +1480,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1360,7 +1496,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1374,7 +1512,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1388,7 +1528,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1402,7 +1544,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1416,7 +1560,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1430,7 +1576,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1444,7 +1592,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1458,7 +1608,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1472,7 +1624,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1486,7 +1640,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1500,7 +1656,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1514,7 +1672,9 
@@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1528,7 +1688,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1542,7 +1704,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1556,7 +1720,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1567,7 +1733,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1578,7 +1746,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1589,7 +1759,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1600,7 +1772,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1611,7 +1785,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1622,7 +1798,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1633,7 +1811,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1644,7 +1824,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1655,7 +1837,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1666,7 +1850,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1677,7 +1863,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1688,7 +1876,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1699,7 +1889,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1710,7 +1902,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1721,7 +1915,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1732,7 +1928,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1743,7 +1941,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1754,7 +1954,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1765,7 +1967,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } }, { "snapshot": { @@ -1776,7 +1980,9 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1} + "counters": { + "last": -1 + } } ] } \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_37 b/internal/restic/testdata/policy_keep_snapshots_37 index f6ffa40ea06..9856a83d69b 100644 --- a/internal/restic/testdata/policy_keep_snapshots_37 +++ b/internal/restic/testdata/policy_keep_snapshots_37 @@ -591,7 +591,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + 
"counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -603,7 +606,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -614,7 +620,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -626,7 +635,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -638,7 +650,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -650,7 +665,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -662,7 +680,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -674,7 +695,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -686,7 +710,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -698,7 +725,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -709,7 +739,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -720,7 +753,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -731,7 +767,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -743,7 +782,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -755,7 +797,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -767,7 +812,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -779,7 +827,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -791,7 +842,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -802,7 +856,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -814,7 +871,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -826,7 +886,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -838,7 +901,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { 
"snapshot": { @@ -850,7 +916,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -862,7 +931,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -874,7 +946,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -885,7 +960,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -897,7 +975,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -909,7 +990,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -921,7 +1005,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -933,7 +1020,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -944,7 +1034,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -959,7 +1052,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -974,7 +1070,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -992,7 +1091,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1004,7 +1106,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1016,7 +1121,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1028,7 +1136,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1040,7 +1151,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1052,7 +1166,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1064,7 +1181,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1076,7 +1196,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1088,7 +1211,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1100,7 +1226,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1112,7 
+1241,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1124,7 +1256,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1136,7 +1271,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1148,7 +1286,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1160,7 +1301,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1172,7 +1316,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1184,7 +1331,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1196,7 +1346,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1208,7 +1361,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1220,7 +1376,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1232,7 +1391,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1244,7 +1406,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1256,7 +1421,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1268,7 +1436,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1280,7 +1451,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1292,7 +1466,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1303,7 +1480,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1315,7 +1495,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1327,7 +1510,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1339,7 +1525,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1351,7 +1540,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { 
@@ -1363,7 +1555,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1375,7 +1570,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1387,7 +1585,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1403,7 +1604,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1418,7 +1622,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1432,7 +1639,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1447,7 +1657,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1462,7 +1675,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1477,7 +1693,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1492,7 +1711,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1507,7 +1729,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1522,7 +1747,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1537,7 +1765,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1552,7 +1783,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1567,7 +1801,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1582,7 +1819,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1597,7 +1837,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1612,7 +1855,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1627,7 +1873,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1639,7 +1888,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1651,7 +1903,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { 
"snapshot": { @@ -1663,7 +1918,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1675,7 +1933,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1687,7 +1948,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1699,7 +1963,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1711,7 +1978,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1723,7 +1993,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1735,7 +2008,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1747,7 +2023,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1759,7 +2038,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1771,7 +2053,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1783,7 +2068,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1795,7 +2083,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1807,7 +2098,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1819,7 +2113,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1830,7 +2127,10 @@ "matches": [ "last snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1842,7 +2142,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1854,7 +2157,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } }, { "snapshot": { @@ -1866,7 +2172,10 @@ "last snapshot", "hourly snapshot" ], - "counters": {"Last": -1, "Hourly": -1} + "counters": { + "last": -1, + "hourly": -1 + } } ] } \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_38 b/internal/restic/testdata/policy_keep_snapshots_38 index 6bfdd57f181..f5d7136d421 100644 --- a/internal/restic/testdata/policy_keep_snapshots_38 +++ b/internal/restic/testdata/policy_keep_snapshots_38 @@ -507,7 +507,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -518,7 +520,9 @@ "matches": [ "hourly snapshot" ], - "counters": 
{"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -529,7 +533,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -540,7 +546,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -551,7 +559,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -562,7 +572,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -573,7 +585,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -584,7 +598,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -595,7 +611,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -606,7 +624,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -617,7 +637,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -628,7 +650,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -639,7 +663,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -650,7 +676,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -661,7 +689,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -672,7 +702,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -683,7 +715,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -694,7 +728,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -705,7 +741,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -716,7 +754,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -727,7 +767,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -738,7 +780,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -749,7 +793,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -760,7 +806,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -771,7 +819,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -782,7 +832,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -793,7 +845,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -804,7 +858,9 @@ "matches": [ "hourly snapshot" ], - 
"counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -815,7 +871,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -826,7 +884,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -837,7 +897,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -848,7 +910,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -859,7 +923,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -870,7 +936,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -881,7 +949,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -892,7 +962,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -903,7 +975,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -914,7 +988,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -925,7 +1001,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -936,7 +1014,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -947,7 +1027,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -958,7 +1040,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -969,7 +1053,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -980,7 +1066,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -991,7 +1079,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1002,7 +1092,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1013,7 +1105,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1024,7 +1118,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1035,7 +1131,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1046,7 +1144,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1057,7 +1157,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1068,7 +1170,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1079,7 +1183,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1090,7 +1196,9 @@ 
"matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1101,7 +1209,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1112,7 +1222,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1127,7 +1239,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1141,7 +1255,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1155,7 +1271,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1169,7 +1287,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1183,7 +1303,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1197,7 +1319,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1211,7 +1335,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1225,7 +1351,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1239,7 +1367,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1253,7 +1383,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1267,7 +1399,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1281,7 +1415,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1295,7 +1431,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1309,7 +1447,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1323,7 +1463,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1334,7 +1476,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1345,7 +1489,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1356,7 +1502,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1367,7 +1515,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1378,7 +1528,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1389,7 +1541,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1400,7 +1554,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1411,7 +1567,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { 
+ "hourly": -1 + } }, { "snapshot": { @@ -1422,7 +1580,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1433,7 +1593,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1444,7 +1606,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1455,7 +1619,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1466,7 +1632,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1477,7 +1645,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1488,7 +1658,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1499,7 +1671,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1510,7 +1684,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1521,7 +1697,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } }, { "snapshot": { @@ -1532,7 +1710,9 @@ "matches": [ "hourly snapshot" ], - "counters": {"Hourly": -1} + "counters": { + "hourly": -1 + } } ] -} +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_39 b/internal/restic/testdata/policy_keep_snapshots_39 index 4b111503bc1..f5fb4b1bf6e 100644 --- a/internal/restic/testdata/policy_keep_snapshots_39 +++ b/internal/restic/testdata/policy_keep_snapshots_39 @@ -74,10 +74,15 @@ "matches": [ "daily snapshot", "weekly snapshot", - "monthly snapshot", - "yearly snapshot" + "monthly snapshot", + "yearly snapshot" ], - "counters": {"Daily": 2, "Weekly": 1, "Monthly": -1, "Yearly": -1} + "counters": { + "daily": 2, + "weekly": 1, + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -89,7 +94,11 @@ "daily snapshot", "weekly snapshot" ], - "counters": {"Daily": 1, "Monthly": -1, "Yearly": -1} + "counters": { + "daily": 1, + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -100,7 +109,10 @@ "matches": [ "daily snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -112,7 +124,10 @@ "monthly snapshot", "yearly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -123,7 +138,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -134,7 +152,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -145,7 +166,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -157,7 +181,10 @@ "monthly snapshot", "yearly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -171,7 +198,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + 
"yearly": -1 + } }, { "snapshot": { @@ -182,7 +212,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -193,7 +226,10 @@ "matches": [ "monthly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } }, { "snapshot": { @@ -205,7 +241,10 @@ "monthly snapshot", "yearly snapshot" ], - "counters": {"Monthly": -1, "Yearly": -1} + "counters": { + "monthly": -1, + "yearly": -1 + } } ] } \ No newline at end of file From c0e1f36830bf9eda7bd1e660270ebc388e590176 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 22:44:32 +0200 Subject: [PATCH 256/893] forget: refuse deleting the last snapshot in a snapshot group `--keep-tag invalid-tag` was previously able to wipe all snapshots in a repository. As a user specified a `--keep-*` option this is likely unintentional. This forbid deleting all snapshot if a `--keep-*` option was specified to prevent data loss. (Not specifying such an option currently also causes the command to abort) --- cmd/restic/cmd_forget.go | 5 +++++ internal/restic/snapshot_find.go | 4 ++-- internal/restic/snapshot_group.go | 14 ++++++++++++++ 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 92eeed4a174..6d04d0f704e 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -3,6 +3,7 @@ package main import ( "context" "encoding/json" + "fmt" "io" "strconv" @@ -248,6 +249,10 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy) + if !policy.Empty() && len(keep) == 0 { + return fmt.Errorf("refusing to delete last snapshot of snapshot group %v", key) + } + if len(keep) != 0 && !gopts.Quiet && !gopts.JSON { printer.P("keep %d snapshots:\n", len(keep)) PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact) diff --git a/internal/restic/snapshot_find.go b/internal/restic/snapshot_find.go index cb761aee35b..6d1ab9a7a22 100644 --- a/internal/restic/snapshot_find.go +++ b/internal/restic/snapshot_find.go @@ -24,7 +24,7 @@ type SnapshotFilter struct { TimestampLimit time.Time } -func (f *SnapshotFilter) empty() bool { +func (f *SnapshotFilter) Empty() bool { return len(f.Hosts)+len(f.Tags)+len(f.Paths) == 0 } @@ -173,7 +173,7 @@ func (f *SnapshotFilter) FindAll(ctx context.Context, be Lister, loader LoaderUn } // Give the user some indication their filters are not used. - if !usedFilter && !f.empty() { + if !usedFilter && !f.Empty() { return fn("filters", nil, errors.Errorf("explicit snapshot ids are given")) } return nil diff --git a/internal/restic/snapshot_group.go b/internal/restic/snapshot_group.go index 964a230b3af..f4e1ed3843d 100644 --- a/internal/restic/snapshot_group.go +++ b/internal/restic/snapshot_group.go @@ -66,6 +66,20 @@ type SnapshotGroupKey struct { Tags []string `json:"tags"` } +func (s *SnapshotGroupKey) String() string { + var parts []string + if s.Hostname != "" { + parts = append(parts, fmt.Sprintf("host %v", s.Hostname)) + } + if len(s.Paths) != 0 { + parts = append(parts, fmt.Sprintf("path %v", s.Paths)) + } + if len(s.Tags) != 0 { + parts = append(parts, fmt.Sprintf("tags %v", s.Tags)) + } + return strings.Join(parts, ", ") +} + // GroupSnapshots takes a list of snapshots and a grouping criteria and creates // a grouped list of snapshots. 
func GroupSnapshots(snapshots Snapshots, groupBy SnapshotGroupByOptions) (map[string]Snapshots, bool, error) { From 5b7952e426b21358557669fcdc7960e011fc5416 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 22:46:49 +0200 Subject: [PATCH 257/893] forget: return error if no policy was specified --- cmd/restic/cmd_forget.go | 75 +++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 39 deletions(-) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 6d04d0f704e..e568bf11d1b 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -222,58 +222,55 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption Tags: opts.KeepTags, } - if policy.Empty() && len(args) == 0 { - printer.P("no policy was specified, no snapshots will be removed\n") + if policy.Empty() { + return errors.Fatal("no policy was specified, no snapshots will be removed") } - if !policy.Empty() { - printer.P("Applying Policy: %v\n", policy) + printer.P("Applying Policy: %v\n", policy) - for k, snapshotGroup := range snapshotGroups { - if gopts.Verbose >= 1 && !gopts.JSON { - err = PrintSnapshotGroupHeader(globalOptions.stdout, k) - if err != nil { - return err - } - } - - var key restic.SnapshotGroupKey - if json.Unmarshal([]byte(k), &key) != nil { + for k, snapshotGroup := range snapshotGroups { + if gopts.Verbose >= 1 && !gopts.JSON { + err = PrintSnapshotGroupHeader(globalOptions.stdout, k) + if err != nil { return err } + } - var fg ForgetGroup - fg.Tags = key.Tags - fg.Host = key.Hostname - fg.Paths = key.Paths + var key restic.SnapshotGroupKey + if json.Unmarshal([]byte(k), &key) != nil { + return err + } - keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy) + var fg ForgetGroup + fg.Tags = key.Tags + fg.Host = key.Hostname + fg.Paths = key.Paths - if !policy.Empty() && len(keep) == 0 { - return fmt.Errorf("refusing to delete last snapshot of snapshot group %v", key) - } + keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy) - if len(keep) != 0 && !gopts.Quiet && !gopts.JSON { - printer.P("keep %d snapshots:\n", len(keep)) - PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact) - printer.P("\n") - } - fg.Keep = asJSONSnapshots(keep) + if !policy.Empty() && len(keep) == 0 { + return fmt.Errorf("refusing to delete last snapshot of snapshot group \"%v\"", key.String()) + } + if len(keep) != 0 && !gopts.Quiet && !gopts.JSON { + printer.P("keep %d snapshots:\n", len(keep)) + PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact) + printer.P("\n") + } + fg.Keep = asJSONSnapshots(keep) - if len(remove) != 0 && !gopts.Quiet && !gopts.JSON { - printer.P("remove %d snapshots:\n", len(remove)) - PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact) - printer.P("\n") - } - fg.Remove = asJSONSnapshots(remove) + if len(remove) != 0 && !gopts.Quiet && !gopts.JSON { + printer.P("remove %d snapshots:\n", len(remove)) + PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact) + printer.P("\n") + } + fg.Remove = asJSONSnapshots(remove) - fg.Reasons = asJSONKeeps(reasons) + fg.Reasons = asJSONKeeps(reasons) - jsonGroups = append(jsonGroups, &fg) + jsonGroups = append(jsonGroups, &fg) - for _, sn := range remove { - removeSnIDs.Insert(*sn.ID()) - } + for _, sn := range remove { + removeSnIDs.Insert(*sn.ID()) } } } From 57f9739573435af041a2b85a2e05c5ebfc931784 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 15 Apr 2024 22:02:14 +0200 Subject: [PATCH 258/893] forget: Add 
--unsafe-allow-remove-all option To prevent accidentally wiping all snapshots from a repository, that option can only be used if either a snapshot filter or a keep policy is specified. Essentially, the option allows `forget --tag something --unsafe-allow-remove-all` calls to remove all snapshots with a specific tag. --- cmd/restic/cmd_forget.go | 12 +- internal/restic/snapshot_policy.go | 16 +- .../restic/testdata/policy_keep_snapshots_0 | 1781 +---------------- 3 files changed, 17 insertions(+), 1792 deletions(-) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index e568bf11d1b..328b2827124 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -92,6 +92,8 @@ type ForgetOptions struct { WithinYearly restic.Duration KeepTags restic.TagLists + UnsafeAllowRemoveAll bool + restic.SnapshotFilter Compact bool @@ -121,6 +123,7 @@ func init() { f.VarP(&forgetOptions.WithinMonthly, "keep-within-monthly", "", "keep monthly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot") f.VarP(&forgetOptions.WithinYearly, "keep-within-yearly", "", "keep yearly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot") f.Var(&forgetOptions.KeepTags, "keep-tag", "keep snapshots with this `taglist` (can be specified multiple times)") + f.BoolVar(&forgetOptions.UnsafeAllowRemoveAll, "unsafe-allow-remove-all", false, "allow deleting all snapshots of a snapshot group") initMultiSnapshotFilter(f, &forgetOptions.SnapshotFilter, false) f.StringArrayVar(&forgetOptions.Hosts, "hostname", nil, "only consider snapshots with the given `hostname` (can be specified multiple times)") @@ -223,7 +226,14 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } if policy.Empty() { - return errors.Fatal("no policy was specified, no snapshots will be removed") + if opts.UnsafeAllowRemoveAll { + if opts.SnapshotFilter.Empty() { + return errors.Fatal("--unsafe-allow-remove-all is not allowed unless a snapshot filter option is specified") + } + // UnsafeAllowRemoveAll together with snapshot filter is fine + } else { + return errors.Fatal("no policy was specified, no snapshots will be removed") + } } printer.P("Applying Policy: %v\n", policy) diff --git a/internal/restic/snapshot_policy.go b/internal/restic/snapshot_policy.go index 0ff0c5ec829..950c26c9182 100644 --- a/internal/restic/snapshot_policy.go +++ b/internal/restic/snapshot_policy.go @@ -94,7 +94,11 @@ func (e ExpirePolicy) String() (s string) { s += fmt.Sprintf("all snapshots within %s of the newest", e.Within) } - s = "keep " + s + if s == "" { + s = "remove" + } else { + s = "keep " + s + } return s } @@ -186,16 +190,6 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason // sort newest snapshots first sort.Stable(list) - if p.Empty() { - for _, sn := range list { - reasons = append(reasons, KeepReason{ - Snapshot: sn, - Matches: []string{"policy is empty"}, - }) - } - return list, remove, reasons - } - if len(list) == 0 { return list, nil, nil } diff --git a/internal/restic/testdata/policy_keep_snapshots_0 b/internal/restic/testdata/policy_keep_snapshots_0 index 11ca587c8af..96cc25cc7a5 100644 --- a/internal/restic/testdata/policy_keep_snapshots_0 +++ b/internal/restic/testdata/policy_keep_snapshots_0 @@ -1,1782 +1,3 @@ { - "keep": [ - { - "time": "2016-01-18T12:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-12T21:08:03Z", - "tree": null, - "paths": null - }, - { - "time": 
"2016-01-12T21:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-09T21:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-08T20:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-07T10:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-06T08:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-05T09:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T16:23:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T12:30:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T12:28:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T12:24:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T12:23:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T11:23:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T10:23:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-03T07:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-01T07:08:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-01T01:03:03Z", - "tree": null, - "paths": null - }, - { - "time": "2016-01-01T01:02:03Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-21T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-18T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-15T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-13T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-12T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-11-08T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": [ - "path1", - "path2" - ], - "tags": [ - "foo", - "bar" - ] - }, - { - "time": "2015-10-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-11T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-09T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-08T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-06T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-05T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-02T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-10-01T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-11T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-09T10:20:30Z", - "tree": null, - "paths": 
null - }, - { - "time": "2015-09-08T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-06T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-05T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-02T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-09-01T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-21T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-18T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-15T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-13T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-12T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2015-08-08T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-11-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-11-21T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-11-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-11-18T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-11-15T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - { - "time": "2014-11-13T10:20:30.1Z", - "tree": null, - "paths": null, - "tags": [ - "bar" - ] - }, - { - "time": "2014-11-13T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-11-12T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-11-10T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-11-08T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-20T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-11T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-10T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-09T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-08T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-06T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-05T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-02T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-10-01T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - { - "time": "2014-09-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-11T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-09T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-08T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-06T10:20:30Z", - "tree": null, - 
"paths": null - }, - { - "time": "2014-09-05T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-02T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-09-01T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-22T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-21T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-20T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-18T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-15T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-13T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-12T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-10T10:20:30Z", - "tree": null, - "paths": null - }, - { - "time": "2014-08-08T10:20:30Z", - "tree": null, - "paths": null - } - ], - "reasons": [ - { - "snapshot": { - "time": "2016-01-18T12:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-12T21:08:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-12T21:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-09T21:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-08T20:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-07T10:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-06T08:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-05T09:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T16:23:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T12:30:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T12:28:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T12:24:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T12:23:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T11:23:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-04T10:23:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-03T07:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-01T07:08:03Z", - "tree": null, - "paths": null - }, 
- "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-01T01:03:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2016-01-01T01:02:03Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-21T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-18T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-15T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-13T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-12T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-11-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-22T10:20:30Z", - "tree": null, - "paths": [ - "path1", - "path2" - ], - "tags": [ - "foo", - "bar" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-11T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-09T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, 
- { - "snapshot": { - "time": "2015-10-06T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-05T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-02T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-10-01T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-11T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-09T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-06T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-05T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-02T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-09-01T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-21T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-18T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-15T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-13T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-12T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2015-08-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": 
"2015-08-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-21T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-18T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-15T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo", - "bar" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-13T10:20:30.1Z", - "tree": null, - "paths": null, - "tags": [ - "bar" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-13T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-12T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-10T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-11-08T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-22T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-20T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-11T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-10T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-09T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-08T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-06T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-05T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-02T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-10-01T10:20:30Z", - "tree": null, - "paths": null, - "tags": [ - "foo" - ] - }, - "matches": [ - 
"policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-11T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-09T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-06T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-05T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-02T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-09-01T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-22T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-21T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-20T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-18T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-15T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-13T10:20:30.1Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-13T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-12T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-10T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - }, - { - "snapshot": { - "time": "2014-08-08T10:20:30Z", - "tree": null, - "paths": null - }, - "matches": [ - "policy is empty" - ], - "counters": {} - } - ] + "keep": null } \ No newline at end of file From 82ca0030b7667e46c00cddaa55db0aa66e383d19 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 00:09:12 +0200 Subject: [PATCH 259/893] forget: test --unsafe-allow-remove-all and --keep-tags safety check --- cmd/restic/cmd_forget_integration_test.go | 55 +++++++++++++++++-- cmd/restic/cmd_prune_integration_test.go | 4 +- .../cmd_repair_snapshots_integration_test.go | 4 +- 3 
files changed, 55 insertions(+), 8 deletions(-) diff --git a/cmd/restic/cmd_forget_integration_test.go b/cmd/restic/cmd_forget_integration_test.go index e4cdb744e81..06487a84126 100644 --- a/cmd/restic/cmd_forget_integration_test.go +++ b/cmd/restic/cmd_forget_integration_test.go @@ -2,18 +2,65 @@ package main import ( "context" + "path/filepath" + "strings" "testing" + "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/termstatus" ) -func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { - opts := ForgetOptions{} +func testRunForgetMayFail(gopts GlobalOptions, opts ForgetOptions, args ...string) error { pruneOpts := PruneOptions{ MaxUnused: "5%", } - rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { return runForget(context.TODO(), opts, pruneOpts, gopts, term, args) - })) + }) +} + +func testRunForget(t testing.TB, gopts GlobalOptions, opts ForgetOptions, args ...string) { + rtest.OK(t, testRunForgetMayFail(gopts, opts, args...)) +} + +func TestRunForgetSafetyNet(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + + opts := BackupOptions{ + Host: "example", + } + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) + testListSnapshots(t, env.gopts, 2) + + // --keep-tags invalid + err := testRunForgetMayFail(env.gopts, ForgetOptions{ + KeepTags: restic.TagLists{restic.TagList{"invalid"}}, + GroupBy: restic.SnapshotGroupByOptions{Host: true, Path: true}, + }) + rtest.Assert(t, strings.Contains(err.Error(), `refusing to delete last snapshot of snapshot group "host example, path`), "wrong error message got %v", err) + + // disallow `forget --unsafe-allow-remove-all` + err = testRunForgetMayFail(env.gopts, ForgetOptions{ + UnsafeAllowRemoveAll: true, + }) + rtest.Assert(t, strings.Contains(err.Error(), `--unsafe-allow-remove-all is not allowed unless a snapshot filter option is specified`), "wrong error message got %v", err) + + // disallow `forget` without options + err = testRunForgetMayFail(env.gopts, ForgetOptions{}) + rtest.Assert(t, strings.Contains(err.Error(), `no policy was specified, no snapshots will be removed`), "wrong error message got %v", err) + + // `forget --host example --unsafe-allow-remmove-all` should work + testRunForget(t, env.gopts, ForgetOptions{ + UnsafeAllowRemoveAll: true, + GroupBy: restic.SnapshotGroupByOptions{Host: true, Path: true}, + SnapshotFilter: restic.SnapshotFilter{ + Hosts: []string{opts.Host}, + }, + }) + testListSnapshots(t, env.gopts, 0) } diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index 715adea9a6f..5eb16a2ea0e 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -75,7 +75,7 @@ func createPrunableRepo(t *testing.T, env *testEnvironment) { testListSnapshots(t, env.gopts, 3) testRunForgetJSON(t, env.gopts) - testRunForget(t, env.gopts, firstSnapshot.String()) + testRunForget(t, env.gopts, ForgetOptions{}, firstSnapshot.String()) } func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) { @@ -129,7 +129,7 @@ func TestPruneWithDamagedRepository(t *testing.T) { // create and delete snapshot to create unused blobs 
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) firstSnapshot := testListSnapshots(t, env.gopts, 1)[0] - testRunForget(t, env.gopts, firstSnapshot.String()) + testRunForget(t, env.gopts, ForgetOptions{}, firstSnapshot.String()) oldPacks := listPacks(env.gopts, t) diff --git a/cmd/restic/cmd_repair_snapshots_integration_test.go b/cmd/restic/cmd_repair_snapshots_integration_test.go index 34cd186d3ff..9f65c9328a8 100644 --- a/cmd/restic/cmd_repair_snapshots_integration_test.go +++ b/cmd/restic/cmd_repair_snapshots_integration_test.go @@ -62,7 +62,7 @@ func TestRepairSnapshotsWithLostData(t *testing.T) { testRunCheckMustFail(t, env.gopts) // repository must be ok after removing the broken snapshots - testRunForget(t, env.gopts, snapshotIDs[0].String(), snapshotIDs[1].String()) + testRunForget(t, env.gopts, ForgetOptions{}, snapshotIDs[0].String(), snapshotIDs[1].String()) testListSnapshots(t, env.gopts, 2) _, err := testRunCheckOutput(env.gopts, false) rtest.OK(t, err) @@ -86,7 +86,7 @@ func TestRepairSnapshotsWithLostTree(t *testing.T) { // remove tree for foo/bar and the now completely broken first snapshot removePacks(env.gopts, t, restic.NewIDSet(oldPacks...)) - testRunForget(t, env.gopts, oldSnapshot[0].String()) + testRunForget(t, env.gopts, ForgetOptions{}, oldSnapshot[0].String()) testRunCheckMustFail(t, env.gopts) // repair From 3f468088983177d3b87b009ddd8c489a91a3ffc6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 00:26:12 +0200 Subject: [PATCH 260/893] add forget safety net changelogs --- changelog/unreleased/issue-4568 | 13 +++++++++++++ changelog/unreleased/pull-4764 | 8 ++++++++ 2 files changed, 21 insertions(+) create mode 100644 changelog/unreleased/issue-4568 create mode 100644 changelog/unreleased/pull-4764 diff --git a/changelog/unreleased/issue-4568 b/changelog/unreleased/issue-4568 new file mode 100644 index 00000000000..4d44809c5e7 --- /dev/null +++ b/changelog/unreleased/issue-4568 @@ -0,0 +1,13 @@ +Bugfix: Prevent `forget --keep-tags invalid` from deleting all snapshots + +Running `forget --keep-tags invalid`, where the tag `invalid` does not +exist in the repository, would remove all snapshots. This is especially +problematic if the tag name contains a typo. + +The `forget` command now fails with an error if all snapshots in a snapshot +group would be deleted. This prevents the above example from deleting all +snapshots. + +https://github.com/restic/restic/issues/4568 +https://github.com/restic/restic/pull/4764 +https://forum.restic.net/t/delete-all-snapshots-in-one-command-is-this-feature-intentional/6923/3 diff --git a/changelog/unreleased/pull-4764 b/changelog/unreleased/pull-4764 new file mode 100644 index 00000000000..61b2edac939 --- /dev/null +++ b/changelog/unreleased/pull-4764 @@ -0,0 +1,8 @@ +Enhancement: Remove all snapshots using `forget --unsafe-allow-remove-all` + +The forget command now supports the `--unsafe-allow-remove-all` option. It must +always be combined with a snapshot filter (by host, path or tag). +For example the command `forget --tag example --unsafe-allow-remove-all`, +removes all snapshots with tag `example`. 
+ +https://github.com/restic/restic/pull/4764 From 7948912b311cc3a304fa6c804cb2fa4f24c9e709 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 00:34:18 +0200 Subject: [PATCH 261/893] document --unsafe-allow-remove-all option --- doc/060_forget.rst | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/doc/060_forget.rst b/doc/060_forget.rst index 113098d0c36..b8d206c97c1 100644 --- a/doc/060_forget.rst +++ b/doc/060_forget.rst @@ -182,7 +182,9 @@ The ``forget`` command accepts the following policy options: - ``--keep-yearly n`` for the last ``n`` years which have one or more snapshots, keep only the most recent one for each year. - ``--keep-tag`` keep all snapshots which have all tags specified by - this option (can be specified multiple times). + this option (can be specified multiple times). The ``forget`` command will + exit with an error if all snapshots in a snapshot group would be removed + as none of them have the specified tags. - ``--keep-within duration`` keep all snapshots having a timestamp within the specified duration of the latest snapshot, where ``duration`` is a number of years, months, days, and hours. E.g. ``2y5m7d3h`` will keep all @@ -336,12 +338,23 @@ year and yearly for the last 75 years, you can instead specify ``forget --keep-within-yearly 75y`` (note that `1w` is not a recognized duration, so you will have to specify `7d` instead). + +Removing all snapshots +====================== + For safety reasons, restic refuses to act on an "empty" policy. For example, if one were to specify ``--keep-last 0`` to forget *all* snapshots in the repository, restic will respond that no snapshots will be removed. To delete all snapshots, use ``--keep-last 1`` and then finally remove the last snapshot manually (by passing the ID to ``forget``). +Since restic 0.17.0, it is possible to delete all snapshots for a specific +host, tag or path using the ``--unsafe-allow-remove-all`` option. The option +must always be combined with a snapshot filter (by host, path or tag). +For example the command ``forget --tag example --unsafe-allow-remove-all`` +removes all snapshots with tag ``example``. + + Security considerations in append-only mode =========================================== From 3cc6827f09f75f94904cbfeb3f1497af6510a56a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 24 May 2024 20:41:19 +0200 Subject: [PATCH 262/893] forget: add feature flag for safe --keep-tags behavior --- changelog/unreleased/issue-4568 | 4 ++++ cmd/restic/cmd_forget.go | 3 ++- internal/feature/registry.go | 2 ++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/changelog/unreleased/issue-4568 b/changelog/unreleased/issue-4568 index 4d44809c5e7..fedf373eae0 100644 --- a/changelog/unreleased/issue-4568 +++ b/changelog/unreleased/issue-4568 @@ -8,6 +8,10 @@ The `forget` command now fails with an error if all snapshots in a snapshot group would be deleted. This prevents the above example from deleting all snapshots. +It is possible to temporarily disable the new check by setting the environment variable +`RESTIC_FEATURES=safe-forget-keep-tags=false`. Note that this feature flag +will be removed in the next minor restic version. 
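For illustration, the two escape hatches described in the changelog entries above look roughly like this on the command line (repository, password and host options are omitted, and the tag `example` is a placeholder):

  # remove every snapshot carrying the tag "example"; a snapshot filter is mandatory
  $ restic forget --tag example --unsafe-allow-remove-all

  # temporarily opt out of the new --keep-tag safety check (restores the previous behaviour)
  $ RESTIC_FEATURES=safe-forget-keep-tags=false restic forget --keep-tag example
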
+ https://github.com/restic/restic/issues/4568 https://github.com/restic/restic/pull/4764 https://forum.restic.net/t/delete-all-snapshots-in-one-command-is-this-feature-intentional/6923/3 diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 328b2827124..9c40b1d093d 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -8,6 +8,7 @@ import ( "strconv" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" @@ -258,7 +259,7 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy) - if !policy.Empty() && len(keep) == 0 { + if feature.Flag.Enabled(feature.SafeForgetKeepTags) && !policy.Empty() && len(keep) == 0 { return fmt.Errorf("refusing to delete last snapshot of snapshot group \"%v\"", key.String()) } if len(keep) != 0 && !gopts.Quiet && !gopts.JSON { diff --git a/internal/feature/registry.go b/internal/feature/registry.go index ac410514028..74d8a2f6132 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -9,6 +9,7 @@ const ( DeprecateLegacyIndex FlagName = "deprecate-legacy-index" DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout" DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" + SafeForgetKeepTags FlagName = "safe-forget-keep-tags" ) func init() { @@ -17,5 +18,6 @@ func init() { DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."}, DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. 
Will be removed in a future restic version after repository format 3 is available"}, + SafeForgetKeepTags: {Type: Beta, Description: "prevent deleting all snapshots if the tag passed to `forget --keep-tags tagname` does not exist"}, }) } From 4df887406ff42320ca5d157e66847c3fba36ae5b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 12:41:56 +0200 Subject: [PATCH 263/893] repository: inline MasterIndex interface into Repository interface --- cmd/restic/cmd_cat.go | 2 +- cmd/restic/cmd_copy.go | 6 ++-- cmd/restic/cmd_debug.go | 2 +- cmd/restic/cmd_find.go | 6 ++-- cmd/restic/cmd_recover.go | 2 +- cmd/restic/cmd_stats.go | 4 +-- cmd/restic/integration_helpers_test.go | 4 +-- internal/archiver/archiver.go | 4 +-- internal/checker/checker.go | 24 +++++++------- internal/index/master_index_test.go | 2 +- internal/pack/pack.go | 4 +-- internal/repository/check.go | 3 +- internal/repository/prune.go | 19 ++++++----- internal/repository/repack.go | 2 +- internal/repository/repack_test.go | 13 +++----- internal/repository/repair_index.go | 4 +-- internal/repository/repair_pack.go | 2 +- internal/repository/repair_pack_test.go | 6 ++-- internal/repository/repository.go | 33 ++++++++++++++----- internal/repository/repository_test.go | 4 +-- internal/restic/repository.go | 43 ++++++++++++++----------- internal/restorer/restorer.go | 2 +- 22 files changed, 103 insertions(+), 88 deletions(-) diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index 8d11a9dc429..e776b67a14d 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -168,7 +168,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { bh := restic.BlobHandle{ID: id, Type: t} - if !repo.Index().Has(bh) { + if !repo.HasBlob(bh) { continue } diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index ad6c58a2526..26b16a374c5 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -187,7 +187,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep packList := restic.NewIDSet() enqueue := func(h restic.BlobHandle) { - pb := srcRepo.Index().Lookup(h) + pb := srcRepo.LookupBlob(h) copyBlobs.Insert(h) for _, p := range pb { packList.Insert(p.PackID) @@ -202,7 +202,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep // Do we already have this tree blob? treeHandle := restic.BlobHandle{ID: tree.ID, Type: restic.TreeBlob} - if !dstRepo.Index().Has(treeHandle) { + if !dstRepo.HasBlob(treeHandle) { // copy raw tree bytes to avoid problems if the serialization changes enqueue(treeHandle) } @@ -212,7 +212,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep // Copy the blobs for this file. 
for _, blobID := range entry.Content { h := restic.BlobHandle{Type: restic.DataBlob, ID: blobID} - if !dstRepo.Index().Has(h) { + if !dstRepo.HasBlob(h) { enqueue(h) } } diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 9fb6969d03b..7b0cdb53e1c 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -492,7 +492,7 @@ func examinePack(ctx context.Context, opts DebugExamineOptions, repo restic.Repo blobsLoaded := false // examine all data the indexes have for the pack file - for b := range repo.Index().ListPacks(ctx, restic.NewIDSet(id)) { + for b := range repo.ListPacksFromIndex(ctx, restic.NewIDSet(id)) { blobs := b.Blobs if len(blobs) == 0 { continue diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 81df0ab9882..7ad8886c836 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -465,7 +465,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc // remember which packs were found in the index indexPackIDs := make(map[string]struct{}) - err := f.repo.Index().Each(wctx, func(pb restic.PackedBlob) { + err := f.repo.ListBlobs(wctx, func(pb restic.PackedBlob) { idStr := pb.PackID.String() // keep entry in packIDs as Each() returns individual index entries matchingID := false @@ -503,15 +503,13 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc } func (f *Finder) findObjectPack(id string, t restic.BlobType) { - idx := f.repo.Index() - rid, err := restic.ParseID(id) if err != nil { Printf("Note: cannot find pack for object '%s', unable to parse ID: %v\n", id, err) return } - blobs := idx.Lookup(restic.BlobHandle{ID: rid, Type: t}) + blobs := f.repo.LookupBlob(restic.BlobHandle{ID: rid, Type: t}) if len(blobs) == 0 { Printf("Object %s not found in the index\n", rid.Str()) return diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index debaa4e5b5f..726f1bf652c 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -61,7 +61,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { // tree. If it is not referenced, we have a root tree. 
trees := make(map[restic.ID]bool) - err = repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err = repo.ListBlobs(ctx, func(blob restic.PackedBlob) { if blob.Type == restic.TreeBlob { trees[blob.Blob.ID] = false } diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index a7891e5b036..3bec18f4cf5 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -124,7 +124,7 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args if opts.countMode == countModeRawData { // the blob handles have been collected, but not yet counted for blobHandle := range stats.blobs { - pbs := repo.Index().Lookup(blobHandle) + pbs := repo.LookupBlob(blobHandle) if len(pbs) == 0 { return fmt.Errorf("blob %v not found", blobHandle) } @@ -378,7 +378,7 @@ func statsDebugBlobs(ctx context.Context, repo restic.Repository) ([restic.NumBl hist[i] = newSizeHistogram(2 * chunker.MaxSize) } - err := repo.Index().Each(ctx, func(pb restic.PackedBlob) { + err := repo.ListBlobs(ctx, func(pb restic.PackedBlob) { hist[pb.Type].Add(uint64(pb.Length)) }) diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 2812eda6d7e..978deab3dda 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -252,7 +252,7 @@ func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet { rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) { + rtest.OK(t, r.ListBlobs(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } @@ -280,7 +280,7 @@ func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, rem rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) { + rtest.OK(t, r.ListBlobs(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index c1f73eea664..10034afa14a 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -276,7 +276,7 @@ func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) (*rest } func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { - if arch.Repo.Index().Has(restic.BlobHandle{ID: id, Type: restic.TreeBlob}) { + if arch.Repo.HasBlob(restic.BlobHandle{ID: id, Type: restic.TreeBlob}) { err = errors.Errorf("tree %v could not be loaded; the repository could be damaged: %v", id, err) } else { err = errors.Errorf("tree %v is not known; the repository could be damaged, run `repair index` to try to repair it", id) @@ -390,7 +390,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult { func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool { // check if all blobs are contained in index for _, id := range previous.Content { - if !arch.Repo.Index().Has(restic.BlobHandle{ID: id, Type: restic.DataBlob}) { + if !arch.Repo.HasBlob(restic.BlobHandle{ID: id, Type: restic.DataBlob}) { return false } } diff --git a/internal/checker/checker.go b/internal/checker/checker.go index dc83aef5b33..09b1dd7eb89 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -91,9 +91,9 @@ func (c *Checker) LoadSnapshots(ctx context.Context) error { return err } -func computePackTypes(ctx context.Context, idx restic.MasterIndex) (map[restic.ID]restic.BlobType, error) { +func computePackTypes(ctx 
context.Context, idx restic.ListBlobser) (map[restic.ID]restic.BlobType, error) { packs := make(map[restic.ID]restic.BlobType) - err := idx.Each(ctx, func(pb restic.PackedBlob) { + err := idx.ListBlobs(ctx, func(pb restic.PackedBlob) { tpe, exists := packs[pb.PackID] if exists { if pb.Type != tpe { @@ -177,12 +177,18 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e return hints, append(errs, err) } + err = c.repo.SetIndex(c.masterIndex) + if err != nil { + debug.Log("SetIndex returned error: %v", err) + errs = append(errs, err) + } + // compute pack size using index entries - c.packs, err = pack.Size(ctx, c.masterIndex, false) + c.packs, err = pack.Size(ctx, c.repo, false) if err != nil { return hints, append(errs, err) } - packTypes, err := computePackTypes(ctx, c.masterIndex) + packTypes, err := computePackTypes(ctx, c.repo) if err != nil { return hints, append(errs, err) } @@ -203,12 +209,6 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e } } - err = c.repo.SetIndex(c.masterIndex) - if err != nil { - debug.Log("SetIndex returned error: %v", err) - errs = append(errs, err) - } - return hints, errs } @@ -488,7 +488,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles, er ctx, cancel := context.WithCancel(ctx) defer cancel() - err = c.repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err = c.repo.ListBlobs(ctx, func(blob restic.PackedBlob) { h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} if !c.blobRefs.M.Has(h) { debug.Log("blob %v not referenced", h) @@ -573,7 +573,7 @@ func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *p } // push packs to ch - for pbs := range c.repo.Index().ListPacks(ctx, packSet) { + for pbs := range c.repo.ListPacksFromIndex(ctx, packSet) { size := packs[pbs.PackID] debug.Log("listed %v", pbs.PackID) select { diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go index c3560a7fb3a..36a028768d5 100644 --- a/internal/index/master_index_test.go +++ b/internal/index/master_index_test.go @@ -362,7 +362,7 @@ func testIndexSave(t *testing.T, version uint) { t.Fatal(err) } - err = repo.Index().Save(context.TODO(), repo, nil, nil, restic.MasterIndexSaveOpts{}) + err = repo.SaveIndex(context.TODO(), nil, nil, restic.MasterIndexSaveOpts{}) if err != nil { t.Fatalf("unable to save new index: %v", err) } diff --git a/internal/pack/pack.go b/internal/pack/pack.go index 7d8d87e71d6..57957ce91a4 100644 --- a/internal/pack/pack.go +++ b/internal/pack/pack.go @@ -389,10 +389,10 @@ func CalculateHeaderSize(blobs []restic.Blob) int { // If onlyHdr is set to true, only the size of the header is returned // Note that this function only gives correct sizes, if there are no // duplicates in the index. 
-func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) (map[restic.ID]int64, error) { +func Size(ctx context.Context, mi restic.ListBlobser, onlyHdr bool) (map[restic.ID]int64, error) { packSize := make(map[restic.ID]int64) - err := mi.Each(ctx, func(blob restic.PackedBlob) { + err := mi.ListBlobs(ctx, func(blob restic.PackedBlob) { size, ok := packSize[blob.PackID] if !ok { size = headerSize diff --git a/internal/repository/check.go b/internal/repository/check.go index 8018f490238..05605db8695 100644 --- a/internal/repository/check.go +++ b/internal/repository/check.go @@ -158,11 +158,10 @@ func checkPackInner(ctx context.Context, r *Repository, id restic.ID, blobs []re errs = append(errs, errors.Errorf("pack header size does not match, want %v, got %v", idxHdrSize, hdrSize)) } - idx := r.Index() for _, blob := range blobs { // Check if blob is contained in index and position is correct idxHas := false - for _, pb := range idx.Lookup(blob.BlobHandle) { + for _, pb := range r.LookupBlob(blob.BlobHandle) { if pb.PackID == id && pb.Blob == blob { idxHas = true break diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 479439e6abf..66eab28b289 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -7,7 +7,6 @@ import ( "sort" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" @@ -67,7 +66,7 @@ type PrunePlan struct { removePacks restic.IDSet // packs to remove ignorePacks restic.IDSet // packs to ignore when rebuilding the index - repo restic.Repository + repo *Repository stats PruneStats opts PruneOptions } @@ -89,7 +88,7 @@ type packInfoWithID struct { // PlanPrune selects which files to rewrite and which to delete and which blobs to keep. // Also some summary statistics are returned. 
-func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (*PrunePlan, error) { +func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (*PrunePlan, error) { var stats PruneStats if opts.UnsafeRecovery { @@ -109,7 +108,7 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g } printer.P("searching used packs...\n") - keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) + keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo, usedBlobs, &stats, printer) if err != nil { return nil, err } @@ -124,7 +123,7 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g blobCount := keepBlobs.Len() // when repacking, we do not want to keep blobs which are // already contained in kept packs, so delete them from keepBlobs - err := repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err := repo.ListBlobs(ctx, func(blob restic.PackedBlob) { if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { return } @@ -151,11 +150,11 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g return &plan, nil } -func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { +func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { // iterate over all blobs in index to find out which blobs are duplicates // The counter in usedBlobs describes how many instances of the blob exist in the repository index // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist - err := idx.Each(ctx, func(blob restic.PackedBlob) { + err := idx.ListBlobs(ctx, func(blob restic.PackedBlob) { bh := blob.BlobHandle count, ok := usedBlobs[bh] if ok { @@ -205,7 +204,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re hasDuplicates := false // iterate over all blobs in index to generate packInfo - err = idx.Each(ctx, func(blob restic.PackedBlob) { + err = idx.ListBlobs(ctx, func(blob restic.PackedBlob) { ip := indexPack[blob.PackID] // Set blob type if not yet set @@ -260,7 +259,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // - if there are no used blobs in a pack, possibly mark duplicates as "unused" if hasDuplicates { // iterate again over all blobs in index (this is pretty cheap, all in-mem) - err = idx.Each(ctx, func(blob restic.PackedBlob) { + err = idx.ListBlobs(ctx, func(blob restic.PackedBlob) { bh := blob.BlobHandle count, ok := usedBlobs[bh] // skip non-duplicate, aka. 
normal blobs @@ -581,7 +580,7 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e if plan.opts.UnsafeRecovery { printer.P("deleting index files\n") - indexFiles := repo.Index().(*index.MasterIndex).IDs() + indexFiles := repo.idx.IDs() err = deleteFiles(ctx, false, repo, indexFiles, restic.IndexFile, printer) if err != nil { return errors.Fatalf("%s", err) diff --git a/internal/repository/repack.go b/internal/repository/repack.go index 5bedcfa56d1..8c9ca28bb58 100644 --- a/internal/repository/repack.go +++ b/internal/repository/repack.go @@ -54,7 +54,7 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito downloadQueue := make(chan restic.PackBlobs) wg.Go(func() error { defer close(downloadQueue) - for pbs := range repo.Index().ListPacks(wgCtx, packs) { + for pbs := range repo.ListPacksFromIndex(wgCtx, packs) { var packBlobs []restic.Blob keepMutex.Lock() // filter out unnecessary blobs diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index 3fd56ccb120..e0f1b4254b9 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -145,9 +145,8 @@ func listFiles(t *testing.T, repo restic.Lister, tpe backend.FileType) restic.ID func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSet) restic.IDSet { packs := restic.NewIDSet() - idx := repo.Index() for h := range blobs { - list := idx.Lookup(h) + list := repo.LookupBlob(h) if len(list) == 0 { t.Fatal("Failed to find blob", h.ID.Str(), "with type", h.Type) } @@ -195,7 +194,7 @@ func rebuildIndex(t *testing.T, repo restic.Repository) { }) rtest.OK(t, err) - err = repo.Index().Save(context.TODO(), repo, restic.NewIDSet(), obsoleteIndexes, restic.MasterIndexSaveOpts{}) + err = repo.SaveIndex(context.TODO(), restic.NewIDSet(), obsoleteIndexes, restic.MasterIndexSaveOpts{}) rtest.OK(t, err) } @@ -252,10 +251,8 @@ func testRepack(t *testing.T, version uint) { } } - idx := repo.Index() - for h := range keepBlobs { - list := idx.Lookup(h) + list := repo.LookupBlob(h) if len(list) == 0 { t.Errorf("unable to find blob %v in repo", h.ID.Str()) continue @@ -318,10 +315,8 @@ func testRepackCopy(t *testing.T, version uint) { rebuildIndex(t, dstRepo) reloadIndex(t, dstRepo) - idx := dstRepo.Index() - for h := range keepBlobs { - list := idx.Lookup(h) + list := dstRepo.LookupBlob(h) if len(list) == 0 { t.Errorf("unable to find blob %v in repo", h.ID.Str()) continue diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index a6e732b4452..a7d94fcf82e 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -54,7 +54,7 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, if err != nil { return err } - packSizeFromIndex, err = pack.Size(ctx, repo.Index(), false) + packSizeFromIndex, err = pack.Size(ctx, repo, false) if err != nil { return err } @@ -115,7 +115,7 @@ func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks printer.P("rebuilding index\n") bar := printer.NewCounter("packs processed") - return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ + return repo.SaveIndex(ctx, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ SaveProgress: bar, DeleteProgress: func() *progress.Counter { return printer.NewCounter("old indexes deleted") diff --git a/internal/repository/repair_pack.go b/internal/repository/repair_pack.go index a0bd56012d9..cac7aac1020 
100644 --- a/internal/repository/repair_pack.go +++ b/internal/repository/repair_pack.go @@ -21,7 +21,7 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, wg.Go(func() error { // examine all data the indexes have for the pack file - for b := range repo.Index().ListPacks(wgCtx, ids) { + for b := range repo.ListPacksFromIndex(wgCtx, ids) { blobs := b.Blobs if len(blobs) == 0 { printer.E("no blobs found for pack %v", b.PackID) diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index 7acdc646e6f..28a5525a296 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -18,7 +18,7 @@ import ( func listBlobs(repo restic.Repository) restic.BlobSet { blobs := restic.NewBlobSet() - _ = repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + _ = repo.ListBlobs(context.TODO(), func(pb restic.PackedBlob) { blobs.Insert(pb.BlobHandle) }) return blobs @@ -68,7 +68,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { // find blob that starts at offset 0 var damagedBlob restic.BlobHandle - for blobs := range repo.Index().ListPacks(context.TODO(), restic.NewIDSet(damagedID)) { + for blobs := range repo.ListPacksFromIndex(context.TODO(), restic.NewIDSet(damagedID)) { for _, blob := range blobs.Blobs { if blob.Offset == 0 { damagedBlob = blob.BlobHandle @@ -91,7 +91,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { // all blobs in the file are broken damagedBlobs := restic.NewBlobSet() - for blobs := range repo.Index().ListPacks(context.TODO(), restic.NewIDSet(damagedID)) { + for blobs := range repo.ListPacksFromIndex(context.TODO(), restic.NewIDSet(damagedID)) { for _, blob := range blobs.Blobs { damagedBlobs.Insert(blob.BlobHandle) } diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 534edc9fdb9..4aa8106ab29 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -300,11 +300,6 @@ func (r *Repository) loadBlob(ctx context.Context, blobs []restic.PackedBlob, bu return nil, errors.Errorf("loading %v from %v packs failed", blobs[0].BlobHandle, len(blobs)) } -// LookupBlobSize returns the size of blob id. -func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, bool) { - return r.idx.LookupSize(restic.BlobHandle{ID: id, Type: tpe}) -} - func (r *Repository) getZstdEncoder() *zstd.Encoder { r.allocEnc.Do(func() { level := zstd.SpeedDefault @@ -583,9 +578,31 @@ func (r *Repository) Connections() uint { return r.be.Connections() } -// Index returns the currently used MasterIndex. -func (r *Repository) Index() restic.MasterIndex { - return r.idx +func (r *Repository) HasBlob(bh restic.BlobHandle) bool { + return r.idx.Has(bh) +} + +func (r *Repository) LookupBlob(bh restic.BlobHandle) []restic.PackedBlob { + return r.idx.Lookup(bh) +} + +// LookupBlobSize returns the size of blob id. +func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, bool) { + return r.idx.LookupSize(restic.BlobHandle{ID: id, Type: tpe}) +} + +func (r *Repository) SaveIndex(ctx context.Context, excludePacks restic.IDSet, extraObsolete restic.IDs, opts restic.MasterIndexSaveOpts) error { + return r.idx.Save(ctx, r, excludePacks, extraObsolete, opts) +} + +// ListBlobs runs fn on all blobs known to the index. When the context is cancelled, +// the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. 
+func (r *Repository) ListBlobs(ctx context.Context, fn func(restic.PackedBlob)) error { + return r.idx.Each(ctx, fn) +} + +func (r *Repository) ListPacksFromIndex(ctx context.Context, packs restic.IDSet) <-chan restic.PackBlobs { + return r.idx.ListPacks(ctx, packs) } // SetIndex instructs the repository to use the given index. diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index f0d3ae486e9..31a588f620b 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -161,7 +161,7 @@ func TestLoadBlobBroken(t *testing.T) { data, err := repo.LoadBlob(context.TODO(), restic.TreeBlob, id, nil) rtest.OK(t, err) rtest.Assert(t, bytes.Equal(buf, data), "data mismatch") - pack := repo.Index().Lookup(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID + pack := repo.LookupBlob(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID rtest.Assert(t, c.Has(backend.Handle{Type: restic.PackFile, Name: pack.String()}), "expected tree pack to be cached") } @@ -439,7 +439,7 @@ func TestListPack(t *testing.T) { repo.UseCache(c) // Forcibly cache pack file - packID := repo.Index().Lookup(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID + packID := repo.LookupBlob(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID rtest.OK(t, be.Load(context.TODO(), backend.Handle{Type: restic.PackFile, IsMetadata: true, Name: packID.String()}, 0, 0, func(rd io.Reader) error { return nil })) // Get size to list pack diff --git a/internal/restic/repository.go b/internal/restic/repository.go index bc0ec2d43dd..ee32beb7e8f 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -18,49 +18,52 @@ var ErrInvalidData = errors.New("invalid data returned") type Repository interface { // Connections returns the maximum number of concurrent backend operations Connections() uint - + Config() Config + PackSize() uint Key() *crypto.Key - Index() MasterIndex LoadIndex(context.Context, *progress.Counter) error ClearIndex() SetIndex(MasterIndex) error - LookupBlobSize(ID, BlobType) (uint, bool) + SaveIndex(ctx context.Context, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error - Config() Config - PackSize() uint - - // List calls the function fn for each file of type t in the repository. - // When an error is returned by fn, processing stops and List() returns the - // error. - // - // The function fn is called in the same Goroutine List() was called from. - List(ctx context.Context, t FileType, fn func(ID, int64) error) error + HasBlob(BlobHandle) bool + LookupBlob(BlobHandle) []PackedBlob + LookupBlobSize(ID, BlobType) (uint, bool) + // ListBlobs runs fn on all blobs known to the index. When the context is cancelled, + // the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. + ListBlobs(ctx context.Context, fn func(PackedBlob)) error + ListPacksFromIndex(ctx context.Context, packs IDSet) <-chan PackBlobs // ListPack returns the list of blobs saved in the pack id and the length of // the pack header. ListPack(context.Context, ID, int64) ([]Blob, uint32, error) LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error) LoadBlobsFromPack(ctx context.Context, packID ID, blobs []Blob, handleBlobFn func(blob BlobHandle, buf []byte, err error) error) error - SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, int, error) // StartPackUploader start goroutines to upload new pack files. 
The errgroup // is used to immediately notify about an upload error. Flush() will also return // that error. StartPackUploader(ctx context.Context, wg *errgroup.Group) + SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, int, error) Flush(context.Context) error + // List calls the function fn for each file of type t in the repository. + // When an error is returned by fn, processing stops and List() returns the + // error. + // + // The function fn is called in the same Goroutine List() was called from. + List(ctx context.Context, t FileType, fn func(ID, int64) error) error + // LoadRaw reads all data stored in the backend for the file with id and filetype t. + // If the backend returns data that does not match the id, then the buffer is returned + // along with an error that is a restic.ErrInvalidData error. + LoadRaw(ctx context.Context, t FileType, id ID) (data []byte, err error) // LoadUnpacked loads and decrypts the file with the given type and ID. LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error) SaveUnpacked(context.Context, FileType, []byte) (ID, error) // RemoveUnpacked removes a file from the repository. This will eventually be restricted to deleting only snapshots. RemoveUnpacked(ctx context.Context, t FileType, id ID) error - - // LoadRaw reads all data stored in the backend for the file with id and filetype t. - // If the backend returns data that does not match the id, then the buffer is returned - // along with an error that is a restic.ErrInvalidData error. - LoadRaw(ctx context.Context, t FileType, id ID) (data []byte, err error) } type FileType = backend.FileType @@ -141,3 +144,7 @@ type Unpacked interface { SaverUnpacked RemoverUnpacked } + +type ListBlobser interface { + ListBlobs(ctx context.Context, fn func(PackedBlob)) error +} diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 9f41f5cf2c8..721330a8c88 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -240,7 +240,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { } idx := NewHardlinkIndex[string]() - filerestorer := newFileRestorer(dst, res.repo.LoadBlobsFromPack, res.repo.Index().Lookup, + filerestorer := newFileRestorer(dst, res.repo.LoadBlobsFromPack, res.repo.LookupBlob, res.repo.Connections(), res.sparse, res.progress) filerestorer.Error = res.Error From 8f1e70cd9b3ceac6f35562abdbff8044f15f9a89 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 12:51:58 +0200 Subject: [PATCH 264/893] repository: remove clearIndex and packSize from public interface --- internal/repository/prune.go | 8 ++++---- internal/repository/repair_index.go | 2 +- internal/repository/repository.go | 12 ++++++------ internal/restic/repository.go | 2 -- 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 66eab28b289..712986e6160 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -313,7 +313,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re return usedBlobs, indexPack, nil } -func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer progress.Printer) (PrunePlan, error) { +func decidePackAction(ctx context.Context, opts PruneOptions, repo *Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer progress.Printer) (PrunePlan, error) { removePacksFirst := 
restic.NewIDSet() removePacks := restic.NewIDSet() repackPacks := restic.NewIDSet() @@ -322,10 +322,10 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi var repackSmallCandidates []packInfoWithID repoVersion := repo.Config().Version // only repack very small files by default - targetPackSize := repo.PackSize() / 25 + targetPackSize := repo.packSize() / 25 if opts.RepackSmall { // consider files with at least 80% of the target size as large enough - targetPackSize = repo.PackSize() / 5 * 4 + targetPackSize = repo.packSize() / 5 * 4 } // loop over all packs and decide what to do @@ -612,7 +612,7 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e } // drop outdated in-memory index - repo.ClearIndex() + repo.clearIndex() printer.P("done\n") return nil diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index a7d94fcf82e..c20641d508d 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -107,7 +107,7 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, } // drop outdated in-memory index - repo.ClearIndex() + repo.clearIndex() return nil } diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 4aa8106ab29..48bb9a0ad5f 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -146,8 +146,8 @@ func (r *Repository) Config() restic.Config { return r.cfg } -// PackSize return the target size of a pack file when uploading -func (r *Repository) PackSize() uint { +// packSize return the target size of a pack file when uploading +func (r *Repository) packSize() uint { return r.opts.PackSize } @@ -541,8 +541,8 @@ func (r *Repository) StartPackUploader(ctx context.Context, wg *errgroup.Group) innerWg, ctx := errgroup.WithContext(ctx) r.packerWg = innerWg r.uploader = newPackerUploader(ctx, innerWg, r, r.be.Connections()) - r.treePM = newPackerManager(r.key, restic.TreeBlob, r.PackSize(), r.uploader.QueuePacker) - r.dataPM = newPackerManager(r.key, restic.DataBlob, r.PackSize(), r.uploader.QueuePacker) + r.treePM = newPackerManager(r.key, restic.TreeBlob, r.packSize(), r.uploader.QueuePacker) + r.dataPM = newPackerManager(r.key, restic.DataBlob, r.packSize(), r.uploader.QueuePacker) wg.Go(func() error { return innerWg.Wait() @@ -612,7 +612,7 @@ func (r *Repository) SetIndex(i restic.MasterIndex) error { return r.prepareCache() } -func (r *Repository) ClearIndex() { +func (r *Repository) clearIndex() { r.idx = index.NewMasterIndex() r.configureIndex() } @@ -646,7 +646,7 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { } // reset in-memory index before loading it from the repository - r.ClearIndex() + r.clearIndex() err = index.ForAllIndexes(ctx, indexList, r, func(_ restic.ID, idx *index.Index, _ bool, err error) error { if err != nil { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index ee32beb7e8f..06344a63030 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -19,11 +19,9 @@ type Repository interface { // Connections returns the maximum number of concurrent backend operations Connections() uint Config() Config - PackSize() uint Key() *crypto.Key LoadIndex(context.Context, *progress.Counter) error - ClearIndex() SetIndex(MasterIndex) error SaveIndex(ctx context.Context, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error From 
0aa5c5384263a398ef87927151c204f8eeed6d49 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 12:58:41 +0200 Subject: [PATCH 265/893] repository: replace HasBlob with LookupBlobSize --- cmd/restic/cmd_cat.go | 3 +-- cmd/restic/cmd_copy.go | 4 ++-- internal/archiver/archiver.go | 4 ++-- internal/repository/repository.go | 4 ---- internal/restic/repository.go | 1 - 5 files changed, 5 insertions(+), 11 deletions(-) diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index e776b67a14d..de579587f11 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -167,8 +167,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { } for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { - bh := restic.BlobHandle{ID: id, Type: t} - if !repo.HasBlob(bh) { + if _, ok := repo.LookupBlobSize(id, t); !ok { continue } diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 26b16a374c5..0df89932186 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -202,7 +202,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep // Do we already have this tree blob? treeHandle := restic.BlobHandle{ID: tree.ID, Type: restic.TreeBlob} - if !dstRepo.HasBlob(treeHandle) { + if _, ok := dstRepo.LookupBlobSize(treeHandle.ID, treeHandle.Type); !ok { // copy raw tree bytes to avoid problems if the serialization changes enqueue(treeHandle) } @@ -212,7 +212,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep // Copy the blobs for this file. for _, blobID := range entry.Content { h := restic.BlobHandle{Type: restic.DataBlob, ID: blobID} - if !dstRepo.HasBlob(h) { + if _, ok := dstRepo.LookupBlobSize(h.ID, h.Type); !ok { enqueue(h) } } diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 10034afa14a..1de28082bd0 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -276,7 +276,7 @@ func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) (*rest } func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { - if arch.Repo.HasBlob(restic.BlobHandle{ID: id, Type: restic.TreeBlob}) { + if _, ok := arch.Repo.LookupBlobSize(id, restic.TreeBlob); ok { err = errors.Errorf("tree %v could not be loaded; the repository could be damaged: %v", id, err) } else { err = errors.Errorf("tree %v is not known; the repository could be damaged, run `repair index` to try to repair it", id) @@ -390,7 +390,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult { func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool { // check if all blobs are contained in index for _, id := range previous.Content { - if !arch.Repo.HasBlob(restic.BlobHandle{ID: id, Type: restic.DataBlob}) { + if _, ok := arch.Repo.LookupBlobSize(id, restic.DataBlob); !ok { return false } } diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 48bb9a0ad5f..e5983ee1646 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -578,10 +578,6 @@ func (r *Repository) Connections() uint { return r.be.Connections() } -func (r *Repository) HasBlob(bh restic.BlobHandle) bool { - return r.idx.Has(bh) -} - func (r *Repository) LookupBlob(bh restic.BlobHandle) []restic.PackedBlob { return r.idx.Lookup(bh) } diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 06344a63030..d16c12018f0 100644 --- a/internal/restic/repository.go +++ 
b/internal/restic/repository.go @@ -25,7 +25,6 @@ type Repository interface { SetIndex(MasterIndex) error SaveIndex(ctx context.Context, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error - HasBlob(BlobHandle) bool LookupBlob(BlobHandle) []PackedBlob LookupBlobSize(ID, BlobType) (uint, bool) From c01bcb10018ac6f416d7f7750f83fb814b4d7d55 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 13:03:14 +0200 Subject: [PATCH 266/893] archiver: remove unused masterIndex from test --- internal/archiver/blob_saver_test.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/internal/archiver/blob_saver_test.go b/internal/archiver/blob_saver_test.go index 180f95b3d7a..f7ef2f47dfe 100644 --- a/internal/archiver/blob_saver_test.go +++ b/internal/archiver/blob_saver_test.go @@ -10,7 +10,6 @@ import ( "testing" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" @@ -19,7 +18,6 @@ import ( var errTest = errors.New("test error") type saveFail struct { - idx restic.MasterIndex cnt int32 failAt int32 } @@ -33,18 +31,12 @@ func (b *saveFail) SaveBlob(_ context.Context, _ restic.BlobType, _ []byte, id r return id, false, 0, nil } -func (b *saveFail) Index() restic.MasterIndex { - return b.idx -} - func TestBlobSaver(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() wg, ctx := errgroup.WithContext(ctx) - saver := &saveFail{ - idx: index.NewMasterIndex(), - } + saver := &saveFail{} b := NewBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) @@ -100,7 +92,6 @@ func TestBlobSaverError(t *testing.T) { wg, ctx := errgroup.WithContext(ctx) saver := &saveFail{ - idx: index.NewMasterIndex(), failAt: int32(test.failAt), } From 0bb0720348c055222c04b105fe44e40c68e6a3a2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 14:41:11 +0200 Subject: [PATCH 267/893] test cleanups --- internal/archiver/testing.go | 3 +-- internal/repository/repack_test.go | 5 ----- internal/repository/repair_pack_test.go | 2 -- 3 files changed, 1 insertion(+), 9 deletions(-) diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index a186a4ee57d..278c6a44823 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -11,7 +11,6 @@ import ( "testing" "time" - "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" @@ -239,7 +238,7 @@ func TestEnsureFileContent(ctx context.Context, t testing.TB, repo restic.BlobLo return } - content := make([]byte, crypto.CiphertextLength(len(file.Content))) + content := make([]byte, len(file.Content)) pos := 0 for _, id := range node.Content { part, err := repo.LoadBlob(ctx, restic.DataBlob, id, content[pos:]) diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index e0f1b4254b9..f47f3288111 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -199,11 +199,6 @@ func rebuildIndex(t *testing.T, repo restic.Repository) { } func reloadIndex(t *testing.T, repo restic.Repository) { - err := repo.SetIndex(index.NewMasterIndex()) - if err != nil { - t.Fatal(err) - } - if err := repo.LoadIndex(context.TODO(), nil); err != nil { t.Fatalf("error loading new index: %v", err) } diff --git a/internal/repository/repair_pack_test.go 
b/internal/repository/repair_pack_test.go index 28a5525a296..0d6d340f493 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -8,7 +8,6 @@ import ( "github.com/restic/restic/internal/backend" backendtest "github.com/restic/restic/internal/backend/test" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" @@ -118,7 +117,6 @@ func testRepairBrokenPack(t *testing.T, version uint) { rtest.OK(t, repository.RepairPacks(context.TODO(), repo, toRepair, &progress.NoopPrinter{})) // reload index - rtest.OK(t, repo.SetIndex(index.NewMasterIndex())) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) packsAfter := listPacks(t, repo) From e848ad651a253def63d989c044d374ee40265c8c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 14:41:31 +0200 Subject: [PATCH 268/893] restic: name parameters in restic interface --- internal/restic/repository.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/internal/restic/repository.go b/internal/restic/repository.go index d16c12018f0..7c0c747bf0f 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -21,12 +21,12 @@ type Repository interface { Config() Config Key() *crypto.Key - LoadIndex(context.Context, *progress.Counter) error - SetIndex(MasterIndex) error + LoadIndex(ctx context.Context, p *progress.Counter) error + SetIndex(mi MasterIndex) error SaveIndex(ctx context.Context, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error - LookupBlob(BlobHandle) []PackedBlob - LookupBlobSize(ID, BlobType) (uint, bool) + LookupBlob(bh BlobHandle) []PackedBlob + LookupBlobSize(id ID, t BlobType) (size uint, exists bool) // ListBlobs runs fn on all blobs known to the index. When the context is cancelled, // the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. @@ -34,17 +34,17 @@ type Repository interface { ListPacksFromIndex(ctx context.Context, packs IDSet) <-chan PackBlobs // ListPack returns the list of blobs saved in the pack id and the length of // the pack header. - ListPack(context.Context, ID, int64) ([]Blob, uint32, error) + ListPack(ctx context.Context, id ID, packSize int64) (entries []Blob, hdrSize uint32, err error) - LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error) + LoadBlob(ctx context.Context, t BlobType, id ID, buf []byte) ([]byte, error) LoadBlobsFromPack(ctx context.Context, packID ID, blobs []Blob, handleBlobFn func(blob BlobHandle, buf []byte, err error) error) error // StartPackUploader start goroutines to upload new pack files. The errgroup // is used to immediately notify about an upload error. Flush() will also return // that error. StartPackUploader(ctx context.Context, wg *errgroup.Group) - SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, int, error) - Flush(context.Context) error + SaveBlob(ctx context.Context, t BlobType, buf []byte, id ID, storeDuplicate bool) (newID ID, known bool, size int, err error) + Flush(ctx context.Context) error // List calls the function fn for each file of type t in the repository. // When an error is returned by fn, processing stops and List() returns the @@ -58,7 +58,7 @@ type Repository interface { LoadRaw(ctx context.Context, t FileType, id ID) (data []byte, err error) // LoadUnpacked loads and decrypts the file with the given type and ID. 
LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error) - SaveUnpacked(context.Context, FileType, []byte) (ID, error) + SaveUnpacked(ctx context.Context, t FileType, buf []byte) (ID, error) // RemoveUnpacked removes a file from the repository. This will eventually be restricted to deleting only snapshots. RemoveUnpacked(ctx context.Context, t FileType, id ID) error } @@ -86,7 +86,7 @@ type LoaderUnpacked interface { type SaverUnpacked interface { // Connections returns the maximum number of concurrent backend operations Connections() uint - SaveUnpacked(context.Context, FileType, []byte) (ID, error) + SaveUnpacked(ctx context.Context, t FileType, buf []byte) (ID, error) } // RemoverUnpacked allows removing an unpacked blob @@ -115,8 +115,8 @@ type MasterIndexSaveOpts struct { // MasterIndex keeps track of the blobs are stored within files. type MasterIndex interface { - Has(BlobHandle) bool - Lookup(BlobHandle) []PackedBlob + Has(bh BlobHandle) bool + Lookup(bh BlobHandle) []PackedBlob // Each runs fn on all blobs known to the index. When the context is cancelled, // the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. From 1266a4932f2974d67fcc75c951584e55c5713362 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 14:54:50 +0200 Subject: [PATCH 269/893] repository: fix parameter order of LookupBlobSize All methods should use blobType followed by ID. --- cmd/restic/cmd_cat.go | 2 +- cmd/restic/cmd_copy.go | 4 ++-- cmd/restic/cmd_diff.go | 2 +- cmd/restic/cmd_repair_snapshots.go | 2 +- cmd/restic/cmd_stats.go | 2 +- internal/archiver/archiver.go | 4 ++-- internal/checker/checker.go | 2 +- internal/checker/checker_test.go | 4 ++-- internal/fuse/file.go | 2 +- internal/fuse/fuse_test.go | 2 +- internal/repository/repack_test.go | 2 +- internal/repository/repository.go | 4 ++-- internal/restic/find.go | 2 +- internal/restic/find_test.go | 2 +- internal/restic/repository.go | 2 +- internal/restic/tree_stream.go | 2 +- internal/restorer/restorer.go | 2 +- 17 files changed, 21 insertions(+), 21 deletions(-) diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index de579587f11..23205771a18 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -167,7 +167,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { } for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { - if _, ok := repo.LookupBlobSize(id, t); !ok { + if _, ok := repo.LookupBlobSize(t, id); !ok { continue } diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 0df89932186..4b2f95bf20a 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -202,7 +202,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep // Do we already have this tree blob? treeHandle := restic.BlobHandle{ID: tree.ID, Type: restic.TreeBlob} - if _, ok := dstRepo.LookupBlobSize(treeHandle.ID, treeHandle.Type); !ok { + if _, ok := dstRepo.LookupBlobSize(treeHandle.Type, treeHandle.ID); !ok { // copy raw tree bytes to avoid problems if the serialization changes enqueue(treeHandle) } @@ -212,7 +212,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep // Copy the blobs for this file. 
for _, blobID := range entry.Content { h := restic.BlobHandle{Type: restic.DataBlob, ID: blobID} - if _, ok := dstRepo.LookupBlobSize(h.ID, h.Type); !ok { + if _, ok := dstRepo.LookupBlobSize(h.Type, h.ID); !ok { enqueue(h) } } diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index b156191dc59..28c7426254c 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -156,7 +156,7 @@ func updateBlobs(repo restic.Loader, blobs restic.BlobSet, stats *DiffStat) { stats.TreeBlobs++ } - size, found := repo.LookupBlobSize(h.ID, h.Type) + size, found := repo.LookupBlobSize(h.Type, h.ID) if !found { Warnf("unable to find blob size for %v\n", h) continue diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index b200d100adc..be5ef4ad92d 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -97,7 +97,7 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt var newSize uint64 // check all contents and remove if not available for _, id := range node.Content { - if size, found := repo.LookupBlobSize(id, restic.DataBlob); !found { + if size, found := repo.LookupBlobSize(restic.DataBlob, id); !found { ok = false } else { newContent = append(newContent, id) diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 3bec18f4cf5..0926a54ef8c 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -238,7 +238,7 @@ func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer, } if _, ok := stats.fileBlobs[nodePath][blobID]; !ok { // is always a data blob since we're accessing it via a file's Content array - blobSize, found := repo.LookupBlobSize(blobID, restic.DataBlob) + blobSize, found := repo.LookupBlobSize(restic.DataBlob, blobID) if !found { return fmt.Errorf("blob %s not found for tree %s", blobID, parentTreeID) } diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 1de28082bd0..50b09583c89 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -276,7 +276,7 @@ func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) (*rest } func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { - if _, ok := arch.Repo.LookupBlobSize(id, restic.TreeBlob); ok { + if _, ok := arch.Repo.LookupBlobSize(restic.TreeBlob, id); ok { err = errors.Errorf("tree %v could not be loaded; the repository could be damaged: %v", id, err) } else { err = errors.Errorf("tree %v is not known; the repository could be damaged, run `repair index` to try to repair it", id) @@ -390,7 +390,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult { func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool { // check if all blobs are contained in index for _, id := range previous.Content { - if _, ok := arch.Repo.LookupBlobSize(id, restic.DataBlob); !ok { + if _, ok := arch.Repo.LookupBlobSize(restic.DataBlob, id); !ok { return false } } diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 09b1dd7eb89..db3bf807d62 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -429,7 +429,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { // unfortunately fails in some cases that are not resolvable // by users, so we omit this check, see #1887 - _, found := c.repo.LookupBlobSize(blobID, restic.DataBlob) + _, found := c.repo.LookupBlobSize(restic.DataBlob, blobID) if !found { debug.Log("tree %v references blob %v 
which isn't contained in index", id, blobID) errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("file %q blob %v not found in index", node.Name, blobID)}) diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index baec8862890..1219f4e2bad 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -461,11 +461,11 @@ func (r *delayRepository) LoadTree(ctx context.Context, id restic.ID) (*restic.T return restic.LoadTree(ctx, r.Repository, id) } -func (r *delayRepository) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, bool) { +func (r *delayRepository) LookupBlobSize(t restic.BlobType, id restic.ID) (uint, bool) { if id == r.DelayTree && t == restic.DataBlob { r.Unblock() } - return r.Repository.LookupBlobSize(id, t) + return r.Repository.LookupBlobSize(t, id) } func (r *delayRepository) Unblock() { diff --git a/internal/fuse/file.go b/internal/fuse/file.go index 5190febbb55..e2e0cf9a016 100644 --- a/internal/fuse/file.go +++ b/internal/fuse/file.go @@ -72,7 +72,7 @@ func (f *file) Open(_ context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse var bytes uint64 cumsize := make([]uint64, 1+len(f.node.Content)) for i, id := range f.node.Content { - size, found := f.root.repo.LookupBlobSize(id, restic.DataBlob) + size, found := f.root.repo.LookupBlobSize(restic.DataBlob, id) if !found { return nil, errors.Errorf("id %v not found in repository", id) } diff --git a/internal/fuse/fuse_test.go b/internal/fuse/fuse_test.go index 1053d49a434..aebcb127282 100644 --- a/internal/fuse/fuse_test.go +++ b/internal/fuse/fuse_test.go @@ -89,7 +89,7 @@ func TestFuseFile(t *testing.T) { memfile []byte ) for _, id := range content { - size, found := repo.LookupBlobSize(id, restic.DataBlob) + size, found := repo.LookupBlobSize(restic.DataBlob, id) rtest.Assert(t, found, "Expected to find blob id %v", id) filesize += uint64(size) diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index f47f3288111..524ab648586 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -266,7 +266,7 @@ func testRepack(t *testing.T, version uint) { } for h := range removeBlobs { - if _, found := repo.LookupBlobSize(h.ID, h.Type); found { + if _, found := repo.LookupBlobSize(h.Type, h.ID); found { t.Errorf("blob %v still contained in the repo", h) } } diff --git a/internal/repository/repository.go b/internal/repository/repository.go index e5983ee1646..d68ed88379e 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -583,8 +583,8 @@ func (r *Repository) LookupBlob(bh restic.BlobHandle) []restic.PackedBlob { } // LookupBlobSize returns the size of blob id. -func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, bool) { - return r.idx.LookupSize(restic.BlobHandle{ID: id, Type: tpe}) +func (r *Repository) LookupBlobSize(tpe restic.BlobType, id restic.ID) (uint, bool) { + return r.idx.LookupSize(restic.BlobHandle{Type: tpe, ID: id}) } func (r *Repository) SaveIndex(ctx context.Context, excludePacks restic.IDSet, extraObsolete restic.IDs, opts restic.MasterIndexSaveOpts) error { diff --git a/internal/restic/find.go b/internal/restic/find.go index 08670a49f48..cefef2196e3 100644 --- a/internal/restic/find.go +++ b/internal/restic/find.go @@ -11,7 +11,7 @@ import ( // Loader loads a blob from a repository. 
type Loader interface { LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error) - LookupBlobSize(id ID, tpe BlobType) (uint, bool) + LookupBlobSize(tpe BlobType, id ID) (uint, bool) Connections() uint } diff --git a/internal/restic/find_test.go b/internal/restic/find_test.go index 1ae30ded980..9b8315ad4f1 100644 --- a/internal/restic/find_test.go +++ b/internal/restic/find_test.go @@ -166,7 +166,7 @@ func (r ForbiddenRepo) LoadBlob(context.Context, restic.BlobType, restic.ID, []b return nil, errors.New("should not be called") } -func (r ForbiddenRepo) LookupBlobSize(_ restic.ID, _ restic.BlobType) (uint, bool) { +func (r ForbiddenRepo) LookupBlobSize(_ restic.BlobType, _ restic.ID) (uint, bool) { return 0, false } diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 7c0c747bf0f..3d5bccec05c 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -26,7 +26,7 @@ type Repository interface { SaveIndex(ctx context.Context, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error LookupBlob(bh BlobHandle) []PackedBlob - LookupBlobSize(id ID, t BlobType) (size uint, exists bool) + LookupBlobSize(t BlobType, id ID) (size uint, exists bool) // ListBlobs runs fn on all blobs known to the index. When the context is cancelled, // the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. diff --git a/internal/restic/tree_stream.go b/internal/restic/tree_stream.go index 4110a5e8d83..123295533e3 100644 --- a/internal/restic/tree_stream.go +++ b/internal/restic/tree_stream.go @@ -77,7 +77,7 @@ func filterTrees(ctx context.Context, repo Loader, trees IDs, loaderChan chan<- continue } - treeSize, found := repo.LookupBlobSize(nextTreeID.ID, TreeBlob) + treeSize, found := repo.LookupBlobSize(TreeBlob, nextTreeID.ID) if found && treeSize > 50*1024*1024 { loadCh = hugeTreeLoaderChan } else { diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 721330a8c88..c471800df63 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -435,7 +435,7 @@ func (res *Restorer) verifyFile(target string, node *restic.Node, buf []byte) ([ var offset int64 for _, blobID := range node.Content { - length, found := res.repo.LookupBlobSize(blobID, restic.DataBlob) + length, found := res.repo.LookupBlobSize(restic.DataBlob, blobID) if !found { return buf, errors.Errorf("Unable to fetch blob %s", blobID) } From 864995271ec7413594ce8a186385b6b61cdb310d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 14:56:17 +0200 Subject: [PATCH 270/893] repository: unwrap BlobHandle parameters of LookupBlob The method now uses the same parameters as LookupBlobSize. 
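To make the change easier to follow when reading the diffs below, here is a minimal before/after sketch of a call site. It is illustrative only (it assumes a `repo` value implementing `restic.Repository` and an arbitrary blob `id`); the call sites actually touched by this commit are shown in the diff that follows.

```go
package example

import "github.com/restic/restic/internal/restic"

// lookupExample sketches the call-site change made by this commit: LookupBlob
// now takes the blob type and ID as separate parameters, matching LookupBlobSize.
func lookupExample(repo restic.Repository, id restic.ID) int {
	// Before this commit:
	//   pbs := repo.LookupBlob(restic.BlobHandle{Type: restic.DataBlob, ID: id})
	// After this commit:
	pbs := repo.LookupBlob(restic.DataBlob, id)

	// LookupBlobSize already used this parameter order (see PATCH 269).
	if size, found := repo.LookupBlobSize(restic.DataBlob, id); found {
		_ = size // e.g. could be used to pre-allocate a buffer
	}
	return len(pbs)
}
```
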
--- cmd/restic/cmd_copy.go | 2 +- cmd/restic/cmd_find.go | 2 +- cmd/restic/cmd_stats.go | 2 +- internal/repository/check.go | 2 +- internal/repository/repack_test.go | 6 +++--- internal/repository/repository.go | 4 ++-- internal/repository/repository_test.go | 4 ++-- internal/restic/repository.go | 2 +- internal/restorer/filerestorer.go | 8 ++++---- internal/restorer/filerestorer_test.go | 4 ++-- 10 files changed, 18 insertions(+), 18 deletions(-) diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 4b2f95bf20a..d12501dd96b 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -187,7 +187,7 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep packList := restic.NewIDSet() enqueue := func(h restic.BlobHandle) { - pb := srcRepo.LookupBlob(h) + pb := srcRepo.LookupBlob(h.Type, h.ID) copyBlobs.Insert(h) for _, p := range pb { packList.Insert(p.PackID) diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 7ad8886c836..59e34c46813 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -509,7 +509,7 @@ func (f *Finder) findObjectPack(id string, t restic.BlobType) { return } - blobs := f.repo.LookupBlob(restic.BlobHandle{ID: rid, Type: t}) + blobs := f.repo.LookupBlob(t, rid) if len(blobs) == 0 { Printf("Object %s not found in the index\n", rid.Str()) return diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 0926a54ef8c..0f8e45f366c 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -124,7 +124,7 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args if opts.countMode == countModeRawData { // the blob handles have been collected, but not yet counted for blobHandle := range stats.blobs { - pbs := repo.LookupBlob(blobHandle) + pbs := repo.LookupBlob(blobHandle.Type, blobHandle.ID) if len(pbs) == 0 { return fmt.Errorf("blob %v not found", blobHandle) } diff --git a/internal/repository/check.go b/internal/repository/check.go index 05605db8695..f16cd749230 100644 --- a/internal/repository/check.go +++ b/internal/repository/check.go @@ -161,7 +161,7 @@ func checkPackInner(ctx context.Context, r *Repository, id restic.ID, blobs []re for _, blob := range blobs { // Check if blob is contained in index and position is correct idxHas := false - for _, pb := range r.LookupBlob(blob.BlobHandle) { + for _, pb := range r.LookupBlob(blob.BlobHandle.Type, blob.BlobHandle.ID) { if pb.PackID == id && pb.Blob == blob { idxHas = true break diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index 524ab648586..96b75ca4652 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -146,7 +146,7 @@ func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSe packs := restic.NewIDSet() for h := range blobs { - list := repo.LookupBlob(h) + list := repo.LookupBlob(h.Type, h.ID) if len(list) == 0 { t.Fatal("Failed to find blob", h.ID.Str(), "with type", h.Type) } @@ -247,7 +247,7 @@ func testRepack(t *testing.T, version uint) { } for h := range keepBlobs { - list := repo.LookupBlob(h) + list := repo.LookupBlob(h.Type, h.ID) if len(list) == 0 { t.Errorf("unable to find blob %v in repo", h.ID.Str()) continue @@ -311,7 +311,7 @@ func testRepackCopy(t *testing.T, version uint) { reloadIndex(t, dstRepo) for h := range keepBlobs { - list := dstRepo.LookupBlob(h) + list := dstRepo.LookupBlob(h.Type, h.ID) if len(list) == 0 { t.Errorf("unable to find blob %v in repo", h.ID.Str()) continue diff --git 
a/internal/repository/repository.go b/internal/repository/repository.go index d68ed88379e..73d05fe7b12 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -578,8 +578,8 @@ func (r *Repository) Connections() uint { return r.be.Connections() } -func (r *Repository) LookupBlob(bh restic.BlobHandle) []restic.PackedBlob { - return r.idx.Lookup(bh) +func (r *Repository) LookupBlob(tpe restic.BlobType, id restic.ID) []restic.PackedBlob { + return r.idx.Lookup(restic.BlobHandle{Type: tpe, ID: id}) } // LookupBlobSize returns the size of blob id. diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 31a588f620b..bc950d0b0d9 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -161,7 +161,7 @@ func TestLoadBlobBroken(t *testing.T) { data, err := repo.LoadBlob(context.TODO(), restic.TreeBlob, id, nil) rtest.OK(t, err) rtest.Assert(t, bytes.Equal(buf, data), "data mismatch") - pack := repo.LookupBlob(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID + pack := repo.LookupBlob(restic.TreeBlob, id)[0].PackID rtest.Assert(t, c.Has(backend.Handle{Type: restic.PackFile, Name: pack.String()}), "expected tree pack to be cached") } @@ -439,7 +439,7 @@ func TestListPack(t *testing.T) { repo.UseCache(c) // Forcibly cache pack file - packID := repo.LookupBlob(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID + packID := repo.LookupBlob(restic.TreeBlob, id)[0].PackID rtest.OK(t, be.Load(context.TODO(), backend.Handle{Type: restic.PackFile, IsMetadata: true, Name: packID.String()}, 0, 0, func(rd io.Reader) error { return nil })) // Get size to list pack diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 3d5bccec05c..9e6d6b99bd8 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -25,7 +25,7 @@ type Repository interface { SetIndex(mi MasterIndex) error SaveIndex(ctx context.Context, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error - LookupBlob(bh BlobHandle) []PackedBlob + LookupBlob(t BlobType, id ID) []PackedBlob LookupBlobSize(t BlobType, id ID) (size uint, exists bool) // ListBlobs runs fn on all blobs known to the index. 
When the context is cancelled, diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index f2c134ea926..3551857dd91 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -48,7 +48,7 @@ type blobsLoaderFn func(ctx context.Context, packID restic.ID, blobs []restic.Bl // fileRestorer restores set of files type fileRestorer struct { - idx func(restic.BlobHandle) []restic.PackedBlob + idx func(restic.BlobType, restic.ID) []restic.PackedBlob blobsLoader blobsLoaderFn workerCount int @@ -64,7 +64,7 @@ type fileRestorer struct { func newFileRestorer(dst string, blobsLoader blobsLoaderFn, - idx func(restic.BlobHandle) []restic.PackedBlob, + idx func(restic.BlobType, restic.ID) []restic.PackedBlob, connections uint, sparse bool, progress *restore.Progress) *fileRestorer { @@ -99,7 +99,7 @@ func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID } for _, blobID := range blobIDs { - packs := r.idx(restic.BlobHandle{ID: blobID, Type: restic.DataBlob}) + packs := r.idx(restic.DataBlob, blobID) if len(packs) == 0 { return errors.Errorf("Unknown blob %s", blobID.String()) } @@ -227,7 +227,7 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error { } } else if packsMap, ok := file.blobs.(map[restic.ID][]fileBlobInfo); ok { for _, blob := range packsMap[pack.id] { - idxPacks := r.idx(restic.BlobHandle{ID: blob.id, Type: restic.DataBlob}) + idxPacks := r.idx(restic.DataBlob, blob.id) for _, idxPack := range idxPacks { if idxPack.PackID.Equal(pack.id) { addBlob(idxPack.Blob, blob.offset) diff --git a/internal/restorer/filerestorer_test.go b/internal/restorer/filerestorer_test.go index befeb5d2c0c..03797e0c83b 100644 --- a/internal/restorer/filerestorer_test.go +++ b/internal/restorer/filerestorer_test.go @@ -35,8 +35,8 @@ type TestRepo struct { loader blobsLoaderFn } -func (i *TestRepo) Lookup(bh restic.BlobHandle) []restic.PackedBlob { - packs := i.blobs[bh.ID] +func (i *TestRepo) Lookup(tpe restic.BlobType, id restic.ID) []restic.PackedBlob { + packs := i.blobs[id] return packs } From 6ca12c1b4aaf28b8a0a322d8b5e1f46e480e3802 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 15:11:32 +0200 Subject: [PATCH 271/893] archiver: replace most uses of restic.Repository --- internal/archiver/archiver.go | 14 +++++++++++-- internal/archiver/archiver_test.go | 28 ++++++++++++------------- internal/archiver/archiver_unix_test.go | 2 +- internal/archiver/testing.go | 2 +- 4 files changed, 28 insertions(+), 18 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 50b09583c89..86b329a9a20 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -64,9 +64,19 @@ func (s *ItemStats) Add(other ItemStats) { s.TreeSizeInRepo += other.TreeSizeInRepo } +type archiverRepo interface { + restic.Loader + restic.BlobSaver + restic.SaverUnpacked + + Config() restic.Config + StartPackUploader(ctx context.Context, wg *errgroup.Group) + Flush(ctx context.Context) error +} + // Archiver saves a directory structure to the repo. type Archiver struct { - Repo restic.Repository + Repo archiverRepo SelectByName SelectByNameFunc Select SelectFunc FS fs.FS @@ -160,7 +170,7 @@ func (o Options) ApplyDefaults() Options { } // New initializes a new archiver. 
-func New(repo restic.Repository, fs fs.FS, opts Options) *Archiver { +func New(repo archiverRepo, fs fs.FS, opts Options) *Archiver { arch := &Archiver{ Repo: repo, SelectByName: func(_ string) bool { return true }, diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 8d0c2c02ff1..f38d5b0de3b 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -36,7 +36,7 @@ func prepareTempdirRepoSrc(t testing.TB, src TestDir) (string, restic.Repository return tempdir, repo } -func saveFile(t testing.TB, repo restic.Repository, filename string, filesystem fs.FS) (*restic.Node, ItemStats) { +func saveFile(t testing.TB, repo archiverRepo, filename string, filesystem fs.FS) (*restic.Node, ItemStats) { wg, ctx := errgroup.WithContext(context.TODO()) repo.StartPackUploader(ctx, wg) @@ -416,14 +416,14 @@ func BenchmarkArchiverSaveFileLarge(b *testing.B) { } type blobCountingRepo struct { - restic.Repository + archiverRepo m sync.Mutex saved map[restic.BlobHandle]uint } func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) { - id, exists, size, err := repo.Repository.SaveBlob(ctx, t, buf, id, storeDuplicate) + id, exists, size, err := repo.archiverRepo.SaveBlob(ctx, t, buf, id, storeDuplicate) if exists { return id, exists, size, err } @@ -435,7 +435,7 @@ func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, b } func (repo *blobCountingRepo) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) { - id, err := restic.SaveTree(ctx, repo.Repository, t) + id, err := restic.SaveTree(ctx, repo.archiverRepo, t) h := restic.BlobHandle{ID: id, Type: restic.TreeBlob} repo.m.Lock() repo.saved[h]++ @@ -465,8 +465,8 @@ func TestArchiverSaveFileIncremental(t *testing.T) { tempdir := rtest.TempDir(t) repo := &blobCountingRepo{ - Repository: repository.TestRepository(t), - saved: make(map[restic.BlobHandle]uint), + archiverRepo: repository.TestRepository(t), + saved: make(map[restic.BlobHandle]uint), } data := rtest.Random(23, 512*1024+887898) @@ -902,8 +902,8 @@ func TestArchiverSaveDirIncremental(t *testing.T) { tempdir := rtest.TempDir(t) repo := &blobCountingRepo{ - Repository: repository.TestRepository(t), - saved: make(map[restic.BlobHandle]uint), + archiverRepo: repository.TestRepository(t), + saved: make(map[restic.BlobHandle]uint), } appendToFile(t, filepath.Join(tempdir, "testfile"), []byte("foobar")) @@ -2017,7 +2017,7 @@ func (m *TrackFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, er } type failSaveRepo struct { - restic.Repository + archiverRepo failAfter int32 cnt int32 err error @@ -2029,7 +2029,7 @@ func (f *failSaveRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []by return restic.Hash(buf), false, 0, f.err } - return f.Repository.SaveBlob(ctx, t, buf, id, storeDuplicate) + return f.archiverRepo.SaveBlob(ctx, t, buf, id, storeDuplicate) } func TestArchiverAbortEarlyOnError(t *testing.T) { @@ -2105,9 +2105,9 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { } testRepo := &failSaveRepo{ - Repository: repo, - failAfter: int32(test.failAfter), - err: test.err, + archiverRepo: repo, + failAfter: int32(test.failAfter), + err: test.err, } // at most two files may be queued @@ -2134,7 +2134,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { } } -func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent *restic.Snapshot, filename string) (*restic.Snapshot, 
*restic.Node) { +func snapshot(t testing.TB, repo archiverRepo, fs fs.FS, parent *restic.Snapshot, filename string) (*restic.Snapshot, *restic.Node) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index a6b1aad2e15..4a380dff8d2 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -46,7 +46,7 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { return res } -func statAndSnapshot(t *testing.T, repo restic.Repository, name string) (*restic.Node, *restic.Node) { +func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*restic.Node, *restic.Node) { fi := lstat(t, name) want, err := restic.NodeFromFileInfo(name, fi, false) rtest.OK(t, err) diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index 278c6a44823..106e6844553 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -25,7 +25,7 @@ func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *res Tags: []string{"test"}, } if parent != nil { - sn, err := restic.LoadSnapshot(context.TODO(), arch.Repo, *parent) + sn, err := restic.LoadSnapshot(context.TODO(), repo, *parent) if err != nil { t.Fatal(err) } From 447b486c20c4933eb06011fb435a93641550db4e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 15:37:54 +0200 Subject: [PATCH 272/893] index: deduplicate index loading of check and repository --- internal/checker/checker.go | 37 +++---------------------- internal/index/master_index.go | 45 +++++++++++++++++++++++++++++++ internal/repository/repository.go | 35 +----------------------- 3 files changed, 49 insertions(+), 68 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index db3bf807d62..61c017414b7 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -111,33 +111,10 @@ func computePackTypes(ctx context.Context, idx restic.ListBlobser) (map[restic.I func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []error, errs []error) { debug.Log("Start") - indexList, err := restic.MemorizeList(ctx, c.repo, restic.IndexFile) - if err != nil { - // abort if an error occurs while listing the indexes - return hints, append(errs, err) - } - - if p != nil { - var numIndexFiles uint64 - err := indexList.List(ctx, restic.IndexFile, func(_ restic.ID, _ int64) error { - numIndexFiles++ - return nil - }) - if err != nil { - return hints, append(errs, err) - } - p.SetMax(numIndexFiles) - defer p.Done() - } - packToIndex := make(map[restic.ID]restic.IDSet) - err = index.ForAllIndexes(ctx, indexList, c.repo, func(id restic.ID, index *index.Index, oldFormat bool, err error) error { + err := c.masterIndex.Load(ctx, c.repo, p, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error { debug.Log("process index %v, err %v", id, err) - if p != nil { - p.Add(1) - } - if oldFormat { debug.Log("index %v has old format", id) hints = append(hints, &ErrOldIndexFormat{id}) @@ -150,11 +127,9 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e return nil } - c.masterIndex.Insert(index) - debug.Log("process blobs") cnt := 0 - err = index.Each(ctx, func(blob restic.PackedBlob) { + err = idx.Each(ctx, func(blob restic.PackedBlob) { cnt++ if _, ok := packToIndex[blob.PackID]; !ok { @@ -167,13 +142,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e return err }) if 
err != nil { - errs = append(errs, err) - } - - // Merge index before computing pack sizes, as this needs removed duplicates - err = c.masterIndex.MergeFinalIndexes() - if err != nil { - // abort if an error occurs merging the indexes + // failed to load the index return hints, append(errs, err) } diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 21ab344d673..796559fd7aa 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -9,6 +9,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" "golang.org/x/sync/errgroup" ) @@ -267,6 +268,50 @@ func (mi *MasterIndex) MergeFinalIndexes() error { return nil } +func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, p *progress.Counter, cb func(id restic.ID, idx *Index, oldFormat bool, err error) error) error { + indexList, err := restic.MemorizeList(ctx, r, restic.IndexFile) + if err != nil { + return err + } + + if p != nil { + var numIndexFiles uint64 + err := indexList.List(ctx, restic.IndexFile, func(_ restic.ID, _ int64) error { + numIndexFiles++ + return nil + }) + if err != nil { + return err + } + p.SetMax(numIndexFiles) + defer p.Done() + } + + err = ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *Index, oldFormat bool, err error) error { + if p != nil { + p.Add(1) + } + if cb != nil { + err = cb(id, idx, oldFormat, err) + } + if err != nil { + return err + } + // special case to allow check to ignore index loading errors + if idx == nil { + return nil + } + mi.Insert(idx) + return nil + }) + + if err != nil { + return err + } + + return mi.MergeFinalIndexes() +} + // Save saves all known indexes to index files, leaving out any // packs whose ID is contained in packBlacklist from finalized indexes. // It also removes the old index files and those listed in extraObsolete. 
diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 73d05fe7b12..bd7de0de4ba 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -623,43 +623,10 @@ func (r *Repository) configureIndex() { func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { debug.Log("Loading index") - indexList, err := restic.MemorizeList(ctx, r, restic.IndexFile) - if err != nil { - return err - } - - if p != nil { - var numIndexFiles uint64 - err := indexList.List(ctx, restic.IndexFile, func(_ restic.ID, _ int64) error { - numIndexFiles++ - return nil - }) - if err != nil { - return err - } - p.SetMax(numIndexFiles) - defer p.Done() - } - // reset in-memory index before loading it from the repository r.clearIndex() - err = index.ForAllIndexes(ctx, indexList, r, func(_ restic.ID, idx *index.Index, _ bool, err error) error { - if err != nil { - return err - } - r.idx.Insert(idx) - if p != nil { - p.Add(1) - } - return nil - }) - - if err != nil { - return err - } - - err = r.idx.MergeFinalIndexes() + err := r.idx.Load(ctx, r, p, nil) if err != nil { return err } From fb59e0061426ed5ad5e9d3ee9ba30285678f49eb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 15:48:25 +0200 Subject: [PATCH 273/893] index: rewrite MasterIndex load/save test to be independent of repository --- internal/index/master_index_test.go | 55 ++++++++++------------------- 1 file changed, 18 insertions(+), 37 deletions(-) diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go index 36a028768d5..3a7f3da88eb 100644 --- a/internal/index/master_index_test.go +++ b/internal/index/master_index_test.go @@ -357,44 +357,25 @@ func TestIndexSave(t *testing.T) { func testIndexSave(t *testing.T, version uint) { repo := createFilledRepo(t, 3, version) - err := repo.LoadIndex(context.TODO(), nil) - if err != nil { - t.Fatal(err) - } - - err = repo.SaveIndex(context.TODO(), nil, nil, restic.MasterIndexSaveOpts{}) - if err != nil { - t.Fatalf("unable to save new index: %v", err) - } - - checker := checker.New(repo, false) - err = checker.LoadSnapshots(context.TODO()) - if err != nil { - t.Error(err) - } - - hints, errs := checker.LoadIndex(context.TODO(), nil) - for _, h := range hints { - t.Logf("hint: %v\n", h) - } + idx := index.NewMasterIndex() + rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) + blobs := make(map[restic.PackedBlob]struct{}) + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { + blobs[pb] = struct{}{} + })) - for _, err := range errs { - t.Errorf("checker found error: %v", err) - } + rtest.OK(t, idx.Save(context.TODO(), repo, nil, nil, restic.MasterIndexSaveOpts{})) + idx = index.NewMasterIndex() + rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - errCh := make(chan error) - go checker.Structure(ctx, nil, errCh) - i := 0 - for err := range errCh { - t.Errorf("checker returned error: %v", err) - i++ - if i == 10 { - t.Errorf("more than 10 errors returned, skipping the rest") - cancel() - break + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { + if _, ok := blobs[pb]; ok { + delete(blobs, pb) + } else { + t.Fatalf("unexpected blobs %v", pb) } - } + })) + rtest.Equals(t, 0, len(blobs), "saved index is missing blobs") + + checker.TestCheckRepo(t, repo, false) } From 550d1eeac3540812a811a3ba9fac2eb9a9923f20 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 15:55:42 
+0200 Subject: [PATCH 274/893] repository: remove SaveIndex from interface The method is now only indirectly accessible via Prune or RepairIndex. --- internal/index/master_index.go | 9 ++++++- internal/index/master_index_test.go | 2 +- internal/repository/repack_test.go | 41 ++++++----------------------- internal/repository/repair_index.go | 6 +++-- internal/repository/repair_pack.go | 2 +- internal/repository/repository.go | 4 --- internal/restic/repository.go | 10 ------- 7 files changed, 22 insertions(+), 52 deletions(-) diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 796559fd7aa..981a2b31b19 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -312,10 +312,17 @@ func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, return mi.MergeFinalIndexes() } +type MasterIndexSaveOpts struct { + SaveProgress *progress.Counter + DeleteProgress func() *progress.Counter + DeleteReport func(id restic.ID, err error) + SkipDeletion bool +} + // Save saves all known indexes to index files, leaving out any // packs whose ID is contained in packBlacklist from finalized indexes. // It also removes the old index files and those listed in extraObsolete. -func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacked, excludePacks restic.IDSet, extraObsolete restic.IDs, opts restic.MasterIndexSaveOpts) error { +func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacked, excludePacks restic.IDSet, extraObsolete restic.IDs, opts MasterIndexSaveOpts) error { p := opts.SaveProgress p.SetMax(uint64(len(mi.Packs(excludePacks)))) diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go index 3a7f3da88eb..41f4cc53410 100644 --- a/internal/index/master_index_test.go +++ b/internal/index/master_index_test.go @@ -364,7 +364,7 @@ func testIndexSave(t *testing.T, version uint) { blobs[pb] = struct{}{} })) - rtest.OK(t, idx.Save(context.TODO(), repo, nil, nil, restic.MasterIndexSaveOpts{})) + rtest.OK(t, idx.Save(context.TODO(), repo, nil, nil, index.MasterIndexSaveOpts{})) idx = index.NewMasterIndex() rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index 96b75ca4652..476e63b4721 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -7,10 +7,10 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" "golang.org/x/sync/errgroup" ) @@ -173,35 +173,12 @@ func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs rest } } -func rebuildIndex(t *testing.T, repo restic.Repository) { - err := repo.SetIndex(index.NewMasterIndex()) - rtest.OK(t, err) - - packs := make(map[restic.ID]int64) - err = repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { - packs[id] = size - return nil - }) - rtest.OK(t, err) - - _, err = repo.(*repository.Repository).CreateIndexFromPacks(context.TODO(), packs, nil) - rtest.OK(t, err) +func rebuildAndReloadIndex(t *testing.T, repo *repository.Repository) { + rtest.OK(t, repository.RepairIndex(context.TODO(), repo, repository.RepairIndexOptions{ + ReadAllPacks: true, + }, &progress.NoopPrinter{})) - var obsoleteIndexes restic.IDs - err = 
repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error { - obsoleteIndexes = append(obsoleteIndexes, id) - return nil - }) - rtest.OK(t, err) - - err = repo.SaveIndex(context.TODO(), restic.NewIDSet(), obsoleteIndexes, restic.MasterIndexSaveOpts{}) - rtest.OK(t, err) -} - -func reloadIndex(t *testing.T, repo restic.Repository) { - if err := repo.LoadIndex(context.TODO(), nil); err != nil { - t.Fatalf("error loading new index: %v", err) - } + rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } func TestRepack(t *testing.T) { @@ -236,8 +213,7 @@ func testRepack(t *testing.T, version uint) { removePacks := findPacksForBlobs(t, repo, removeBlobs) repack(t, repo, removePacks, keepBlobs) - rebuildIndex(t, repo) - reloadIndex(t, repo) + rebuildAndReloadIndex(t, repo) packsAfter = listPacks(t, repo) for id := range removePacks { @@ -307,8 +283,7 @@ func testRepackCopy(t *testing.T, version uint) { if err != nil { t.Fatal(err) } - rebuildIndex(t, dstRepo) - reloadIndex(t, dstRepo) + rebuildAndReloadIndex(t, dstRepo) for h := range keepBlobs { list := dstRepo.LookupBlob(h.Type, h.ID) diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index c20641d508d..5674a396347 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -28,6 +28,8 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, if err != nil { return err } + repo.clearIndex() + } else { printer.P("loading indexes...\n") mi := index.NewMasterIndex() @@ -111,11 +113,11 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, return nil } -func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { +func rebuildIndexFiles(ctx context.Context, repo *Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { printer.P("rebuilding index\n") bar := printer.NewCounter("packs processed") - return repo.SaveIndex(ctx, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ + return repo.idx.Save(ctx, repo, removePacks, extraObsolete, index.MasterIndexSaveOpts{ SaveProgress: bar, DeleteProgress: func() *progress.Counter { return printer.NewCounter("old indexes deleted") diff --git a/internal/repository/repair_pack.go b/internal/repository/repair_pack.go index cac7aac1020..7cb9d9f3ec3 100644 --- a/internal/repository/repair_pack.go +++ b/internal/repository/repair_pack.go @@ -10,7 +10,7 @@ import ( "golang.org/x/sync/errgroup" ) -func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, printer progress.Printer) error { +func RepairPacks(ctx context.Context, repo *Repository, ids restic.IDSet, printer progress.Printer) error { wg, wgCtx := errgroup.WithContext(ctx) repo.StartPackUploader(wgCtx, wg) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index bd7de0de4ba..7abc5911af5 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -587,10 +587,6 @@ func (r *Repository) LookupBlobSize(tpe restic.BlobType, id restic.ID) (uint, bo return r.idx.LookupSize(restic.BlobHandle{Type: tpe, ID: id}) } -func (r *Repository) SaveIndex(ctx context.Context, excludePacks restic.IDSet, extraObsolete restic.IDs, opts restic.MasterIndexSaveOpts) error { - return r.idx.Save(ctx, r, excludePacks, extraObsolete, opts) -} - // ListBlobs runs fn on all blobs known to 
the index. When the context is cancelled, // the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. func (r *Repository) ListBlobs(ctx context.Context, fn func(restic.PackedBlob)) error { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 9e6d6b99bd8..b18b036a733 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -23,7 +23,6 @@ type Repository interface { LoadIndex(ctx context.Context, p *progress.Counter) error SetIndex(mi MasterIndex) error - SaveIndex(ctx context.Context, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error LookupBlob(t BlobType, id ID) []PackedBlob LookupBlobSize(t BlobType, id ID) (size uint, exists bool) @@ -106,13 +105,6 @@ type PackBlobs struct { Blobs []Blob } -type MasterIndexSaveOpts struct { - SaveProgress *progress.Counter - DeleteProgress func() *progress.Counter - DeleteReport func(id ID, err error) - SkipDeletion bool -} - // MasterIndex keeps track of the blobs are stored within files. type MasterIndex interface { Has(bh BlobHandle) bool @@ -122,8 +114,6 @@ type MasterIndex interface { // the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. Each(ctx context.Context, fn func(PackedBlob)) error ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs - - Save(ctx context.Context, repo SaverRemoverUnpacked, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error } // Lister allows listing files in a backend. From 04ad9f0c0c5f28d47d9c921a5219d820ef1af364 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 16:10:48 +0200 Subject: [PATCH 275/893] repository: remove Packer and SavePacker from public interface --- internal/repository/packer_manager.go | 18 +++++++++--------- internal/repository/packer_manager_test.go | 6 +++--- internal/repository/packer_uploader.go | 12 ++++++------ 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/internal/repository/packer_manager.go b/internal/repository/packer_manager.go index 22eca0c2ef7..c7599f5af3d 100644 --- a/internal/repository/packer_manager.go +++ b/internal/repository/packer_manager.go @@ -21,8 +21,8 @@ import ( "github.com/minio/sha256-simd" ) -// Packer holds a pack.Packer together with a hash writer. -type Packer struct { +// packer holds a pack.packer together with a hash writer. +type packer struct { *pack.Packer tmpfile *os.File bufWr *bufio.Writer @@ -32,16 +32,16 @@ type Packer struct { type packerManager struct { tpe restic.BlobType key *crypto.Key - queueFn func(ctx context.Context, t restic.BlobType, p *Packer) error + queueFn func(ctx context.Context, t restic.BlobType, p *packer) error pm sync.Mutex - packer *Packer + packer *packer packSize uint } // newPackerManager returns a new packer manager which writes temporary files // to a temporary directory -func newPackerManager(key *crypto.Key, tpe restic.BlobType, packSize uint, queueFn func(ctx context.Context, t restic.BlobType, p *Packer) error) *packerManager { +func newPackerManager(key *crypto.Key, tpe restic.BlobType, packSize uint, queueFn func(ctx context.Context, t restic.BlobType, p *packer) error) *packerManager { return &packerManager{ tpe: tpe, key: key, @@ -114,7 +114,7 @@ func (r *packerManager) SaveBlob(ctx context.Context, t restic.BlobType, id rest // findPacker returns a packer for a new blob of size bytes. Either a new one is // created or one is returned that already has some blobs. 
-func (r *packerManager) newPacker() (packer *Packer, err error) { +func (r *packerManager) newPacker() (pck *packer, err error) { debug.Log("create new pack") tmpfile, err := fs.TempFile("", "restic-temp-pack-") if err != nil { @@ -123,17 +123,17 @@ func (r *packerManager) newPacker() (packer *Packer, err error) { bufWr := bufio.NewWriter(tmpfile) p := pack.NewPacker(r.key, bufWr) - packer = &Packer{ + pck = &packer{ Packer: p, tmpfile: tmpfile, bufWr: bufWr, } - return packer, nil + return pck, nil } // savePacker stores p in the backend. -func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) error { +func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *packer) error { debug.Log("save packer for %v with %d blobs (%d bytes)\n", t, p.Packer.Count(), p.Packer.Size()) err := p.Packer.Finalize() if err != nil { diff --git a/internal/repository/packer_manager_test.go b/internal/repository/packer_manager_test.go index 8984073dae0..0f3aea05f4b 100644 --- a/internal/repository/packer_manager_test.go +++ b/internal/repository/packer_manager_test.go @@ -70,7 +70,7 @@ func testPackerManager(t testing.TB) int64 { rnd := rand.New(rand.NewSource(randomSeed)) savedBytes := int(0) - pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, func(ctx context.Context, tp restic.BlobType, p *Packer) error { + pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, func(ctx context.Context, tp restic.BlobType, p *packer) error { err := p.Finalize() if err != nil { return err @@ -92,7 +92,7 @@ func testPackerManager(t testing.TB) int64 { func TestPackerManagerWithOversizeBlob(t *testing.T) { packFiles := int(0) sizeLimit := uint(512 * 1024) - pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, sizeLimit, func(ctx context.Context, tp restic.BlobType, p *Packer) error { + pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, sizeLimit, func(ctx context.Context, tp restic.BlobType, p *packer) error { packFiles++ return nil }) @@ -122,7 +122,7 @@ func BenchmarkPackerManager(t *testing.B) { for i := 0; i < t.N; i++ { rnd.Seed(randomSeed) - pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, func(ctx context.Context, t restic.BlobType, p *Packer) error { + pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, func(ctx context.Context, t restic.BlobType, p *packer) error { return nil }) fillPacks(t, rnd, pm, blobBuf) diff --git a/internal/repository/packer_uploader.go b/internal/repository/packer_uploader.go index 30c8f77afc6..936e7ea1d89 100644 --- a/internal/repository/packer_uploader.go +++ b/internal/repository/packer_uploader.go @@ -7,13 +7,13 @@ import ( "golang.org/x/sync/errgroup" ) -// SavePacker implements saving a pack in the repository. -type SavePacker interface { - savePacker(ctx context.Context, t restic.BlobType, p *Packer) error +// savePacker implements saving a pack in the repository. 
+type savePacker interface { + savePacker(ctx context.Context, t restic.BlobType, p *packer) error } type uploadTask struct { - packer *Packer + packer *packer tpe restic.BlobType } @@ -21,7 +21,7 @@ type packerUploader struct { uploadQueue chan uploadTask } -func newPackerUploader(ctx context.Context, wg *errgroup.Group, repo SavePacker, connections uint) *packerUploader { +func newPackerUploader(ctx context.Context, wg *errgroup.Group, repo savePacker, connections uint) *packerUploader { pu := &packerUploader{ uploadQueue: make(chan uploadTask), } @@ -48,7 +48,7 @@ func newPackerUploader(ctx context.Context, wg *errgroup.Group, repo SavePacker, return pu } -func (pu *packerUploader) QueuePacker(ctx context.Context, t restic.BlobType, p *Packer) (err error) { +func (pu *packerUploader) QueuePacker(ctx context.Context, t restic.BlobType, p *packer) (err error) { select { case <-ctx.Done(): return ctx.Err() From 76e6719f2e8b6366eef3a5cba876f1dfb789a0bb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 16:14:31 +0200 Subject: [PATCH 276/893] repository: make CreateIndexFromPacks method private --- internal/repository/repair_index.go | 2 +- internal/repository/repository.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index 5674a396347..4ac6cdd3a54 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -92,7 +92,7 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, printer.P("reading pack files\n") bar := printer.NewCounter("packs") bar.SetMax(uint64(len(packSizeFromList))) - invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) + invalidFiles, err := repo.createIndexFromPacks(ctx, packSizeFromList, bar) bar.Done() if err != nil { return err diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 7abc5911af5..d9dfc45566c 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -656,10 +656,10 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { return r.prepareCache() } -// CreateIndexFromPacks creates a new index by reading all given pack files (with sizes). +// createIndexFromPacks creates a new index by reading all given pack files (with sizes). // The index is added to the MasterIndex but not marked as finalized. // Returned is the list of pack files which could not be read. -func (r *Repository) CreateIndexFromPacks(ctx context.Context, packsize map[restic.ID]int64, p *progress.Counter) (invalid restic.IDs, err error) { +func (r *Repository) createIndexFromPacks(ctx context.Context, packsize map[restic.ID]int64, p *progress.Counter) (invalid restic.IDs, err error) { var m sync.Mutex debug.Log("Loading index from pack files") From 9aa0c90fb2d5eae91b74b58c822da6b030471182 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 16:40:40 +0200 Subject: [PATCH 277/893] index: remove supersedes field Using the field with its current semantics is nearly impossible to get right. Remove it as it will be replaced anyways in repository format 3. 
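As a side note, not part of this patch: dropping the struct field stays read-compatible with existing repositories, because Go's encoding/json ignores unknown fields when decoding, so index files that still carry a `supersedes` array remain decodable. A minimal standalone sketch with made-up index data:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // trimmed-down stand-in for the index JSON structure; only the fields
    // needed for this sketch are included
    type jsonIndex struct {
    	Packs []struct {
    		ID string `json:"id"`
    	} `json:"packs"`
    }

    func main() {
    	// an old index file may still contain a "supersedes" array; it is ignored
    	old := []byte(`{"supersedes":["ed54ae36"],"packs":[{"id":"73d04e61"}]}`)
    	var idx jsonIndex
    	if err := json.Unmarshal(old, &idx); err != nil {
    		fmt.Println("decode failed:", err)
    		return
    	}
    	fmt.Println(idx.Packs[0].ID) // prints 73d04e61
    }
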
--- internal/index/index.go | 38 +++++++--------------------------- internal/index/index_test.go | 6 ------ internal/index/master_index.go | 13 +----------- 3 files changed, 8 insertions(+), 49 deletions(-) diff --git a/internal/index/index.go b/internal/index/index.go index 1c20fe38d60..e09b683f160 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -50,10 +50,9 @@ type Index struct { byType [restic.NumBlobTypes]indexMap packs restic.IDs - final bool // set to true for all indexes read from the backend ("finalized") - ids restic.IDs // set to the IDs of the contained finalized indexes - supersedes restic.IDs - created time.Time + final bool // set to true for all indexes read from the backend ("finalized") + ids restic.IDs // set to the IDs of the contained finalized indexes + created time.Time } // NewIndex returns a new index. @@ -197,25 +196,6 @@ func (idx *Index) LookupSize(bh restic.BlobHandle) (plaintextLength uint, found return uint(crypto.PlaintextLength(int(e.length))), true } -// Supersedes returns the list of indexes this index supersedes, if any. -func (idx *Index) Supersedes() restic.IDs { - return idx.supersedes -} - -// AddToSupersedes adds the ids to the list of indexes superseded by this -// index. If the index has already been finalized, an error is returned. -func (idx *Index) AddToSupersedes(ids ...restic.ID) error { - idx.m.Lock() - defer idx.m.Unlock() - - if idx.final { - return errors.New("index already finalized") - } - - idx.supersedes = append(idx.supersedes, ids...) - return nil -} - // Each passes all blobs known to the index to the callback fn. This blocks any // modification of the index. func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) error { @@ -356,8 +336,8 @@ func (idx *Index) generatePackList() ([]packJSON, error) { } type jsonIndex struct { - Supersedes restic.IDs `json:"supersedes,omitempty"` - Packs []packJSON `json:"packs"` + // removed: Supersedes restic.IDs `json:"supersedes,omitempty"` + Packs []packJSON `json:"packs"` } // Encode writes the JSON serialization of the index to the writer w. @@ -373,8 +353,7 @@ func (idx *Index) Encode(w io.Writer) error { enc := json.NewEncoder(w) idxJSON := jsonIndex{ - Supersedes: idx.supersedes, - Packs: list, + Packs: list, } return enc.Encode(idxJSON) } @@ -433,8 +412,7 @@ func (idx *Index) Dump(w io.Writer) error { } outer := jsonIndex{ - Supersedes: idx.Supersedes(), - Packs: list, + Packs: list, } buf, err := json.MarshalIndent(outer, "", " ") @@ -495,7 +473,6 @@ func (idx *Index) merge(idx2 *Index) error { } idx.ids = append(idx.ids, idx2.ids...) - idx.supersedes = append(idx.supersedes, idx2.supersedes...) 
return nil } @@ -545,7 +522,6 @@ func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err erro }) } } - idx.supersedes = idxJSON.Supersedes idx.ids = append(idx.ids, id) idx.final = true diff --git a/internal/index/index_test.go b/internal/index/index_test.go index bafd95c4838..66cec23f6d2 100644 --- a/internal/index/index_test.go +++ b/internal/index/index_test.go @@ -309,8 +309,6 @@ func TestIndexUnserialize(t *testing.T) { {docExampleV1, 1}, {docExampleV2, 2}, } { - oldIdx := restic.IDs{restic.TestParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")} - idx, oldFormat, err := index.DecodeIndex(task.idxBytes, restic.NewRandomID()) rtest.OK(t, err) rtest.Assert(t, !oldFormat, "new index format recognized as old format") @@ -337,8 +335,6 @@ func TestIndexUnserialize(t *testing.T) { } } - rtest.Equals(t, oldIdx, idx.Supersedes()) - blobs := listPack(t, idx, exampleLookupTest.packID) if len(blobs) != len(exampleLookupTest.blobs) { t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs)) @@ -446,8 +442,6 @@ func TestIndexUnserializeOld(t *testing.T) { rtest.Equals(t, test.offset, blob.Offset) rtest.Equals(t, test.length, blob.Length) } - - rtest.Equals(t, 0, len(idx.Supersedes())) } func TestIndexPacks(t *testing.T) { diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 981a2b31b19..17e681411c6 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -332,7 +332,7 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacke debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(mi.idx), excludePacks) newIndex := NewIndex() - obsolete := restic.NewIDSet() + obsolete := restic.NewIDSet(extraObsolete...) // track spawned goroutines using wg, create a new context which is // cancelled as soon as an error occurs. @@ -351,11 +351,6 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacke } debug.Log("adding index ids %v to supersedes field", ids) - - err = newIndex.AddToSupersedes(ids...) - if err != nil { - return err - } obsolete.Merge(restic.NewIDSet(ids...)) } else { debug.Log("index %d isn't final, don't add to supersedes field", i) @@ -380,12 +375,6 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacke } } - err := newIndex.AddToSupersedes(extraObsolete...) - if err != nil { - return err - } - obsolete.Merge(restic.NewIDSet(extraObsolete...)) - select { case ch <- newIndex: case <-wgCtx.Done(): From 68fa0e0305421a5cd33779398a345d7c9b15178f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 19:10:08 +0200 Subject: [PATCH 278/893] prune: no longer disable automatic index updates this allows prune to resume an interrupted prune run. --- cmd/restic/cmd_prune.go | 3 --- internal/repository/packer_manager.go | 3 --- internal/repository/repository.go | 12 ------------ 3 files changed, 18 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index d62a7df7569..7872589be0d 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -162,9 +162,6 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term } func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet, term *termstatus.Terminal) error { - // we do not need index updates while pruning! 
- repo.DisableAutoIndexUpdate() - if repo.Cache == nil { Print("warning: running prune without a cache, this may be very slow!\n") } diff --git a/internal/repository/packer_manager.go b/internal/repository/packer_manager.go index c7599f5af3d..76734fb87ef 100644 --- a/internal/repository/packer_manager.go +++ b/internal/repository/packer_manager.go @@ -200,8 +200,5 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *packe r.idx.StorePack(id, p.Packer.Blobs()) // Save index if full - if r.noAutoIndexUpdate { - return nil - } return r.idx.SaveFullIndex(ctx, r) } diff --git a/internal/repository/repository.go b/internal/repository/repository.go index d9dfc45566c..f0ef93ecf89 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -42,8 +42,6 @@ type Repository struct { opts Options - noAutoIndexUpdate bool - packerWg *errgroup.Group uploader *packerUploader treePM *packerManager @@ -130,12 +128,6 @@ func New(be backend.Backend, opts Options) (*Repository, error) { return repo, nil } -// DisableAutoIndexUpdate deactives the automatic finalization and upload of new -// indexes once these are full -func (r *Repository) DisableAutoIndexUpdate() { - r.noAutoIndexUpdate = true -} - // setConfig assigns the given config and updates the repository parameters accordingly func (r *Repository) setConfig(cfg restic.Config) { r.cfg = cfg @@ -526,10 +518,6 @@ func (r *Repository) Flush(ctx context.Context) error { return err } - // Save index after flushing only if noAutoIndexUpdate is not set - if r.noAutoIndexUpdate { - return nil - } return r.idx.SaveIndex(ctx, r) } From 72482ce5bd173ed5a8a4e9dda393925e24c3196b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 20:36:16 +0200 Subject: [PATCH 279/893] index: misc cleanups --- internal/index/index_parallel.go | 2 +- internal/index/master_index.go | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/internal/index/index_parallel.go b/internal/index/index_parallel.go index d51d5930fba..3d5621a2ddc 100644 --- a/internal/index/index_parallel.go +++ b/internal/index/index_parallel.go @@ -11,7 +11,7 @@ import ( // ForAllIndexes loads all index files in parallel and calls the given callback. // It is guaranteed that the function is not run concurrently. If the callback // returns an error, this function is cancelled and also returns that error. -func ForAllIndexes(ctx context.Context, lister restic.Lister, repo restic.ListerLoaderUnpacked, +func ForAllIndexes(ctx context.Context, lister restic.Lister, repo restic.LoaderUnpacked, fn func(id restic.ID, index *Index, oldFormat bool, err error) error) error { // decoding an index can take quite some time such that this can be both CPU- or IO-bound diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 17e681411c6..a5ee40b52c3 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -23,12 +23,15 @@ type MasterIndex struct { // NewMasterIndex creates a new master index. func NewMasterIndex() *MasterIndex { + mi := &MasterIndex{pendingBlobs: restic.NewBlobSet()} + mi.clear() + return mi +} + +func (mi *MasterIndex) clear() { // Always add an empty final index, such that MergeFinalIndexes can merge into this. - // Note that removing this index could lead to a race condition in the rare - // situation that only two indexes exist which are saved and merged concurrently. 
- idx := []*Index{NewIndex()} - idx[0].Finalize() - return &MasterIndex{idx: idx, pendingBlobs: restic.NewBlobSet()} + mi.idx = []*Index{NewIndex()} + mi.idx[0].Finalize() } func (mi *MasterIndex) MarkCompressed() { From ad98fbf7dd847e4a5888475f1ef6456ffb491dc3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 20:37:19 +0200 Subject: [PATCH 280/893] restic: add IDSet.Clone() method --- internal/restic/idset.go | 6 ++++++ internal/restic/idset_test.go | 3 +++ 2 files changed, 9 insertions(+) diff --git a/internal/restic/idset.go b/internal/restic/idset.go index 1b12a6398fc..9e6e3c6fd85 100644 --- a/internal/restic/idset.go +++ b/internal/restic/idset.go @@ -105,3 +105,9 @@ func (s IDSet) String() string { str := s.List().String() return "{" + str[1:len(str)-1] + "}" } + +func (s IDSet) Clone() IDSet { + c := NewIDSet() + c.Merge(s) + return c +} diff --git a/internal/restic/idset_test.go b/internal/restic/idset_test.go index 734b3123775..14c88b3143e 100644 --- a/internal/restic/idset_test.go +++ b/internal/restic/idset_test.go @@ -35,4 +35,7 @@ func TestIDSet(t *testing.T) { } rtest.Equals(t, "{1285b303 7bb086db f658198b}", set.String()) + + copied := set.Clone() + rtest.Equals(t, "{1285b303 7bb086db f658198b}", copied.String()) } From 5f7b48e65f302c2ca4965a4f815ef4bba0d2e842 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 20:38:58 +0200 Subject: [PATCH 281/893] index: replace Save() method with Rewrite and SaveFallback Rewrite implements a streaming rewrite of the index that excludes the given packs. For this it loads all index files from the repository and only modifies those that require changes. This will reduce the index churn when running prune. Rewrite does not require the in-memory index and thus can drop it to significantly reduce the memory usage. However, `prune --unsafe-recovery` cannot use this strategy and requires a separate method to save the whole in-memory index. This is now handled using SaveFallback. --- internal/index/master_index.go | 225 +++++++++++++++++++---- internal/index/master_index_test.go | 2 +- internal/repository/prune.go | 12 +- internal/repository/repair_index.go | 15 +- internal/repository/repair_index_test.go | 4 - internal/repository/repair_pack.go | 2 +- 6 files changed, 202 insertions(+), 58 deletions(-) diff --git a/internal/index/master_index.go b/internal/index/master_index.go index a5ee40b52c3..40d1e344665 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -315,58 +315,127 @@ func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, return mi.MergeFinalIndexes() } -type MasterIndexSaveOpts struct { +type MasterIndexRewriteOpts struct { SaveProgress *progress.Counter DeleteProgress func() *progress.Counter DeleteReport func(id restic.ID, err error) - SkipDeletion bool } -// Save saves all known indexes to index files, leaving out any -// packs whose ID is contained in packBlacklist from finalized indexes. -// It also removes the old index files and those listed in extraObsolete. -func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacked, excludePacks restic.IDSet, extraObsolete restic.IDs, opts MasterIndexSaveOpts) error { - p := opts.SaveProgress - p.SetMax(uint64(len(mi.Packs(excludePacks)))) +// Rewrite removes packs whose ID is in excludePacks from all known indexes. +// It also removes the rewritten index files and those listed in extraObsolete. +// If oldIndexes is not nil, then only the indexes in this set are processed. 
+// This is used by repair index to only rewrite and delete the old indexes. +// +// Must not be called concurrently to any other MasterIndex operation. +func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, excludePacks restic.IDSet, oldIndexes restic.IDSet, extraObsolete restic.IDs, opts MasterIndexRewriteOpts) error { + for _, idx := range mi.idx { + if !idx.Final() { + panic("internal error - index must be saved before calling MasterIndex.Rewrite") + } + } - mi.idxMutex.Lock() - defer mi.idxMutex.Unlock() + var indexes restic.IDSet + if oldIndexes != nil { + // repair index adds new index entries for already existing pack files + // only remove the old (possibly broken) entries by only processing old indexes + indexes = oldIndexes + } else { + indexes = mi.IDs() + } - debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(mi.idx), excludePacks) + p := opts.SaveProgress + p.SetMax(uint64(len(indexes))) - newIndex := NewIndex() - obsolete := restic.NewIDSet(extraObsolete...) + // reset state which is not necessary for Rewrite and just consumes a lot of memory + // the index state would be invalid after Rewrite completes anyways + mi.clear() + runtime.GC() - // track spawned goroutines using wg, create a new context which is - // cancelled as soon as an error occurs. + // copy excludePacks to prevent unintended sideeffects + excludePacks = excludePacks.Clone() + debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(indexes), excludePacks) wg, wgCtx := errgroup.WithContext(ctx) - ch := make(chan *Index) + idxCh := make(chan restic.ID) + wg.Go(func() error { + defer close(idxCh) + for id := range indexes { + select { + case idxCh <- id: + case <-wgCtx.Done(): + return wgCtx.Err() + } + } + return nil + }) + + var rewriteWg sync.WaitGroup + type rewriteTask struct { + idx *Index + oldFormat bool + } + rewriteCh := make(chan rewriteTask) + loader := func() error { + defer rewriteWg.Done() + for id := range idxCh { + buf, err := repo.LoadUnpacked(wgCtx, restic.IndexFile, id) + if err != nil { + return fmt.Errorf("LoadUnpacked(%v): %w", id.Str(), err) + } + idx, oldFormat, err := DecodeIndex(buf, id) + if err != nil { + return err + } + select { + case rewriteCh <- rewriteTask{idx, oldFormat}: + case <-wgCtx.Done(): + return wgCtx.Err() + } + + } + return nil + } + // loading an index can take quite some time such that this can be both CPU- or IO-bound + loaderCount := int(repo.Connections()) + runtime.GOMAXPROCS(0) + // run workers on ch + for i := 0; i < loaderCount; i++ { + rewriteWg.Add(1) + wg.Go(loader) + } wg.Go(func() error { - defer close(ch) - for i, idx := range mi.idx { - if idx.Final() { - ids, err := idx.IDs() - if err != nil { - debug.Log("index %d does not have an ID: %v", err) - return err - } + rewriteWg.Wait() + close(rewriteCh) + return nil + }) - debug.Log("adding index ids %v to supersedes field", ids) - obsolete.Merge(restic.NewIDSet(ids...)) - } else { - debug.Log("index %d isn't final, don't add to supersedes field", i) + obsolete := restic.NewIDSet(extraObsolete...) 
+ saveCh := make(chan *Index) + + wg.Go(func() error { + defer close(saveCh) + newIndex := NewIndex() + for task := range rewriteCh { + // always rewrite indexes using the old format, that include a pack that must be removed or that are not full + if !task.oldFormat && len(task.idx.Packs().Intersect(excludePacks)) == 0 && IndexFull(task.idx, mi.compress) { + // make sure that each pack is only stored exactly once in the index + excludePacks.Merge(task.idx.Packs()) + // index is already up to date + p.Add(1) + continue } - debug.Log("adding index %d", i) + ids, err := task.idx.IDs() + if err != nil || len(ids) != 1 { + panic("internal error, index has no ID") + } + obsolete.Merge(restic.NewIDSet(ids...)) - for pbs := range idx.EachByPack(wgCtx, excludePacks) { + for pbs := range task.idx.EachByPack(wgCtx, excludePacks) { newIndex.StorePack(pbs.PackID, pbs.Blobs) - p.Add(1) if IndexFull(newIndex, mi.compress) { select { - case ch <- newIndex: + case saveCh <- newIndex: case <-wgCtx.Done(): return wgCtx.Err() } @@ -376,10 +445,13 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacke if wgCtx.Err() != nil { return wgCtx.Err() } + // make sure that each pack is only stored exactly once in the index + excludePacks.Merge(task.idx.Packs()) + p.Add(1) } select { - case ch <- newIndex: + case saveCh <- newIndex: case <-wgCtx.Done(): } return nil @@ -387,7 +459,7 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacke // a worker receives an index from ch, and saves the index worker := func() error { - for idx := range ch { + for idx := range saveCh { idx.Finalize() if _, err := SaveIndex(wgCtx, repo, idx); err != nil { return err @@ -405,11 +477,7 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacke err := wg.Wait() p.Done() if err != nil { - return err - } - - if opts.SkipDeletion { - return nil + return fmt.Errorf("failed to rewrite indexes: %w", err) } p = nil @@ -425,6 +493,85 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverRemoverUnpacke }, p) } +// SaveFallback saves all known indexes to index files, leaving out any +// packs whose ID is contained in packBlacklist from finalized indexes. +// It is only intended for use by prune with the UnsafeRecovery option. +// +// Must not be called concurrently to any other MasterIndex operation. 
+func (mi *MasterIndex) SaveFallback(ctx context.Context, repo restic.SaverRemoverUnpacked, excludePacks restic.IDSet, p *progress.Counter) error { + p.SetMax(uint64(len(mi.Packs(excludePacks)))) + + mi.idxMutex.Lock() + defer mi.idxMutex.Unlock() + + debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(mi.idx), excludePacks) + + obsolete := restic.NewIDSet() + wg, wgCtx := errgroup.WithContext(ctx) + + ch := make(chan *Index) + wg.Go(func() error { + defer close(ch) + newIndex := NewIndex() + for _, idx := range mi.idx { + if idx.Final() { + ids, err := idx.IDs() + if err != nil { + panic("internal error - finalized index without ID") + } + debug.Log("adding index ids %v to supersedes field", ids) + obsolete.Merge(restic.NewIDSet(ids...)) + } + + for pbs := range idx.EachByPack(wgCtx, excludePacks) { + newIndex.StorePack(pbs.PackID, pbs.Blobs) + p.Add(1) + if IndexFull(newIndex, mi.compress) { + select { + case ch <- newIndex: + case <-wgCtx.Done(): + return wgCtx.Err() + } + newIndex = NewIndex() + } + } + if wgCtx.Err() != nil { + return wgCtx.Err() + } + } + + select { + case ch <- newIndex: + case <-wgCtx.Done(): + } + return nil + }) + + // a worker receives an index from ch, and saves the index + worker := func() error { + for idx := range ch { + idx.Finalize() + if _, err := SaveIndex(wgCtx, repo, idx); err != nil { + return err + } + } + return nil + } + + // keep concurrency bounded as we're on a fallback path + workerCount := int(repo.Connections()) + // run workers on ch + for i := 0; i < workerCount; i++ { + wg.Go(worker) + } + err := wg.Wait() + p.Done() + // the index no longer matches to stored state + mi.clear() + + return err +} + // SaveIndex saves an index in the repository. func SaveIndex(ctx context.Context, repo restic.SaverUnpacked, index *Index) (restic.ID, error) { buf := bytes.NewBuffer(nil) diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go index 41f4cc53410..b8a29262e14 100644 --- a/internal/index/master_index_test.go +++ b/internal/index/master_index_test.go @@ -364,7 +364,7 @@ func testIndexSave(t *testing.T, version uint) { blobs[pb] = struct{}{} })) - rtest.OK(t, idx.Save(context.TODO(), repo, nil, nil, index.MasterIndexSaveOpts{})) + rtest.OK(t, idx.Rewrite(context.TODO(), repo, nil, nil, nil, index.MasterIndexRewriteOpts{})) idx = index.NewMasterIndex() rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 712986e6160..895b07994c7 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -522,7 +522,7 @@ func (plan *PrunePlan) Stats() PruneStats { // - rebuild the index while ignoring all files that will be deleted // - delete the files // plan.removePacks and plan.ignorePacks are modified in this function. -func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (err error) { +func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) error { if plan.opts.DryRun { printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. 
This is expected behavior.\n\n") if len(plan.removePacksFirst) > 0 { @@ -581,12 +581,12 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e if plan.opts.UnsafeRecovery { printer.P("deleting index files\n") indexFiles := repo.idx.IDs() - err = deleteFiles(ctx, false, repo, indexFiles, restic.IndexFile, printer) + err := deleteFiles(ctx, false, repo, indexFiles, restic.IndexFile, printer) if err != nil { return errors.Fatalf("%s", err) } } else if len(plan.ignorePacks) != 0 { - err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, false, printer) + err := rewriteIndexFiles(ctx, repo, plan.ignorePacks, nil, nil, printer) if err != nil { return errors.Fatalf("%s", err) } @@ -601,16 +601,12 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e } if plan.opts.UnsafeRecovery { - err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) + err := repo.idx.SaveFallback(ctx, repo, plan.ignorePacks, printer.NewCounter("packs processed")) if err != nil { return errors.Fatalf("%s", err) } } - if err != nil { - return err - } - // drop outdated in-memory index repo.clearIndex() diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index 4ac6cdd3a54..e011319232d 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -62,6 +62,8 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, } } + oldIndexes := repo.idx.IDs() + printer.P("getting pack files to read...\n") err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { size, ok := packSizeFromIndex[id] @@ -103,7 +105,11 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, } } - err = rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) + if err := repo.Flush(ctx); err != nil { + return err + } + + err = rewriteIndexFiles(ctx, repo, removePacks, oldIndexes, obsoleteIndexes, printer) if err != nil { return err } @@ -113,11 +119,11 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, return nil } -func rebuildIndexFiles(ctx context.Context, repo *Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { +func rewriteIndexFiles(ctx context.Context, repo *Repository, removePacks restic.IDSet, oldIndexes restic.IDSet, extraObsolete restic.IDs, printer progress.Printer) error { printer.P("rebuilding index\n") - bar := printer.NewCounter("packs processed") - return repo.idx.Save(ctx, repo, removePacks, extraObsolete, index.MasterIndexSaveOpts{ + bar := printer.NewCounter("indexes processed") + return repo.idx.Rewrite(ctx, repo, removePacks, oldIndexes, extraObsolete, index.MasterIndexRewriteOpts{ SaveProgress: bar, DeleteProgress: func() *progress.Counter { return printer.NewCounter("old indexes deleted") @@ -129,6 +135,5 @@ func rebuildIndexFiles(ctx context.Context, repo *Repository, removePacks restic printer.VV("removed index %v\n", id.String()) } }, - SkipDeletion: skipDeletion, }) } diff --git a/internal/repository/repair_index_test.go b/internal/repository/repair_index_test.go index 79922e9ec3a..ac47d59ff7f 100644 --- a/internal/repository/repair_index_test.go +++ b/internal/repository/repair_index_test.go @@ -30,10 +30,6 @@ func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, ReadAllPacks: readAllPacks, }, &progress.NoopPrinter{})) - newIndexes := listIndex(t, repo) - old := 
indexes.Intersect(newIndexes) - rtest.Assert(t, len(old) == 0, "expected old indexes to be removed, found %v", old) - checker.TestCheckRepo(t, repo, true) } diff --git a/internal/repository/repair_pack.go b/internal/repository/repair_pack.go index 7cb9d9f3ec3..811388cc9ad 100644 --- a/internal/repository/repair_pack.go +++ b/internal/repository/repair_pack.go @@ -56,7 +56,7 @@ func RepairPacks(ctx context.Context, repo *Repository, ids restic.IDSet, printe } // remove salvaged packs from index - err = rebuildIndexFiles(ctx, repo, ids, nil, false, printer) + err = rewriteIndexFiles(ctx, repo, ids, nil, nil, printer) if err != nil { return err } From 2ca1c3772152829908e18da8be9ecc3b55128cd5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 21:08:19 +0200 Subject: [PATCH 282/893] index: additional tests for new index save methods --- internal/index/master_index_test.go | 77 ++++++++++++++++++++++++++++- 1 file changed, 76 insertions(+), 1 deletion(-) diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go index b8a29262e14..c42484c5516 100644 --- a/internal/index/master_index_test.go +++ b/internal/index/master_index_test.go @@ -355,8 +355,60 @@ func TestIndexSave(t *testing.T) { } func testIndexSave(t *testing.T, version uint) { + for _, test := range []struct { + name string + saver func(idx *index.MasterIndex, repo restic.Repository) error + }{ + {"rewrite no-op", func(idx *index.MasterIndex, repo restic.Repository) error { + return idx.Rewrite(context.TODO(), repo, nil, nil, nil, index.MasterIndexRewriteOpts{}) + }}, + {"rewrite skip-all", func(idx *index.MasterIndex, repo restic.Repository) error { + return idx.Rewrite(context.TODO(), repo, nil, restic.NewIDSet(), nil, index.MasterIndexRewriteOpts{}) + }}, + {"SaveFallback", func(idx *index.MasterIndex, repo restic.Repository) error { + err := restic.ParallelRemove(context.TODO(), repo, idx.IDs(), restic.IndexFile, nil, nil) + if err != nil { + return nil + } + return idx.SaveFallback(context.TODO(), repo, restic.NewIDSet(), nil) + }}, + } { + t.Run(test.name, func(t *testing.T) { + repo := createFilledRepo(t, 3, version) + + idx := index.NewMasterIndex() + rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) + blobs := make(map[restic.PackedBlob]struct{}) + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { + blobs[pb] = struct{}{} + })) + + rtest.OK(t, test.saver(idx, repo)) + idx = index.NewMasterIndex() + rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) + + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { + if _, ok := blobs[pb]; ok { + delete(blobs, pb) + } else { + t.Fatalf("unexpected blobs %v", pb) + } + })) + rtest.Equals(t, 0, len(blobs), "saved index is missing blobs") + + checker.TestCheckRepo(t, repo, false) + }) + } +} + +func TestIndexSavePartial(t *testing.T) { + repository.TestAllVersions(t, testIndexSavePartial) +} + +func testIndexSavePartial(t *testing.T, version uint) { repo := createFilledRepo(t, 3, version) + // capture blob list before adding fourth snapshot idx := index.NewMasterIndex() rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) blobs := make(map[restic.PackedBlob]struct{}) @@ -364,10 +416,21 @@ func testIndexSave(t *testing.T, version uint) { blobs[pb] = struct{}{} })) - rtest.OK(t, idx.Rewrite(context.TODO(), repo, nil, nil, nil, index.MasterIndexRewriteOpts{})) + // add+remove new snapshot and track its pack files + packsBefore := listPacks(t, repo) + sn := restic.TestCreateSnapshot(t, repo, 
snapshotTime.Add(time.Duration(4)*time.Second), depth) + rtest.OK(t, repo.RemoveUnpacked(context.TODO(), restic.SnapshotFile, *sn.ID())) + packsAfter := listPacks(t, repo) + newPacks := packsAfter.Sub(packsBefore) + + // rewrite index and remove pack files of new snapshot idx = index.NewMasterIndex() rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) + rtest.OK(t, idx.Rewrite(context.TODO(), repo, newPacks, nil, nil, index.MasterIndexRewriteOpts{})) + // check blobs + idx = index.NewMasterIndex() + rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { if _, ok := blobs[pb]; ok { delete(blobs, pb) @@ -377,5 +440,17 @@ func testIndexSave(t *testing.T, version uint) { })) rtest.Equals(t, 0, len(blobs), "saved index is missing blobs") + // remove pack files to make check happy + rtest.OK(t, restic.ParallelRemove(context.TODO(), repo, newPacks, restic.PackFile, nil, nil)) + checker.TestCheckRepo(t, repo, false) } + +func listPacks(t testing.TB, repo restic.Lister) restic.IDSet { + s := restic.NewIDSet() + rtest.OK(t, repo.List(context.TODO(), restic.PackFile, func(id restic.ID, _ int64) error { + s.Insert(id) + return nil + })) + return s +} From 57d69aa640799a82fa3f3cb01699e923e58ebc48 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 21:10:25 +0200 Subject: [PATCH 283/893] index: cleanup SaveIndex method --- internal/index/index.go | 19 +++++++++++++++++++ internal/index/master_index.go | 25 +++---------------------- internal/repository/repository_test.go | 2 +- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/internal/index/index.go b/internal/index/index.go index e09b683f160..12ef6b18a65 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -1,6 +1,7 @@ package index import ( + "bytes" "context" "encoding/json" "fmt" @@ -358,6 +359,24 @@ func (idx *Index) Encode(w io.Writer) error { return enc.Encode(idxJSON) } +// SaveIndex saves an index in the repository. +func (idx *Index) SaveIndex(ctx context.Context, repo restic.SaverUnpacked) (restic.ID, error) { + buf := bytes.NewBuffer(nil) + + err := idx.Encode(buf) + if err != nil { + return restic.ID{}, err + } + + id, err := repo.SaveUnpacked(ctx, restic.IndexFile, buf.Bytes()) + ierr := idx.SetID(id) + if ierr != nil { + // logic bug + panic(ierr) + } + return id, err +} + // Finalize sets the index to final. func (idx *Index) Finalize() { debug.Log("finalizing index") diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 40d1e344665..8e959bed3d3 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -1,7 +1,6 @@ package index import ( - "bytes" "context" "fmt" "runtime" @@ -461,7 +460,7 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud worker := func() error { for idx := range saveCh { idx.Finalize() - if _, err := SaveIndex(wgCtx, repo, idx); err != nil { + if _, err := idx.SaveIndex(wgCtx, repo); err != nil { return err } } @@ -551,7 +550,7 @@ func (mi *MasterIndex) SaveFallback(ctx context.Context, repo restic.SaverRemove worker := func() error { for idx := range ch { idx.Finalize() - if _, err := SaveIndex(wgCtx, repo, idx); err != nil { + if _, err := idx.SaveIndex(wgCtx, repo); err != nil { return err } } @@ -572,30 +571,12 @@ func (mi *MasterIndex) SaveFallback(ctx context.Context, repo restic.SaverRemove return err } -// SaveIndex saves an index in the repository. 
-func SaveIndex(ctx context.Context, repo restic.SaverUnpacked, index *Index) (restic.ID, error) { - buf := bytes.NewBuffer(nil) - - err := index.Encode(buf) - if err != nil { - return restic.ID{}, err - } - - id, err := repo.SaveUnpacked(ctx, restic.IndexFile, buf.Bytes()) - ierr := index.SetID(id) - if ierr != nil { - // logic bug - panic(ierr) - } - return id, err -} - // saveIndex saves all indexes in the backend. func (mi *MasterIndex) saveIndex(ctx context.Context, r restic.SaverUnpacked, indexes ...*Index) error { for i, idx := range indexes { debug.Log("Saving index %d", i) - sid, err := SaveIndex(ctx, r, idx) + sid, err := idx.SaveIndex(ctx, r) if err != nil { return err }
diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index bc950d0b0d9..05b790e33c3 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go
@@ -336,7 +336,7 @@ func benchmarkLoadIndex(b *testing.B, version uint) { } idx.Finalize() - id, err := index.SaveIndex(context.TODO(), repo, idx) + id, err := idx.SaveIndex(context.TODO(), repo) rtest.OK(b, err) b.Logf("index saved as %v", id.Str())
From e52033a8bd1aa0bfb23ccde5805a00e64ea4cfa4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 22:14:28 +0200 Subject: [PATCH 284/893] index: slightly reduce Rewrite concurrency The index operations are likely CPU-bound. Thus, reduce the concurrency accordingly.
--- internal/index/master_index.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 8e959bed3d3..f9fc4505b76 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go
@@ -395,8 +395,9 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud } return nil } - // loading an index can take quite some time such that this can be both CPU- or IO-bound - loaderCount := int(repo.Connections()) + runtime.GOMAXPROCS(0) + // loading an index can take quite some time such that this is probably CPU-bound + // the index files are probably already cached at this point + loaderCount := runtime.GOMAXPROCS(0) // run workers on ch for i := 0; i < loaderCount; i++ { rewriteWg.Add(1)
@@ -467,8 +468,9 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud return nil } - // encoding an index can take quite some time such that this can be both CPU- or IO-bound - workerCount := int(repo.Connections()) + runtime.GOMAXPROCS(0) + // encoding an index can take quite some time such that this can be CPU- or IO-bound + // do not add repo.Connections() here as there are already the loader goroutines. + workerCount := runtime.GOMAXPROCS(0) // run workers on ch for i := 0; i < workerCount; i++ { wg.Go(worker)
From 027cc647373b05718710831c76177d17ca6e8a14 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 23:24:18 +0200 Subject: [PATCH 285/893] repository: fix prune heuristic to allow resuming interrupted runs Pack files created by interrupted prune runs appear to consist only of duplicate blobs on the next run. This caused the previous heuristic to ignore those pack files. Now, a duplicate blob in a specific pack file is also selected if that pack file only contains duplicate blobs. This allows prune to select the already rewritten pack files. 
--- internal/repository/prune.go | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 895b07994c7..49869fcacd9 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go
@@ -72,10 +72,12 @@ type PrunePlan struct { } type packInfo struct { - usedBlobs uint - unusedBlobs uint - usedSize uint64 - unusedSize uint64 + usedBlobs uint + unusedBlobs uint + duplicateBlobs uint + usedSize uint64 + unusedSize uint64 + tpe restic.BlobType uncompressed bool }
@@ -226,6 +228,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re // mark as unused for now, we will later on select one copy ip.unusedSize += size ip.unusedBlobs++ + ip.duplicateBlobs++ // count as duplicate, will later on change one copy to be counted as used stats.Size.Duplicate += size
@@ -256,6 +259,8 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re // if duplicate blobs exist, those will be set to either "used" or "unused": // - mark only one occurrence of duplicate blobs as used // - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used" + // - if a pack only consists of duplicates (which by definition are used blobs), mark it as "used". This + // ensures that already rewritten packs are kept. // - if there are no used blobs in a pack, possibly mark duplicates as "unused" if hasDuplicates { // iterate again over all blobs in index (this is pretty cheap, all in-mem)
@@ -271,8 +276,10 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re ip := indexPack[blob.PackID] size := uint64(blob.Length) switch { - case ip.usedBlobs > 0, count == 0: - // other used blobs in pack or "last" occurrence -> transition to used + case ip.usedBlobs > 0, (ip.duplicateBlobs == ip.unusedBlobs), count == 0: + // other used blobs in pack, only duplicate blobs or "last" occurrence -> transition to used + // a pack file created by an interrupted prune run will consist of only duplicate blobs + // thus select such already repacked pack files ip.usedSize += size ip.usedBlobs++ ip.unusedSize -= size
From f680a2331d7e5e5131e96fde7c0b5ac534e867d9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 23:50:40 +0200 Subject: [PATCH 286/893] add changelog for streaming index rewrite
--- changelog/unreleased/issue-3806 | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 changelog/unreleased/issue-3806
diff --git a/changelog/unreleased/issue-3806 b/changelog/unreleased/issue-3806 new file mode 100644 index 00000000000..d3ae9b507bd --- /dev/null +++ b/changelog/unreleased/issue-3806
@@ -0,0 +1,11 @@
+Enhancement: Make `prune` command resumable
+
+When `prune` was interrupted, a later `prune` run previously had to start repacking
+the pack files from scratch, as `prune` did not update the index while repacking.
+
+The `prune` command now supports resuming interrupted prune runs. The update
+of the repository index has also been optimized to use less memory and only
+rewrite parts of the index that have changed. 
+ +https://github.com/restic/restic/issues/3806 +https://github.com/restic/restic/pull/4812 From 860b595a8b1a29adceaf9bb21df5cbdcf7927df6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 19 May 2024 23:52:21 +0200 Subject: [PATCH 287/893] backend: increase watchdog test timeout for deflaking --- internal/backend/watchdog_roundtriper_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/backend/watchdog_roundtriper_test.go b/internal/backend/watchdog_roundtriper_test.go index a13d670e036..b1f589bc01b 100644 --- a/internal/backend/watchdog_roundtriper_test.go +++ b/internal/backend/watchdog_roundtriper_test.go @@ -64,7 +64,7 @@ func TestRoundtrip(t *testing.T) { })) defer srv.Close() - rt := newWatchdogRoundtripper(http.DefaultTransport, 50*time.Millisecond, 2) + rt := newWatchdogRoundtripper(http.DefaultTransport, 100*time.Millisecond, 2) req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(newSlowReader(bytes.NewReader(msg), time.Duration(delay)*time.Millisecond))) rtest.OK(t, err) From 93098e9265d23ea6d4b9760bfb4a92d4193c04e2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 2 Jun 2023 20:18:46 +0200 Subject: [PATCH 288/893] prune: hide implementation details of counted blob set --- cmd/restic/cmd_prune.go | 4 +- internal/repository/prune.go | 36 ++++++++--------- internal/repository/prune_test.go | 2 +- internal/restic/counted_blob_set.go | 61 +++++++++++++++++++---------- 4 files changed, 61 insertions(+), 42 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 7872589be0d..31f920be5ca 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -188,7 +188,7 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption RepackUncompressed: opts.RepackUncompressed, } - plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs *restic.CountedBlobSet, err error) { return getUsedBlobs(ctx, repo, ignoreSnapshots, printer) }, printer) if err != nil { @@ -255,7 +255,7 @@ func printPruneStats(printer progress.Printer, stats repository.PruneStats) erro return nil } -func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs restic.CountedBlobSet, err error) { +func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs *restic.CountedBlobSet, err error) { var snapshotTrees restic.IDs printer.P("loading all snapshots...\n") err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots, diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 49869fcacd9..25bf3621ab9 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -60,11 +60,11 @@ type PruneStats struct { } type PrunePlan struct { - removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) - repackPacks restic.IDSet // packs to repack - keepBlobs restic.CountedBlobSet // blobs to keep during repacking - removePacks restic.IDSet // packs to remove - ignorePacks restic.IDSet // packs to ignore when rebuilding the index + removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) + repackPacks restic.IDSet // packs to repack + keepBlobs *restic.CountedBlobSet // blobs to keep during 
repacking + removePacks restic.IDSet // packs to remove + ignorePacks restic.IDSet // packs to ignore when rebuilding the index repo *Repository stats PruneStats @@ -90,7 +90,7 @@ type packInfoWithID struct { // PlanPrune selects which files to rewrite and which to delete and which blobs to keep. // Also some summary statistics are returned. -func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (*PrunePlan, error) { +func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs *restic.CountedBlobSet, err error), printer progress.Printer) (*PrunePlan, error) { var stats PruneStats if opts.UnsafeRecovery { @@ -152,13 +152,13 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsed return &plan, nil } -func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { +func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs *restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (*restic.CountedBlobSet, map[restic.ID]packInfo, error) { // iterate over all blobs in index to find out which blobs are duplicates // The counter in usedBlobs describes how many instances of the blob exist in the repository index // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist err := idx.ListBlobs(ctx, func(blob restic.PackedBlob) { bh := blob.BlobHandle - count, ok := usedBlobs[bh] + count, ok := usedBlobs.Get(bh) if ok { if count < math.MaxUint8 { // don't overflow, but saturate count at 255 @@ -167,7 +167,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re count++ } - usedBlobs[bh] = count + usedBlobs.Set(bh, count) } }) if err != nil { @@ -176,12 +176,12 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re // Check if all used blobs have been found in index missingBlobs := restic.NewBlobSet() - for bh, count := range usedBlobs { + usedBlobs.For(func(bh restic.BlobHandle, count uint8) { if count == 0 { // blob does not exist in any pack files missingBlobs.Insert(bh) } - } + }) if len(missingBlobs) != 0 { printer.E("%v not found in the index\n\n"+ @@ -221,7 +221,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re bh := blob.BlobHandle size := uint64(blob.Length) - dupCount := usedBlobs[bh] + dupCount, _ := usedBlobs.Get(bh) switch { case dupCount >= 2: hasDuplicates = true @@ -266,7 +266,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re // iterate again over all blobs in index (this is pretty cheap, all in-mem) err = idx.ListBlobs(ctx, func(blob restic.PackedBlob) { bh := blob.BlobHandle - count, ok := usedBlobs[bh] + count, ok := usedBlobs.Get(bh) // skip non-duplicate, aka. 
normal blobs // count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining if !ok || count == 1 { @@ -290,7 +290,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re stats.Size.Duplicate -= size stats.Blobs.Duplicate-- // let other occurrences remain marked as unused - usedBlobs[bh] = 1 + usedBlobs.Set(bh, 1) default: // remain unused and decrease counter count-- @@ -299,7 +299,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re // thus use the special value zero. This will select the last instance of the blob for keeping. count = 0 } - usedBlobs[bh] = count + usedBlobs.Set(bh, count) } // update indexPack indexPack[blob.PackID] = ip @@ -311,11 +311,11 @@ func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs re // Sanity check. If no duplicates exist, all blobs have value 1. After handling // duplicates, this also applies to duplicates. - for _, count := range usedBlobs { + usedBlobs.For(func(_ restic.BlobHandle, count uint8) { if count != 1 { panic("internal error during blob selection") } - } + }) return usedBlobs, indexPack, nil } @@ -567,7 +567,7 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) er // Also remove repacked packs plan.removePacks.Merge(plan.repackPacks) - if len(plan.keepBlobs) != 0 { + if plan.keepBlobs.Len() != 0 { printer.E("%v was not repacked\n\n"+ "Integrity check failed.\n"+ "Please report this error (along with the output of the 'prune' run) at\n"+ diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index dbf36ffd050..2b3b3b0cd44 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -30,7 +30,7 @@ func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { } rtest.OK(t, repo.Flush(context.TODO())) - plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs *restic.CountedBlobSet, err error) { return restic.NewCountedBlobSet(keep.List()...), nil }, &progress.NoopPrinter{}) rtest.OK(t, err) diff --git a/internal/restic/counted_blob_set.go b/internal/restic/counted_blob_set.go index f965d3129f0..f0a43d5d60b 100644 --- a/internal/restic/counted_blob_set.go +++ b/internal/restic/counted_blob_set.go @@ -5,42 +5,54 @@ import "sort" // CountedBlobSet is a set of blobs. For each blob it also stores a uint8 value // which can be used to track some information. The CountedBlobSet does not use // that value in any way. New entries are created with value 0. -type CountedBlobSet map[BlobHandle]uint8 +type CountedBlobSet struct { + m map[BlobHandle]uint8 +} // NewCountedBlobSet returns a new CountedBlobSet, populated with ids. -func NewCountedBlobSet(handles ...BlobHandle) CountedBlobSet { - m := make(CountedBlobSet) +func NewCountedBlobSet(handles ...BlobHandle) *CountedBlobSet { + m := CountedBlobSet{} + m.m = make(map[BlobHandle]uint8) for _, h := range handles { - m[h] = 0 + m.m[h] = 0 } - return m + return &m +} + +func (s *CountedBlobSet) Get(h BlobHandle) (uint8, bool) { + val, ok := s.m[h] + return val, ok +} + +func (s *CountedBlobSet) Set(h BlobHandle, value uint8) { + s.m[h] = value } // Has returns true iff id is contained in the set. 
-func (s CountedBlobSet) Has(h BlobHandle) bool { - _, ok := s[h] +func (s *CountedBlobSet) Has(h BlobHandle) bool { + _, ok := s.m[h] return ok } // Insert adds id to the set. -func (s CountedBlobSet) Insert(h BlobHandle) { - s[h] = 0 +func (s *CountedBlobSet) Insert(h BlobHandle) { + s.m[h] = 0 } // Delete removes id from the set. -func (s CountedBlobSet) Delete(h BlobHandle) { - delete(s, h) +func (s *CountedBlobSet) Delete(h BlobHandle) { + delete(s.m, h) } -func (s CountedBlobSet) Len() int { - return len(s) +func (s *CountedBlobSet) Len() int { + return len(s.m) } // List returns a sorted slice of all BlobHandle in the set. -func (s CountedBlobSet) List() BlobHandles { - list := make(BlobHandles, 0, len(s)) - for h := range s { +func (s *CountedBlobSet) List() BlobHandles { + list := make(BlobHandles, 0, len(s.m)) + for h := range s.m { list = append(list, h) } @@ -49,7 +61,7 @@ func (s CountedBlobSet) List() BlobHandles { return list } -func (s CountedBlobSet) String() string { +func (s *CountedBlobSet) String() string { str := s.List().String() if len(str) < 2 { return "{}" @@ -59,10 +71,17 @@ func (s CountedBlobSet) String() string { } // Copy returns a copy of the CountedBlobSet. -func (s CountedBlobSet) Copy() CountedBlobSet { - cp := make(CountedBlobSet, len(s)) - for k, v := range s { - cp[k] = v +func (s *CountedBlobSet) Copy() *CountedBlobSet { + cp := &CountedBlobSet{} + cp.m = make(map[BlobHandle]uint8, len(s.m)) + for k, v := range s.m { + cp.m[k] = v } return cp } + +func (s *CountedBlobSet) For(cb func(h BlobHandle, value uint8)) { + for k, v := range s.m { + cb(k, v) + } +} From 2033c02b09a490e37e3805ef136b089ac837774d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 2 Jun 2023 20:27:47 +0200 Subject: [PATCH 289/893] index: replace CountedBlobSet with AssociatedSet --- cmd/restic/cmd_prune.go | 7 +- internal/index/associated_data.go | 156 +++++++++++++++++++++++ internal/index/associated_data_test.go | 154 ++++++++++++++++++++++ internal/index/index.go | 60 +++++---- internal/index/index_test.go | 3 + internal/index/indexmap.go | 33 ++++- internal/index/indexmap_test.go | 42 ++++++ internal/index/master_index.go | 18 +++ internal/index/master_index_test.go | 9 +- internal/index/testing.go | 9 +- internal/repository/prune.go | 21 ++- internal/repository/prune_test.go | 9 +- internal/restic/counted_blob_set.go | 87 ------------- internal/restic/counted_blob_set_test.go | 45 ------- 14 files changed, 472 insertions(+), 181 deletions(-) create mode 100644 internal/index/associated_data.go create mode 100644 internal/index/associated_data_test.go delete mode 100644 internal/restic/counted_blob_set.go delete mode 100644 internal/restic/counted_blob_set_test.go diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 31f920be5ca..bdf8951ed25 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -9,6 +9,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" @@ -188,7 +189,7 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption RepackUncompressed: opts.RepackUncompressed, } - plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs *restic.CountedBlobSet, err error) { + plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx 
context.Context, repo restic.Repository) (usedBlobs *index.AssociatedSet[uint8], err error) { return getUsedBlobs(ctx, repo, ignoreSnapshots, printer) }, printer) if err != nil { @@ -255,7 +256,7 @@ func printPruneStats(printer progress.Printer, stats repository.PruneStats) erro return nil } -func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs *restic.CountedBlobSet, err error) { +func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs *index.AssociatedSet[uint8], err error) { var snapshotTrees restic.IDs printer.P("loading all snapshots...\n") err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots, @@ -274,7 +275,7 @@ func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots r printer.P("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) - usedBlobs = restic.NewCountedBlobSet() + usedBlobs = index.NewAssociatedSet[uint8](repo.Index().(*index.MasterIndex)) bar := printer.NewCounter("snapshots") bar.SetMax(uint64(len(snapshotTrees))) diff --git a/internal/index/associated_data.go b/internal/index/associated_data.go new file mode 100644 index 00000000000..ee58957e0fe --- /dev/null +++ b/internal/index/associated_data.go @@ -0,0 +1,156 @@ +package index + +import ( + "context" + "sort" + + "github.com/restic/restic/internal/restic" +) + +type associatedSetSub[T any] struct { + value []T + isSet []bool +} + +// AssociatedSet is a memory efficient implementation of a BlobSet that can +// store a small data item for each BlobHandle. It relies on a special property +// of our MasterIndex implementation. A BlobHandle can be permanently identified +// using an offset that never changes as MasterIndex entries cannot be modified (only added). +// +// The AssociatedSet thus can use an array with the size of the MasterIndex to store +// its data. Access to an individual entry is possible by looking up the BlobHandle's +// offset from the MasterIndex. +// +// BlobHandles that are not part of the MasterIndex can be stored by placing them in +// an overflow set that is expected to be empty in the normal case. 
+type AssociatedSet[T any] struct { + byType [restic.NumBlobTypes]associatedSetSub[T] + overflow map[restic.BlobHandle]T + idx *MasterIndex +} + +func NewAssociatedSet[T any](mi *MasterIndex) *AssociatedSet[T] { + a := AssociatedSet[T]{ + overflow: make(map[restic.BlobHandle]T), + idx: mi, + } + + for typ := range a.byType { + if typ == 0 { + continue + } + // index starts counting at 1 + count := mi.stableLen(restic.BlobType(typ)) + 1 + a.byType[typ].value = make([]T, count) + a.byType[typ].isSet = make([]bool, count) + } + + return &a +} + +func (a *AssociatedSet[T]) Get(bh restic.BlobHandle) (T, bool) { + if val, ok := a.overflow[bh]; ok { + return val, true + } + + idx := a.idx.blobIndex(bh) + bt := &a.byType[bh.Type] + if idx >= len(bt.value) || idx == -1 { + var zero T + return zero, false + } + + has := bt.isSet[idx] + if has { + return bt.value[idx], has + } + var zero T + return zero, false +} + +func (a *AssociatedSet[T]) Has(bh restic.BlobHandle) bool { + _, ok := a.Get(bh) + return ok +} + +func (a *AssociatedSet[T]) Set(bh restic.BlobHandle, val T) { + if _, ok := a.overflow[bh]; ok { + a.overflow[bh] = val + return + } + + idx := a.idx.blobIndex(bh) + bt := &a.byType[bh.Type] + if idx >= len(bt.value) || idx == -1 { + a.overflow[bh] = val + } else { + bt.value[idx] = val + bt.isSet[idx] = true + } +} + +func (a *AssociatedSet[T]) Insert(bh restic.BlobHandle) { + var zero T + a.Set(bh, zero) +} + +func (a *AssociatedSet[T]) Delete(bh restic.BlobHandle) { + if _, ok := a.overflow[bh]; ok { + delete(a.overflow, bh) + return + } + + idx := a.idx.blobIndex(bh) + bt := &a.byType[bh.Type] + if idx < len(bt.value) && idx != -1 { + bt.isSet[idx] = false + } +} + +func (a *AssociatedSet[T]) Len() int { + count := 0 + a.For(func(_ restic.BlobHandle, _ T) { + count++ + }) + return count +} + +func (a *AssociatedSet[T]) For(cb func(bh restic.BlobHandle, val T)) { + for k, v := range a.overflow { + cb(k, v) + } + + _ = a.idx.Each(context.Background(), func(pb restic.PackedBlob) { + if _, ok := a.overflow[pb.BlobHandle]; ok { + // already reported via overflow set + return + } + + val, known := a.Get(pb.BlobHandle) + if known { + cb(pb.BlobHandle, val) + } + }) +} + +// List returns a sorted slice of all BlobHandle in the set. 
+func (a *AssociatedSet[T]) List() restic.BlobHandles { + list := make(restic.BlobHandles, 0) + a.For(func(bh restic.BlobHandle, _ T) { + list = append(list, bh) + }) + + return list +} + +func (a *AssociatedSet[T]) String() string { + list := a.List() + sort.Sort(list) + + str := list.String() + if len(str) < 2 { + return "{}" + } + + return "{" + str[1:len(str)-1] + "}" +} diff --git a/internal/index/associated_data_test.go b/internal/index/associated_data_test.go new file mode 100644 index 00000000000..715e3391a1a --- /dev/null +++ b/internal/index/associated_data_test.go @@ -0,0 +1,154 @@ +package index + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +type noopSaver struct{} + +func (n *noopSaver) Connections() uint { + return 2 +} +func (n *noopSaver) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (restic.ID, error) { + return restic.Hash(buf), nil +} + +func makeFakePackedBlob() (restic.BlobHandle, restic.PackedBlob) { + bh := restic.NewRandomBlobHandle() + blob := restic.PackedBlob{ + PackID: restic.NewRandomID(), + Blob: restic.Blob{ + BlobHandle: bh, + Length: uint(crypto.CiphertextLength(10)), + Offset: 0, + }, + } + return bh, blob +} + +func TestAssociatedSet(t *testing.T) { + bh, blob := makeFakePackedBlob() + + mi := NewMasterIndex() + mi.StorePack(blob.PackID, []restic.Blob{blob.Blob}) + test.OK(t, mi.SaveIndex(context.TODO(), &noopSaver{})) + + bs := NewAssociatedSet[uint8](mi) + test.Equals(t, bs.Len(), 0) + test.Equals(t, bs.List(), restic.BlobHandles{}) + + // check non existent + test.Equals(t, bs.Has(bh), false) + _, ok := bs.Get(bh) + test.Equals(t, false, ok) + + // test insert + bs.Insert(bh) + test.Equals(t, bs.Has(bh), true) + test.Equals(t, bs.Len(), 1) + test.Equals(t, bs.List(), restic.BlobHandles{bh}) + test.Equals(t, 0, len(bs.overflow)) + + // test set + bs.Set(bh, 42) + test.Equals(t, bs.Has(bh), true) + test.Equals(t, bs.Len(), 1) + val, ok := bs.Get(bh) + test.Equals(t, true, ok) + test.Equals(t, uint8(42), val) + + s := bs.String() + test.Assert(t, len(s) > 10, "invalid string: %v", s) + + // test remove + bs.Delete(bh) + test.Equals(t, bs.Len(), 0) + test.Equals(t, bs.Has(bh), false) + test.Equals(t, bs.List(), restic.BlobHandles{}) + + test.Equals(t, "{}", bs.String()) + + // test set + bs.Set(bh, 43) + test.Equals(t, bs.Has(bh), true) + test.Equals(t, bs.Len(), 1) + val, ok = bs.Get(bh) + test.Equals(t, true, ok) + test.Equals(t, uint8(43), val) + test.Equals(t, 0, len(bs.overflow)) + // test update + bs.Set(bh, 44) + val, ok = bs.Get(bh) + test.Equals(t, true, ok) + test.Equals(t, uint8(44), val) + test.Equals(t, 0, len(bs.overflow)) + + // test overflow blob + of := restic.NewRandomBlobHandle() + test.Equals(t, false, bs.Has(of)) + // set + bs.Set(of, 7) + test.Equals(t, 1, len(bs.overflow)) + test.Equals(t, bs.Len(), 2) + // get + val, ok = bs.Get(of) + test.Equals(t, true, ok) + test.Equals(t, uint8(7), val) + test.Equals(t, bs.List(), restic.BlobHandles{of, bh}) + // update + bs.Set(of, 8) + val, ok = bs.Get(of) + test.Equals(t, true, ok) + test.Equals(t, uint8(8), val) + test.Equals(t, 1, len(bs.overflow)) + // delete + bs.Delete(of) + test.Equals(t, bs.Len(), 1) + test.Equals(t, bs.Has(of), false) + test.Equals(t, bs.List(), restic.BlobHandles{bh}) + test.Equals(t, 0, len(bs.overflow)) +} + +func TestAssociatedSetWithExtendedIndex(t *testing.T) { + _, blob := makeFakePackedBlob() + + mi := 
NewMasterIndex() + mi.StorePack(blob.PackID, []restic.Blob{blob.Blob}) + test.OK(t, mi.SaveIndex(context.TODO(), &noopSaver{})) + + bs := NewAssociatedSet[uint8](mi) + + // add new blobs to index after building the set + of, blob2 := makeFakePackedBlob() + mi.StorePack(blob2.PackID, []restic.Blob{blob2.Blob}) + test.OK(t, mi.SaveIndex(context.TODO(), &noopSaver{})) + + // non-existant + test.Equals(t, false, bs.Has(of)) + // set + bs.Set(of, 5) + test.Equals(t, 1, len(bs.overflow)) + test.Equals(t, bs.Len(), 1) + // get + val, ok := bs.Get(of) + test.Equals(t, true, ok) + test.Equals(t, uint8(5), val) + test.Equals(t, bs.List(), restic.BlobHandles{of}) + // update + bs.Set(of, 8) + val, ok = bs.Get(of) + test.Equals(t, true, ok) + test.Equals(t, uint8(8), val) + test.Equals(t, 1, len(bs.overflow)) + // delete + bs.Delete(of) + test.Equals(t, bs.Len(), 0) + test.Equals(t, bs.Has(of), false) + test.Equals(t, bs.List(), restic.BlobHandles{}) + test.Equals(t, 0, len(bs.overflow)) +} diff --git a/internal/index/index.go b/internal/index/index.go index 12ef6b18a65..ab497ad96ff 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -47,7 +47,7 @@ import ( // Index holds lookup tables for id -> pack. type Index struct { - m sync.Mutex + m sync.RWMutex byType [restic.NumBlobTypes]indexMap packs restic.IDs @@ -83,8 +83,8 @@ func (idx *Index) store(packIndex int, blob restic.Blob) { // Final returns true iff the index is already written to the repository, it is // finalized. func (idx *Index) Final() bool { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() return idx.final } @@ -97,8 +97,8 @@ const ( // IndexFull returns true iff the index is "full enough" to be saved as a preliminary index. var IndexFull = func(idx *Index, compress bool) bool { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() debug.Log("checking whether index %p is full", idx) @@ -163,8 +163,8 @@ func (idx *Index) toPackedBlob(e *indexEntry, t restic.BlobType) restic.PackedBl // Lookup queries the index for the blob ID and returns all entries including // duplicates. Adds found entries to blobs and returns the result. func (idx *Index) Lookup(bh restic.BlobHandle, pbs []restic.PackedBlob) []restic.PackedBlob { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() idx.byType[bh.Type].foreachWithID(bh.ID, func(e *indexEntry) { pbs = append(pbs, idx.toPackedBlob(e, bh.Type)) @@ -175,8 +175,8 @@ func (idx *Index) Lookup(bh restic.BlobHandle, pbs []restic.PackedBlob) []restic // Has returns true iff the id is listed in the index. func (idx *Index) Has(bh restic.BlobHandle) bool { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() return idx.byType[bh.Type].get(bh.ID) != nil } @@ -184,8 +184,8 @@ func (idx *Index) Has(bh restic.BlobHandle) bool { // LookupSize returns the length of the plaintext content of the blob with the // given id. func (idx *Index) LookupSize(bh restic.BlobHandle) (plaintextLength uint, found bool) { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() e := idx.byType[bh.Type].get(bh.ID) if e == nil { @@ -200,8 +200,8 @@ func (idx *Index) LookupSize(bh restic.BlobHandle) (plaintextLength uint, found // Each passes all blobs known to the index to the callback fn. This blocks any // modification of the index. 
func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) error { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() for typ := range idx.byType { m := &idx.byType[typ] @@ -229,12 +229,12 @@ type EachByPackResult struct { // When the context is cancelled, the background goroutine // terminates. This blocks any modification of the index. func (idx *Index) EachByPack(ctx context.Context, packBlacklist restic.IDSet) <-chan EachByPackResult { - idx.m.Lock() + idx.m.RLock() ch := make(chan EachByPackResult) go func() { - defer idx.m.Unlock() + defer idx.m.RUnlock() defer close(ch) byPack := make(map[restic.ID][restic.NumBlobTypes][]*indexEntry) @@ -275,8 +275,8 @@ func (idx *Index) EachByPack(ctx context.Context, packBlacklist restic.IDSet) <- // Packs returns all packs in this index func (idx *Index) Packs() restic.IDSet { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() packs := restic.NewIDSet() for _, packID := range idx.packs { @@ -344,8 +344,8 @@ type jsonIndex struct { // Encode writes the JSON serialization of the index to the writer w. func (idx *Index) Encode(w io.Writer) error { debug.Log("encoding index") - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() list, err := idx.generatePackList() if err != nil { @@ -389,8 +389,8 @@ func (idx *Index) Finalize() { // IDs returns the IDs of the index, if available. If the index is not yet // finalized, an error is returned. func (idx *Index) IDs() (restic.IDs, error) { - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() if !idx.final { return nil, errors.New("index not finalized") @@ -422,8 +422,8 @@ func (idx *Index) SetID(id restic.ID) error { // Dump writes the pretty-printed JSON representation of the index to w. func (idx *Index) Dump(w io.Writer) error { debug.Log("dumping index") - idx.m.Lock() - defer idx.m.Unlock() + idx.m.RLock() + defer idx.m.RUnlock() list, err := idx.generatePackList() if err != nil { @@ -579,3 +579,17 @@ func decodeOldIndex(buf []byte) (idx *Index, err error) { debug.Log("done") return idx, nil } + +func (idx *Index) BlobIndex(bh restic.BlobHandle) int { + idx.m.RLock() + defer idx.m.RUnlock() + + return idx.byType[bh.Type].firstIndex(bh.ID) +} + +func (idx *Index) Len(t restic.BlobType) uint { + idx.m.RLock() + defer idx.m.RUnlock() + + return idx.byType[t].len() +} diff --git a/internal/index/index_test.go b/internal/index/index_test.go index 66cec23f6d2..1a487f82fa2 100644 --- a/internal/index/index_test.go +++ b/internal/index/index_test.go @@ -172,6 +172,9 @@ func TestIndexSize(t *testing.T) { err := idx.Encode(wr) rtest.OK(t, err) + rtest.Equals(t, uint(packs*blobCount), idx.Len(restic.DataBlob)) + rtest.Equals(t, uint(0), idx.Len(restic.TreeBlob)) + t.Logf("Index file size for %d blobs in %d packs is %d", blobCount*packs, packs, wr.Len()) } diff --git a/internal/index/indexmap.go b/internal/index/indexmap.go index 4a78b9f77b4..6db523633b8 100644 --- a/internal/index/indexmap.go +++ b/internal/index/indexmap.go @@ -99,6 +99,32 @@ func (m *indexMap) get(id restic.ID) *indexEntry { return nil } +// firstIndex returns the index of the first entry for ID id. +// This index is guaranteed to never change. 
+func (m *indexMap) firstIndex(id restic.ID) int { + if len(m.buckets) == 0 { + return -1 + } + + idx := -1 + h := m.hash(id) + ei := m.buckets[h] + for ei != 0 { + e := m.resolve(ei) + cur := ei + ei = e.next + if e.id != id { + continue + } + if int(cur) < idx || idx == -1 { + // casting from uint to int is unproblematic as we'd run out of memory + // before this can result in an overflow. + idx = int(cur) + } + } + return idx +} + func (m *indexMap) grow() { m.buckets = make([]uint, growthFactor*len(m.buckets)) @@ -118,9 +144,10 @@ func (m *indexMap) hash(id restic.ID) uint { // While SHA-256 should be collision-resistant, for hash table indices // we use only a few bits of it and finding collisions for those is // much easier than breaking the whole algorithm. - m.mh.Reset() - _, _ = m.mh.Write(id[:]) - h := uint(m.mh.Sum64()) + mh := maphash.Hash{} + mh.SetSeed(m.mh.Seed()) + _, _ = mh.Write(id[:]) + h := uint(mh.Sum64()) return h & uint(len(m.buckets)-1) } diff --git a/internal/index/indexmap_test.go b/internal/index/indexmap_test.go index a16670c7db7..e0db9cb404f 100644 --- a/internal/index/indexmap_test.go +++ b/internal/index/indexmap_test.go @@ -143,3 +143,45 @@ func BenchmarkIndexMapHash(b *testing.B) { } } } + +func TestIndexMapFirstIndex(t *testing.T) { + t.Parallel() + + var ( + id restic.ID + m indexMap + r = rand.New(rand.NewSource(98765)) + fi = make(map[restic.ID]int) + ) + + for i := 1; i <= 400; i++ { + r.Read(id[:]) + rtest.Equals(t, -1, m.firstIndex(id), "wrong firstIndex for nonexistant id") + + m.add(id, 0, 0, 0, 0) + idx := m.firstIndex(id) + rtest.Equals(t, i, idx, "unexpected index for id") + fi[id] = idx + } + // iterate over blobs, as this is a hashmap the order is effectively random + for id, idx := range fi { + rtest.Equals(t, idx, m.firstIndex(id), "wrong index returned") + } +} + +func TestIndexMapFirstIndexDuplicates(t *testing.T) { + t.Parallel() + + var ( + id restic.ID + m indexMap + r = rand.New(rand.NewSource(98765)) + ) + + r.Read(id[:]) + for i := 1; i <= 10; i++ { + m.add(id, 0, 0, 0, 0) + } + idx := m.firstIndex(id) + rtest.Equals(t, 1, idx, "unexpected index for id") +} diff --git a/internal/index/master_index.go b/internal/index/master_index.go index f9fc4505b76..9e5e3376c35 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -638,3 +638,21 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan }() return out } + +// Only for use by AssociatedSet +func (mi *MasterIndex) blobIndex(h restic.BlobHandle) int { + mi.idxMutex.RLock() + defer mi.idxMutex.RUnlock() + + // other indexes are ignored as their ids can change when merged into the main index + return mi.idx[0].BlobIndex(h) +} + +// Only for use by AssociatedSet +func (mi *MasterIndex) stableLen(t restic.BlobType) uint { + mi.idxMutex.RLock() + defer mi.idxMutex.RUnlock() + + // other indexes are ignored as their ids can change when merged into the main index + return mi.idx[0].Len(t) +} diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go index c42484c5516..7a2487cd431 100644 --- a/internal/index/master_index_test.go +++ b/internal/index/master_index_test.go @@ -161,9 +161,12 @@ func TestMasterMergeFinalIndexes(t *testing.T) { mIdx.Insert(idx1) mIdx.Insert(idx2) - finalIndexes, idxCount := index.TestMergeIndex(t, mIdx) + rtest.Equals(t, restic.NewIDSet(), mIdx.IDs()) + + finalIndexes, idxCount, ids := index.TestMergeIndex(t, mIdx) rtest.Equals(t, []*index.Index{idx1, idx2}, finalIndexes) 
rtest.Equals(t, 1, idxCount) + rtest.Equals(t, ids, mIdx.IDs()) blobCount := 0 rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { @@ -186,9 +189,11 @@ func TestMasterMergeFinalIndexes(t *testing.T) { idx3.StorePack(blob2.PackID, []restic.Blob{blob2.Blob}) mIdx.Insert(idx3) - finalIndexes, idxCount = index.TestMergeIndex(t, mIdx) + finalIndexes, idxCount, newIDs := index.TestMergeIndex(t, mIdx) rtest.Equals(t, []*index.Index{idx3}, finalIndexes) rtest.Equals(t, 1, idxCount) + ids.Merge(newIDs) + rtest.Equals(t, ids, mIdx.IDs()) // Index should have same entries as before! blobs = mIdx.Lookup(bhInIdx1) diff --git a/internal/index/testing.go b/internal/index/testing.go index 7c05ac6511d..0b5084bb02b 100644 --- a/internal/index/testing.go +++ b/internal/index/testing.go @@ -7,12 +7,15 @@ import ( "github.com/restic/restic/internal/test" ) -func TestMergeIndex(t testing.TB, mi *MasterIndex) ([]*Index, int) { +func TestMergeIndex(t testing.TB, mi *MasterIndex) ([]*Index, int, restic.IDSet) { finalIndexes := mi.finalizeNotFinalIndexes() + ids := restic.NewIDSet() for _, idx := range finalIndexes { - test.OK(t, idx.SetID(restic.NewRandomID())) + id := restic.NewRandomID() + ids.Insert(id) + test.OK(t, idx.SetID(id)) } test.OK(t, mi.MergeFinalIndexes()) - return finalIndexes, len(mi.idx) + return finalIndexes, len(mi.idx), ids } diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 25bf3621ab9..f059fc0d052 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -7,6 +7,7 @@ import ( "sort" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" @@ -60,11 +61,11 @@ type PruneStats struct { } type PrunePlan struct { - removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) - repackPacks restic.IDSet // packs to repack - keepBlobs *restic.CountedBlobSet // blobs to keep during repacking - removePacks restic.IDSet // packs to remove - ignorePacks restic.IDSet // packs to ignore when rebuilding the index + removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) + repackPacks restic.IDSet // packs to repack + keepBlobs *index.AssociatedSet[uint8] // blobs to keep during repacking + removePacks restic.IDSet // packs to remove + ignorePacks restic.IDSet // packs to ignore when rebuilding the index repo *Repository stats PruneStats @@ -90,7 +91,7 @@ type packInfoWithID struct { // PlanPrune selects which files to rewrite and which to delete and which blobs to keep. // Also some summary statistics are returned. 
-func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs *restic.CountedBlobSet, err error), printer progress.Printer) (*PrunePlan, error) { +func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs *index.AssociatedSet[uint8], err error), printer progress.Printer) (*PrunePlan, error) { var stats PruneStats if opts.UnsafeRecovery { @@ -122,7 +123,6 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsed } if len(plan.repackPacks) != 0 { - blobCount := keepBlobs.Len() // when repacking, we do not want to keep blobs which are // already contained in kept packs, so delete them from keepBlobs err := repo.ListBlobs(ctx, func(blob restic.PackedBlob) { @@ -134,11 +134,6 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsed if err != nil { return nil, err } - - if keepBlobs.Len() < blobCount/2 { - // replace with copy to shrink map to necessary size if there's a chance to benefit - keepBlobs = keepBlobs.Copy() - } } else { // keepBlobs is only needed if packs are repacked keepBlobs = nil @@ -152,7 +147,7 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsed return &plan, nil } -func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs *restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (*restic.CountedBlobSet, map[restic.ID]packInfo, error) { +func packInfoFromIndex(ctx context.Context, idx restic.ListBlobser, usedBlobs *index.AssociatedSet[uint8], stats *PruneStats, printer progress.Printer) (*index.AssociatedSet[uint8], map[restic.ID]packInfo, error) { // iterate over all blobs in index to find out which blobs are duplicates // The counter in usedBlobs describes how many instances of the blob exist in the repository index // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index 2b3b3b0cd44..6158e906625 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -30,8 +31,12 @@ func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { } rtest.OK(t, repo.Flush(context.TODO())) - plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs *restic.CountedBlobSet, err error) { - return restic.NewCountedBlobSet(keep.List()...), nil + plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs *index.AssociatedSet[uint8], err error) { + idx := index.NewAssociatedSet[uint8](repo.Index().(*index.MasterIndex)) + for blob := range keep { + idx.Insert(blob) + } + return idx, nil }, &progress.NoopPrinter{}) rtest.OK(t, err) diff --git a/internal/restic/counted_blob_set.go b/internal/restic/counted_blob_set.go deleted file mode 100644 index f0a43d5d60b..00000000000 --- a/internal/restic/counted_blob_set.go +++ /dev/null @@ -1,87 +0,0 @@ -package restic - -import "sort" - -// CountedBlobSet is a set of blobs. 
For each blob it also stores a uint8 value -// which can be used to track some information. The CountedBlobSet does not use -// that value in any way. New entries are created with value 0. -type CountedBlobSet struct { - m map[BlobHandle]uint8 -} - -// NewCountedBlobSet returns a new CountedBlobSet, populated with ids. -func NewCountedBlobSet(handles ...BlobHandle) *CountedBlobSet { - m := CountedBlobSet{} - m.m = make(map[BlobHandle]uint8) - for _, h := range handles { - m.m[h] = 0 - } - - return &m -} - -func (s *CountedBlobSet) Get(h BlobHandle) (uint8, bool) { - val, ok := s.m[h] - return val, ok -} - -func (s *CountedBlobSet) Set(h BlobHandle, value uint8) { - s.m[h] = value -} - -// Has returns true iff id is contained in the set. -func (s *CountedBlobSet) Has(h BlobHandle) bool { - _, ok := s.m[h] - return ok -} - -// Insert adds id to the set. -func (s *CountedBlobSet) Insert(h BlobHandle) { - s.m[h] = 0 -} - -// Delete removes id from the set. -func (s *CountedBlobSet) Delete(h BlobHandle) { - delete(s.m, h) -} - -func (s *CountedBlobSet) Len() int { - return len(s.m) -} - -// List returns a sorted slice of all BlobHandle in the set. -func (s *CountedBlobSet) List() BlobHandles { - list := make(BlobHandles, 0, len(s.m)) - for h := range s.m { - list = append(list, h) - } - - sort.Sort(list) - - return list -} - -func (s *CountedBlobSet) String() string { - str := s.List().String() - if len(str) < 2 { - return "{}" - } - - return "{" + str[1:len(str)-1] + "}" -} - -// Copy returns a copy of the CountedBlobSet. -func (s *CountedBlobSet) Copy() *CountedBlobSet { - cp := &CountedBlobSet{} - cp.m = make(map[BlobHandle]uint8, len(s.m)) - for k, v := range s.m { - cp.m[k] = v - } - return cp -} - -func (s *CountedBlobSet) For(cb func(h BlobHandle, value uint8)) { - for k, v := range s.m { - cb(k, v) - } -} diff --git a/internal/restic/counted_blob_set_test.go b/internal/restic/counted_blob_set_test.go deleted file mode 100644 index edd39e65b9b..00000000000 --- a/internal/restic/counted_blob_set_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package restic_test - -import ( - "testing" - - "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" -) - -func TestCountedBlobSet(t *testing.T) { - bs := restic.NewCountedBlobSet() - test.Equals(t, bs.Len(), 0) - test.Equals(t, bs.List(), restic.BlobHandles{}) - - bh := restic.NewRandomBlobHandle() - // check non existent - test.Equals(t, bs.Has(bh), false) - - // test insert - bs.Insert(bh) - test.Equals(t, bs.Has(bh), true) - test.Equals(t, bs.Len(), 1) - test.Equals(t, bs.List(), restic.BlobHandles{bh}) - - // test remove - bs.Delete(bh) - test.Equals(t, bs.Len(), 0) - test.Equals(t, bs.Has(bh), false) - test.Equals(t, bs.List(), restic.BlobHandles{}) - - bs = restic.NewCountedBlobSet(bh) - test.Equals(t, bs.Len(), 1) - test.Equals(t, bs.List(), restic.BlobHandles{bh}) - - s := bs.String() - test.Assert(t, len(s) > 10, "invalid string: %v", s) -} - -func TestCountedBlobSetCopy(t *testing.T) { - bs := restic.NewCountedBlobSet(restic.NewRandomBlobHandle(), restic.NewRandomBlobHandle(), restic.NewRandomBlobHandle()) - test.Equals(t, bs.Len(), 3) - cp := bs.Copy() - test.Equals(t, cp.Len(), 3) - test.Equals(t, bs.List(), cp.List()) -} From 77873f5a9d1335965ba8bea39beb297ce639a971 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 20 May 2024 11:47:53 +0200 Subject: [PATCH 290/893] repository: let prune control data structure of usedBlobs set --- cmd/restic/cmd_prune.go | 19 ++++++------------- 
internal/repository/prune.go | 5 +++-- internal/repository/prune_test.go | 8 +++----- internal/restic/find.go | 4 ++-- 4 files changed, 14 insertions(+), 22 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index bdf8951ed25..2637fedc806 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -9,7 +9,6 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" @@ -189,8 +188,8 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption RepackUncompressed: opts.RepackUncompressed, } - plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs *index.AssociatedSet[uint8], err error) { - return getUsedBlobs(ctx, repo, ignoreSnapshots, printer) + plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error { + return getUsedBlobs(ctx, repo, usedBlobs, ignoreSnapshots, printer) }, printer) if err != nil { return err @@ -256,10 +255,10 @@ func printPruneStats(printer progress.Printer, stats repository.PruneStats) erro return nil } -func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs *index.AssociatedSet[uint8], err error) { +func getUsedBlobs(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet, ignoreSnapshots restic.IDSet, printer progress.Printer) error { var snapshotTrees restic.IDs printer.P("loading all snapshots...\n") - err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots, + err := restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots, func(id restic.ID, sn *restic.Snapshot, err error) error { if err != nil { debug.Log("failed to load snapshot %v (error %v)", id, err) @@ -270,20 +269,14 @@ func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots r return nil }) if err != nil { - return nil, errors.Fatalf("failed loading snapshot: %v", err) + return errors.Fatalf("failed loading snapshot: %v", err) } printer.P("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) - usedBlobs = index.NewAssociatedSet[uint8](repo.Index().(*index.MasterIndex)) - bar := printer.NewCounter("snapshots") bar.SetMax(uint64(len(snapshotTrees))) defer bar.Done() - err = restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar) - if err != nil { - return nil, err - } - return usedBlobs, nil + return restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar) } diff --git a/internal/repository/prune.go b/internal/repository/prune.go index f059fc0d052..1247a6b3e7a 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -91,7 +91,7 @@ type packInfoWithID struct { // PlanPrune selects which files to rewrite and which to delete and which blobs to keep. // Also some summary statistics are returned. 
-func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs *index.AssociatedSet[uint8], err error), printer progress.Printer) (*PrunePlan, error) { +func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error, printer progress.Printer) (*PrunePlan, error) { var stats PruneStats if opts.UnsafeRecovery { @@ -105,7 +105,8 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo *Repository, getUsed return nil, fmt.Errorf("compression requires at least repository format version 2") } - usedBlobs, err := getUsedBlobs(ctx, repo) + usedBlobs := index.NewAssociatedSet[uint8](repo.idx) + err := getUsedBlobs(ctx, repo, usedBlobs) if err != nil { return nil, err } diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index 6158e906625..17dfafa4e26 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/restic/restic/internal/checker" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -31,12 +30,11 @@ func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { } rtest.OK(t, repo.Flush(context.TODO())) - plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs *index.AssociatedSet[uint8], err error) { - idx := index.NewAssociatedSet[uint8](repo.Index().(*index.MasterIndex)) + plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error { for blob := range keep { - idx.Insert(blob) + usedBlobs.Insert(blob) } - return idx, nil + return nil }, &progress.NoopPrinter{}) rtest.OK(t, err) diff --git a/internal/restic/find.go b/internal/restic/find.go index cefef2196e3..d7b032bf808 100644 --- a/internal/restic/find.go +++ b/internal/restic/find.go @@ -15,14 +15,14 @@ type Loader interface { Connections() uint } -type findBlobSet interface { +type FindBlobSet interface { Has(bh BlobHandle) bool Insert(bh BlobHandle) } // FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data // blobs) to the set blobs. Already seen tree blobs will not be visited again. -func FindUsedBlobs(ctx context.Context, repo Loader, treeIDs IDs, blobs findBlobSet, p *progress.Counter) error { +func FindUsedBlobs(ctx context.Context, repo Loader, treeIDs IDs, blobs FindBlobSet, p *progress.Counter) error { var lock sync.Mutex wg, ctx := errgroup.WithContext(ctx) From 462b82a0602ea68dc54d8bc08167a07da5eedaf7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 20 May 2024 00:34:54 +0200 Subject: [PATCH 291/893] index: reduce size of compressed indexes use the same index size for compressed and uncompressed indexes. Otherwise, decoding the index of a compressed repository requires significantly more memory. 
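The resulting heuristic is deliberately simple: an in-memory index is considered full once it holds a fixed number of blobs or has been open for too long, with no compression-dependent limit anymore. A minimal, self-contained sketch of that check (simplified names and signature, not the exact restic code, which additionally locks the index and sums its per-type maps):

    package main

    import (
        "fmt"
        "time"
    )

    const (
        indexMaxBlobs = 50000            // one limit, regardless of repository compression
        indexMaxAge   = 10 * time.Minute // flush old indexes even when they are small
    )

    // indexFull reports whether a preliminary index should be written out.
    func indexFull(blobs uint, created time.Time) bool {
        switch {
        case time.Since(created) >= indexMaxAge:
            return true
        case blobs >= indexMaxBlobs:
            return true
        }
        return false
    }

    func main() {
        fmt.Println(indexFull(60000, time.Now()))              // true: over the blob limit
        fmt.Println(indexFull(10, time.Now().Add(-time.Hour))) // true: older than ten minutes
        fmt.Println(indexFull(10, time.Now()))                 // false: keep collecting entries
    }
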
--- cmd/restic/cmd_repair_index_integration_test.go | 2 +- internal/index/index.go | 15 ++++----------- internal/index/master_index.go | 13 ++++--------- internal/repository/repository.go | 8 -------- internal/repository/repository_test.go | 2 +- 5 files changed, 10 insertions(+), 30 deletions(-) diff --git a/cmd/restic/cmd_repair_index_integration_test.go b/cmd/restic/cmd_repair_index_integration_test.go index e1a3dfe0303..e11b2f91b6c 100644 --- a/cmd/restic/cmd_repair_index_integration_test.go +++ b/cmd/restic/cmd_repair_index_integration_test.go @@ -68,7 +68,7 @@ func TestRebuildIndexAlwaysFull(t *testing.T) { defer func() { index.IndexFull = indexFull }() - index.IndexFull = func(*index.Index, bool) bool { return true } + index.IndexFull = func(*index.Index) bool { return true } testRebuildIndex(t, nil) } diff --git a/internal/index/index.go b/internal/index/index.go index ab497ad96ff..36ac2560fce 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -90,13 +90,12 @@ func (idx *Index) Final() bool { } const ( - indexMaxBlobs = 50000 - indexMaxBlobsCompressed = 3 * indexMaxBlobs - indexMaxAge = 10 * time.Minute + indexMaxBlobs = 50000 + indexMaxAge = 10 * time.Minute ) // IndexFull returns true iff the index is "full enough" to be saved as a preliminary index. -var IndexFull = func(idx *Index, compress bool) bool { +var IndexFull = func(idx *Index) bool { idx.m.RLock() defer idx.m.RUnlock() @@ -107,18 +106,12 @@ var IndexFull = func(idx *Index, compress bool) bool { blobs += idx.byType[typ].len() } age := time.Since(idx.created) - var maxBlobs uint - if compress { - maxBlobs = indexMaxBlobsCompressed - } else { - maxBlobs = indexMaxBlobs - } switch { case age >= indexMaxAge: debug.Log("index %p is old enough", idx, age) return true - case blobs >= maxBlobs: + case blobs >= indexMaxBlobs: debug.Log("index %p has %d blobs", idx, blobs) return true } diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 9e5e3376c35..f8e776b2301 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -17,7 +17,6 @@ type MasterIndex struct { idx []*Index pendingBlobs restic.BlobSet idxMutex sync.RWMutex - compress bool } // NewMasterIndex creates a new master index. @@ -33,10 +32,6 @@ func (mi *MasterIndex) clear() { mi.idx[0].Finalize() } -func (mi *MasterIndex) MarkCompressed() { - mi.compress = true -} - // Lookup queries all known Indexes for the ID and returns all matches. 
func (mi *MasterIndex) Lookup(bh restic.BlobHandle) (pbs []restic.PackedBlob) { mi.idxMutex.RLock() @@ -211,7 +206,7 @@ func (mi *MasterIndex) finalizeFullIndexes() []*Index { continue } - if IndexFull(idx, mi.compress) { + if IndexFull(idx) { debug.Log("index %p is full", idx) idx.Finalize() list = append(list, idx) @@ -417,7 +412,7 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud newIndex := NewIndex() for task := range rewriteCh { // always rewrite indexes using the old format, that include a pack that must be removed or that are not full - if !task.oldFormat && len(task.idx.Packs().Intersect(excludePacks)) == 0 && IndexFull(task.idx, mi.compress) { + if !task.oldFormat && len(task.idx.Packs().Intersect(excludePacks)) == 0 && IndexFull(task.idx) { // make sure that each pack is only stored exactly once in the index excludePacks.Merge(task.idx.Packs()) // index is already up to date @@ -433,7 +428,7 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud for pbs := range task.idx.EachByPack(wgCtx, excludePacks) { newIndex.StorePack(pbs.PackID, pbs.Blobs) - if IndexFull(newIndex, mi.compress) { + if IndexFull(newIndex) { select { case saveCh <- newIndex: case <-wgCtx.Done(): @@ -527,7 +522,7 @@ func (mi *MasterIndex) SaveFallback(ctx context.Context, repo restic.SaverRemove for pbs := range idx.EachByPack(wgCtx, excludePacks) { newIndex.StorePack(pbs.PackID, pbs.Blobs) p.Add(1) - if IndexFull(newIndex, mi.compress) { + if IndexFull(newIndex) { select { case ch <- newIndex: case <-wgCtx.Done(): diff --git a/internal/repository/repository.go b/internal/repository/repository.go index f0ef93ecf89..34a362c5560 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -588,19 +588,11 @@ func (r *Repository) ListPacksFromIndex(ctx context.Context, packs restic.IDSet) // SetIndex instructs the repository to use the given index. 
func (r *Repository) SetIndex(i restic.MasterIndex) error { r.idx = i.(*index.MasterIndex) - r.configureIndex() return r.prepareCache() } func (r *Repository) clearIndex() { r.idx = index.NewMasterIndex() - r.configureIndex() -} - -func (r *Repository) configureIndex() { - if r.cfg.Version >= 2 { - r.idx.MarkCompressed() - } } // LoadIndex loads all index files from the backend in parallel and stores them diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 05b790e33c3..92eb1bbaee4 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -376,7 +376,7 @@ func TestRepositoryIncrementalIndex(t *testing.T) { func testRepositoryIncrementalIndex(t *testing.T, version uint) { repo, _ := repository.TestRepositoryWithVersion(t, version) - index.IndexFull = func(*index.Index, bool) bool { return true } + index.IndexFull = func(*index.Index) bool { return true } // add a few rounds of packs for j := 0; j < 5; j++ { From 3c7b7efdc94d3b525d0b023dfe3e252e0e236111 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 20 May 2024 15:43:22 +0200 Subject: [PATCH 292/893] repository: remove prune plan parts once they are no longer necessary --- internal/repository/prune.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 1247a6b3e7a..1dae68c1531 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -545,6 +545,8 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) er if len(plan.removePacksFirst) != 0 { printer.P("deleting unreferenced packs\n") _ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer) + // forget unused data + plan.removePacksFirst = nil } if ctx.Err() != nil { return ctx.Err() @@ -562,6 +564,8 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) er // Also remove repacked packs plan.removePacks.Merge(plan.repackPacks) + // forget unused data + plan.repackPacks = nil if plan.keepBlobs.Len() != 0 { printer.E("%v was not repacked\n\n"+ From 436afbff234ca1b9d3ae37ba5bb5b4ceff6e2ea1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 20 May 2024 15:46:22 +0200 Subject: [PATCH 293/893] add changelog for memory efficient prune --- changelog/unreleased/pull-4354 | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 changelog/unreleased/pull-4354 diff --git a/changelog/unreleased/pull-4354 b/changelog/unreleased/pull-4354 new file mode 100644 index 00000000000..dddbbc76530 --- /dev/null +++ b/changelog/unreleased/pull-4354 @@ -0,0 +1,7 @@ +Enhancement: Significantly reduce prune memory usage + +Prune has been optimized to use up to 60% less memory. The memory usage should +now be roughly similar to creating a backup. + +https://github.com/restic/restic/pull/4354 +https://github.com/restic/restic/pull/4812 From d4b0d21199c6bdc2b44b1cdc7551af7928e8c623 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 18 May 2024 18:25:09 +0200 Subject: [PATCH 294/893] key add/passwd: deduplicate options setup and remove globals The current pattern of using a global options variable is problematic. 
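The pattern this patch moves to keeps each command's options in a struct that registers its own flags and is captured by the `RunE` closure. A rough sketch of the idea outside of restic (the `exampleOptions` type, flag names, and output below are made up for illustration, not restic's actual code):

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
        "github.com/spf13/pflag"
    )

    // exampleOptions plays the role of KeyAddOptions: it owns and registers its flags.
    type exampleOptions struct {
        Username string
        Hostname string
    }

    func (opts *exampleOptions) Add(flags *pflag.FlagSet) {
        flags.StringVar(&opts.Username, "user", "", "the username for the new key")
        flags.StringVar(&opts.Hostname, "host", "", "the hostname for the new key")
    }

    func newExampleCommand() *cobra.Command {
        cmd := &cobra.Command{Use: "example"}
        var opts exampleOptions // lives in this scope, no package-level global
        opts.Add(cmd.Flags())
        cmd.RunE = func(cmd *cobra.Command, args []string) error {
            fmt.Printf("user=%q host=%q\n", opts.Username, opts.Hostname)
            return nil
        }
        return cmd
    }

    func main() {
        cmd := newExampleCommand()
        cmd.SetArgs([]string{"--user", "alice", "--host", "backuphost"})
        _ = cmd.Execute()
    }

A second command can then embed the same struct, as KeyPasswdOptions embeds KeyAddOptions below, so both register identical flags without duplicating the definitions.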
--- cmd/restic/cmd_key_add.go | 25 ++++++++++++++----------- cmd/restic/cmd_key_passwd.go | 14 +++++--------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index 30675462785..c87a99a5e4c 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -9,6 +9,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/spf13/cobra" + "github.com/spf13/pflag" ) var cmdKeyAdd = &cobra.Command{ @@ -23,26 +24,28 @@ EXIT STATUS Exit status is 0 if the command is successful, and non-zero if there was any error. `, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - return runKeyAdd(cmd.Context(), globalOptions, keyAddOpts, args) - }, } type KeyAddOptions struct { - NewPasswordFile string - Username string - Hostname string + NewPasswordFile string + Username string + Hostname string } -var keyAddOpts KeyAddOptions +func (opts *KeyAddOptions) Add(flags *pflag.FlagSet) { + flags.StringVarP(&opts.NewPasswordFile, "new-password-file", "", "", "`file` from which to read the new password") + flags.StringVarP(&opts.Username, "user", "", "", "the username for new key") + flags.StringVarP(&opts.Hostname, "host", "", "", "the hostname for new key") +} func init() { cmdKey.AddCommand(cmdKeyAdd) - flags := cmdKeyAdd.Flags() - flags.StringVarP(&keyAddOpts.NewPasswordFile, "new-password-file", "", "", "`file` from which to read the new password") - flags.StringVarP(&keyAddOpts.Username, "user", "", "", "the username for new key") - flags.StringVarP(&keyAddOpts.Hostname, "host", "", "", "the hostname for new key") + var keyAddOpts KeyAddOptions + keyAddOpts.Add(cmdKeyAdd.Flags()) + cmdKeyAdd.RunE = func(cmd *cobra.Command, args []string) error { + return runKeyAdd(cmd.Context(), globalOptions, keyAddOpts, args) + } } func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, args []string) error { diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index 0836c4cfe3a..32822a0bac7 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -22,24 +22,20 @@ EXIT STATUS Exit status is 0 if the command is successful, and non-zero if there was any error. 
`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - return runKeyPasswd(cmd.Context(), globalOptions, keyPasswdOpts, args) - }, }
type KeyPasswdOptions struct { KeyAddOptions }
-var keyPasswdOpts KeyPasswdOptions
- func init() { cmdKey.AddCommand(cmdKeyPasswd)
- flags := cmdKeyPasswd.Flags()
- flags.StringVarP(&keyPasswdOpts.NewPasswordFile, "new-password-file", "", "", "`file` from which to read the new password")
- flags.StringVarP(&keyPasswdOpts.Username, "user", "", "", "the username for new key")
- flags.StringVarP(&keyPasswdOpts.Hostname, "host", "", "", "the hostname for new key")
+ var keyPasswdOpts KeyPasswdOptions
+ keyPasswdOpts.KeyAddOptions.Add(cmdKeyPasswd.Flags())
+ cmdKeyPasswd.RunE = func(cmd *cobra.Command, args []string) error {
+ return runKeyPasswd(cmd.Context(), globalOptions, keyPasswdOpts, args)
+ } }
func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOptions, args []string) error {
From 1d2277b4c32d8f2259d0960870497aed17751004 Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Sat, 18 May 2024 18:59:29 +0200
Subject: [PATCH 295/893] Add --insecure-no-password option

This also includes two derived options: `--from-insecure-no-password`, used
for commands that require specifying a source repository, and
`--new-insecure-no-password` for the `key add` and `key passwd` commands.

Specifying `--insecure-no-password` disables the password prompt and
immediately uses an empty password. Passing a password via CLI option or
environment variable at the same time is an error.
--- cmd/restic/cmd_backup.go | 2 +-
cmd/restic/cmd_backup_integration_test.go | 14 +++++++
cmd/restic/cmd_copy_integration_test.go | 25 ++++++++++++-
cmd/restic/cmd_key_add.go | 24 ++++++++++--
cmd/restic/cmd_key_integration_test.go | 39 ++++++++++++++++++++
cmd/restic/cmd_key_passwd.go | 2 +-
cmd/restic/global.go | 45 ++++++++++++++---------
cmd/restic/global_test.go | 13 +++++++
cmd/restic/secondary_repo.go | 17 ++++++---
9 files changed, 150 insertions(+), 31 deletions(-)
diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 19b96e9b0c4..4890f82ff6c 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -257,7 +257,7 @@ func readFilenamesRaw(r io.Reader) (names []string, err error) { // Check returns an error when an invalid combination of options was set.
func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error { - if gopts.password == "" { + if gopts.password == "" && !gopts.InsecureNoPassword { if opts.Stdin { return errors.Fatal("cannot read both password and data from stdin") } diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 75de1341cb9..f7372851f42 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -627,3 +627,17 @@ func TestStdinFromCommandFailNoOutputAndExitCode(t *testing.T) { testRunCheck(t, env.gopts) } + +func TestBackupEmptyPassword(t *testing.T) { + // basic sanity test that empty passwords work + env, cleanup := withTestEnvironment(t) + defer cleanup() + + env.gopts.password = "" + env.gopts.InsecureNoPassword = true + + testSetupBackupData(t, env) + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{}, env.gopts) + testListSnapshots(t, env.gopts, 1) + testRunCheck(t, env.gopts) +} diff --git a/cmd/restic/cmd_copy_integration_test.go b/cmd/restic/cmd_copy_integration_test.go index 1c883769088..70461587034 100644 --- a/cmd/restic/cmd_copy_integration_test.go +++ b/cmd/restic/cmd_copy_integration_test.go @@ -13,10 +13,12 @@ func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) { gopts := srcGopts gopts.Repo = dstGopts.Repo gopts.password = dstGopts.password + gopts.InsecureNoPassword = dstGopts.InsecureNoPassword copyOpts := CopyOptions{ secondaryRepoOptions: secondaryRepoOptions{ - Repo: srcGopts.Repo, - password: srcGopts.password, + Repo: srcGopts.Repo, + password: srcGopts.password, + InsecureNoPassword: srcGopts.InsecureNoPassword, }, } @@ -134,3 +136,22 @@ func TestCopyUnstableJSON(t *testing.T) { testRunCheck(t, env2.gopts) testListSnapshots(t, env2.gopts, 1) } + +func TestCopyToEmptyPassword(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + env2, cleanup2 := withTestEnvironment(t) + defer cleanup2() + env2.gopts.password = "" + env2.gopts.InsecureNoPassword = true + + testSetupBackupData(t, env) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, BackupOptions{}, env.gopts) + + testRunInit(t, env2.gopts) + testRunCopy(t, env.gopts, env2.gopts) + + testListSnapshots(t, env.gopts, 1) + testListSnapshots(t, env2.gopts, 1) + testRunCheck(t, env2.gopts) +} diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index c87a99a5e4c..9e50aa67d5a 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -28,12 +28,14 @@ Exit status is 0 if the command is successful, and non-zero if there was any err type KeyAddOptions struct { NewPasswordFile string + InsecureNoPassword bool Username string Hostname string } func (opts *KeyAddOptions) Add(flags *pflag.FlagSet) { flags.StringVarP(&opts.NewPasswordFile, "new-password-file", "", "", "`file` from which to read the new password") + flags.BoolVar(&opts.InsecureNoPassword, "new-insecure-no-password", false, "add an empty password for the repository (insecure)") flags.StringVarP(&opts.Username, "user", "", "", "the username for new key") flags.StringVarP(&opts.Hostname, "host", "", "", "the hostname for new key") } @@ -63,7 +65,7 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg } func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyAddOptions) error { - pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile) + pw, err := getNewPassword(ctx, gopts, 
opts.NewPasswordFile, opts.InsecureNoPassword) if err != nil { return err } @@ -86,19 +88,35 @@ func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOption // testKeyNewPassword is used to set a new password during integration testing. var testKeyNewPassword string -func getNewPassword(ctx context.Context, gopts GlobalOptions, newPasswordFile string) (string, error) { +func getNewPassword(ctx context.Context, gopts GlobalOptions, newPasswordFile string, insecureNoPassword bool) (string, error) { if testKeyNewPassword != "" { return testKeyNewPassword, nil } + if insecureNoPassword { + if newPasswordFile != "" { + return "", fmt.Errorf("only either --new-password-file or --new-insecure-no-password may be specified") + } + return "", nil + } + if newPasswordFile != "" { - return loadPasswordFromFile(newPasswordFile) + password, err := loadPasswordFromFile(newPasswordFile) + if err != nil { + return "", err + } + if password == "" { + return "", fmt.Errorf("an empty password is not allowed by default. Pass the flag `--new-insecure-no-password` to restic to disable this check") + } + return password, nil } // Since we already have an open repository, temporary remove the password // to prompt the user for the passwd. newopts := gopts newopts.password = "" + // empty passwords are already handled above + newopts.InsecureNoPassword = false return ReadPasswordTwice(ctx, newopts, "enter new password: ", diff --git a/cmd/restic/cmd_key_integration_test.go b/cmd/restic/cmd_key_integration_test.go index 16cc1bdad7f..0b453388725 100644 --- a/cmd/restic/cmd_key_integration_test.go +++ b/cmd/restic/cmd_key_integration_test.go @@ -3,6 +3,8 @@ package main import ( "bufio" "context" + "os" + "path/filepath" "regexp" "strings" "testing" @@ -109,6 +111,43 @@ func TestKeyAddRemove(t *testing.T) { testRunKeyAddNewKeyUserHost(t, env.gopts) } +func TestKeyAddInvalid(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + testRunInit(t, env.gopts) + + err := runKeyAdd(context.TODO(), env.gopts, KeyAddOptions{ + NewPasswordFile: "some-file", + InsecureNoPassword: true, + }, []string{}) + rtest.Assert(t, strings.Contains(err.Error(), "only either"), "unexpected error message, got %q", err) + + pwfile := filepath.Join(t.TempDir(), "pwfile") + rtest.OK(t, os.WriteFile(pwfile, []byte{}, 0o666)) + + err = runKeyAdd(context.TODO(), env.gopts, KeyAddOptions{ + NewPasswordFile: pwfile, + }, []string{}) + rtest.Assert(t, strings.Contains(err.Error(), "an empty password is not allowed by default"), "unexpected error message, got %q", err) +} + +func TestKeyAddEmpty(t *testing.T) { + env, cleanup := withTestEnvironment(t) + // must list keys more than once + env.gopts.backendTestHook = nil + defer cleanup() + testRunInit(t, env.gopts) + + rtest.OK(t, runKeyAdd(context.TODO(), env.gopts, KeyAddOptions{ + InsecureNoPassword: true, + }, []string{})) + + env.gopts.password = "" + env.gopts.InsecureNoPassword = true + + testRunCheck(t, env.gopts) +} + type emptySaveBackend struct { backend.Backend } diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index 32822a0bac7..1a12001099c 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -53,7 +53,7 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption } func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyPasswdOptions) error { - pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile) + pw, err := 
getNewPassword(ctx, gopts, opts.NewPasswordFile, opts.InsecureNoPassword) if err != nil { return err } diff --git a/cmd/restic/global.go b/cmd/restic/global.go index c954a42707c..9671f2a26ff 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -52,22 +52,23 @@ type backendWrapper func(r backend.Backend) (backend.Backend, error) // GlobalOptions hold all global options for restic. type GlobalOptions struct { - Repo string - RepositoryFile string - PasswordFile string - PasswordCommand string - KeyHint string - Quiet bool - Verbose int - NoLock bool - RetryLock time.Duration - JSON bool - CacheDir string - NoCache bool - CleanupCache bool - Compression repository.CompressionMode - PackSize uint - NoExtraVerify bool + Repo string + RepositoryFile string + PasswordFile string + PasswordCommand string + KeyHint string + Quiet bool + Verbose int + NoLock bool + RetryLock time.Duration + JSON bool + CacheDir string + NoCache bool + CleanupCache bool + Compression repository.CompressionMode + PackSize uint + NoExtraVerify bool + InsecureNoPassword bool backend.TransportOptions limiter.Limits @@ -125,6 +126,7 @@ func init() { f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache") f.StringSliceVar(&globalOptions.RootCertFilenames, "cacert", nil, "`file` to load root certificates from (default: use system certificates or $RESTIC_CACERT)") f.StringVar(&globalOptions.TLSClientCertKeyFilename, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)") + f.BoolVar(&globalOptions.InsecureNoPassword, "insecure-no-password", false, "use an empty password for the repository, must be passed to every restic command (insecure)") f.BoolVar(&globalOptions.InsecureTLS, "insecure-tls", false, "skip TLS certificate verification when connecting to the repository (insecure)") f.BoolVar(&globalOptions.CleanupCache, "cleanup-cache", false, "auto remove old cache directories") f.Var(&globalOptions.Compression, "compression", "compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)") @@ -327,6 +329,13 @@ func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt // variable RESTIC_PASSWORD or prompts the user. If the context is canceled, // the function leaks the password reading goroutine. func ReadPassword(ctx context.Context, opts GlobalOptions, prompt string) (string, error) { + if opts.InsecureNoPassword { + if opts.password != "" { + return "", errors.Fatal("--insecure-no-password must not be specified together with providing a password via a cli option or environment variable") + } + return "", nil + } + if opts.password != "" { return opts.password, nil } @@ -348,7 +357,7 @@ func ReadPassword(ctx context.Context, opts GlobalOptions, prompt string) (strin } if len(password) == 0 { - return "", errors.Fatal("an empty password is not a password") + return "", errors.Fatal("an empty password is not allowed by default. 
Pass the flag `--insecure-no-password` to restic to disable this check") } return password, nil @@ -445,7 +454,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } passwordTriesLeft := 1 - if stdinIsTerminal() && opts.password == "" { + if stdinIsTerminal() && opts.password == "" && !opts.InsecureNoPassword { passwordTriesLeft = 3 } diff --git a/cmd/restic/global_test.go b/cmd/restic/global_test.go index 4f5c29e9a77..ce59bba4965 100644 --- a/cmd/restic/global_test.go +++ b/cmd/restic/global_test.go @@ -1,8 +1,10 @@ package main import ( + "context" "os" "path/filepath" + "strings" "testing" rtest "github.com/restic/restic/internal/test" @@ -50,3 +52,14 @@ func TestReadRepo(t *testing.T) { t.Fatal("must not read repository path from invalid file path") } } + +func TestReadEmptyPassword(t *testing.T) { + opts := GlobalOptions{InsecureNoPassword: true} + password, err := ReadPassword(context.TODO(), opts, "test") + rtest.OK(t, err) + rtest.Equals(t, "", password, "got unexpected password") + + opts.password = "invalid" + _, err = ReadPassword(context.TODO(), opts, "test") + rtest.Assert(t, strings.Contains(err.Error(), "must not be specified together with providing a password via a cli option or environment variable"), "unexpected error message, got %v", err) +} diff --git a/cmd/restic/secondary_repo.go b/cmd/restic/secondary_repo.go index 2afd36a81b9..9a3eb5fe2c6 100644 --- a/cmd/restic/secondary_repo.go +++ b/cmd/restic/secondary_repo.go @@ -11,11 +11,12 @@ import ( type secondaryRepoOptions struct { password string // from-repo options - Repo string - RepositoryFile string - PasswordFile string - PasswordCommand string - KeyHint string + Repo string + RepositoryFile string + PasswordFile string + PasswordCommand string + KeyHint string + InsecureNoPassword bool // repo2 options LegacyRepo string LegacyRepositoryFile string @@ -49,6 +50,7 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo f.StringVarP(&opts.PasswordFile, "from-password-file", "", "", "`file` to read the source repository password from (default: $RESTIC_FROM_PASSWORD_FILE)") f.StringVarP(&opts.KeyHint, "from-key-hint", "", "", "key ID of key to try decrypting the source repository first (default: $RESTIC_FROM_KEY_HINT)") f.StringVarP(&opts.PasswordCommand, "from-password-command", "", "", "shell `command` to obtain the source repository password from (default: $RESTIC_FROM_PASSWORD_COMMAND)") + f.BoolVar(&opts.InsecureNoPassword, "from-insecure-no-password", false, "use an empty password for the source repository, must be passed to every restic command (insecure)") opts.Repo = os.Getenv("RESTIC_FROM_REPOSITORY") opts.RepositoryFile = os.Getenv("RESTIC_FROM_REPOSITORY_FILE") @@ -63,7 +65,7 @@ func fillSecondaryGlobalOpts(ctx context.Context, opts secondaryRepoOptions, gop } hasFromRepo := opts.Repo != "" || opts.RepositoryFile != "" || opts.PasswordFile != "" || - opts.KeyHint != "" || opts.PasswordCommand != "" + opts.KeyHint != "" || opts.PasswordCommand != "" || opts.InsecureNoPassword hasRepo2 := opts.LegacyRepo != "" || opts.LegacyRepositoryFile != "" || opts.LegacyPasswordFile != "" || opts.LegacyKeyHint != "" || opts.LegacyPasswordCommand != "" @@ -85,6 +87,7 @@ func fillSecondaryGlobalOpts(ctx context.Context, opts secondaryRepoOptions, gop dstGopts.PasswordFile = opts.PasswordFile dstGopts.PasswordCommand = opts.PasswordCommand dstGopts.KeyHint = opts.KeyHint + dstGopts.InsecureNoPassword = opts.InsecureNoPassword pwdEnv = "RESTIC_FROM_PASSWORD" 
repoPrefix = "source" @@ -98,6 +101,8 @@ func fillSecondaryGlobalOpts(ctx context.Context, opts secondaryRepoOptions, gop dstGopts.PasswordFile = opts.LegacyPasswordFile dstGopts.PasswordCommand = opts.LegacyPasswordCommand dstGopts.KeyHint = opts.LegacyKeyHint
+ // keep existing behavior for legacy options
+ dstGopts.InsecureNoPassword = false
pwdEnv = "RESTIC_PASSWORD2" }
From 130506250f04c86f8487819eaf801c40f1d4990b Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Sat, 18 May 2024 19:20:04 +0200
Subject: [PATCH 296/893] document insecure-no-password

--- changelog/unreleased/issue-1786 | 19 +++++++++++++++++++
doc/030_preparing_a_new_repo.rst | 23 +++++++++++++++++++++++
2 files changed, 42 insertions(+) create mode 100644 changelog/unreleased/issue-1786
diff --git a/changelog/unreleased/issue-1786 b/changelog/unreleased/issue-1786 new file mode 100644 index 00000000000..bdd0d48c37d --- /dev/null +++ b/changelog/unreleased/issue-1786 @@ -0,0 +1,19 @@
+Enhancement: Support repositories with empty password
+
+Restic refused to create or operate on repositories with an empty password.
+Using the new option `--insecure-no-password` it is now possible to disable
+this check. Restic will not prompt for a password when using this option.
+For security reasons, the option must always be specified when operating on
+repositories with an empty password.
+
+Specifying `--insecure-no-password` while also passing a password to restic
+via a CLI option or via environment variable results in an error.
+
+The `init` and `copy` commands also support the option `--from-insecure-no-password`
+which applies to the source repository. The `key add` and `key passwd` commands
+include the `--new-insecure-no-password` option to add or set an empty password.
+
+https://github.com/restic/restic/issues/1786
+https://github.com/restic/restic/issues/4326
+https://github.com/restic/restic/pull/4698
+https://github.com/restic/restic/pull/4808
diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index ee0a0df5e8d..5f3f3ff157d 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -852,3 +852,26 @@ and then grants read/write permissions for group access. .. note:: To manage who has access to the repository you can use ``usermod`` on Linux systems, to change which group controls repository access ``chgrp -R`` is your friend.
+
+
+Repositories with empty password
+********************************
+
+Restic by default refuses to create or operate on repositories that use an
+empty password. Since restic 0.17.0, the option ``--insecure-no-password`` allows
+disabling this check. Restic will not prompt for a password when using this option.
+Specifying ``--insecure-no-password`` while also passing a password to restic
+via a CLI option or via environment variable results in an error.
+
+For security reasons, the option must always be specified when operating on
+repositories with an empty password. For example, to create a new repository
+with an empty password, use the following command.
+
+.. code-block:: console
+
+ restic init --insecure-no-password
+
+
+The ``init`` and ``copy`` commands also support the option ``--from-insecure-no-password``
+which applies to the source repository. The ``key add`` and ``key passwd`` commands
+include the ``--new-insecure-no-password`` option to add or set an empty password.
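To illustrate the workflow described above, one possible sequence for switching an existing repository to an empty password and then using it could look as follows (an example assembled from the options introduced in this patch series, not taken from the documentation diff itself):

.. code-block:: console

    restic key passwd --new-insecure-no-password
    restic snapshots --insecure-no-password
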
From 8e5d7d719c6df33bbc5b275c15115f33f17557d1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 24 May 2024 23:04:06 +0200 Subject: [PATCH 297/893] cache: move to backend package --- cmd/restic/cmd_cache.go | 2 +- cmd/restic/cmd_check.go | 2 +- cmd/restic/global.go | 2 +- internal/{ => backend}/cache/backend.go | 0 internal/{ => backend}/cache/backend_test.go | 0 internal/{ => backend}/cache/cache.go | 0 internal/{ => backend}/cache/cache_test.go | 0 internal/{ => backend}/cache/dir.go | 0 internal/{ => backend}/cache/dir_test.go | 0 internal/{ => backend}/cache/file.go | 0 internal/{ => backend}/cache/file_test.go | 0 internal/{ => backend}/cache/testing.go | 0 internal/repository/raw_test.go | 2 +- internal/repository/repository.go | 2 +- internal/repository/repository_test.go | 2 +- 15 files changed, 6 insertions(+), 6 deletions(-) rename internal/{ => backend}/cache/backend.go (100%) rename internal/{ => backend}/cache/backend_test.go (100%) rename internal/{ => backend}/cache/cache.go (100%) rename internal/{ => backend}/cache/cache_test.go (100%) rename internal/{ => backend}/cache/dir.go (100%) rename internal/{ => backend}/cache/dir_test.go (100%) rename internal/{ => backend}/cache/file.go (100%) rename internal/{ => backend}/cache/file_test.go (100%) rename internal/{ => backend}/cache/testing.go (100%) diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go index 354cec2882c..651c65fcf27 100644 --- a/cmd/restic/cmd_cache.go +++ b/cmd/restic/cmd_cache.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/restic/restic/internal/cache" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/ui" diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 671cab0e615..11a1c7cb55e 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -11,7 +11,7 @@ import ( "github.com/spf13/cobra" - "github.com/restic/restic/internal/cache" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 9671f2a26ff..144445cc0a4 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -15,6 +15,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/azure" "github.com/restic/restic/internal/backend/b2" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/backend/gs" "github.com/restic/restic/internal/backend/limiter" "github.com/restic/restic/internal/backend/local" @@ -27,7 +28,6 @@ import ( "github.com/restic/restic/internal/backend/sema" "github.com/restic/restic/internal/backend/sftp" "github.com/restic/restic/internal/backend/swift" - "github.com/restic/restic/internal/cache" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/options" diff --git a/internal/cache/backend.go b/internal/backend/cache/backend.go similarity index 100% rename from internal/cache/backend.go rename to internal/backend/cache/backend.go diff --git a/internal/cache/backend_test.go b/internal/backend/cache/backend_test.go similarity index 100% rename from internal/cache/backend_test.go rename to internal/backend/cache/backend_test.go diff --git a/internal/cache/cache.go b/internal/backend/cache/cache.go similarity index 100% rename from 
internal/cache/cache.go rename to internal/backend/cache/cache.go diff --git a/internal/cache/cache_test.go b/internal/backend/cache/cache_test.go similarity index 100% rename from internal/cache/cache_test.go rename to internal/backend/cache/cache_test.go diff --git a/internal/cache/dir.go b/internal/backend/cache/dir.go similarity index 100% rename from internal/cache/dir.go rename to internal/backend/cache/dir.go diff --git a/internal/cache/dir_test.go b/internal/backend/cache/dir_test.go similarity index 100% rename from internal/cache/dir_test.go rename to internal/backend/cache/dir_test.go diff --git a/internal/cache/file.go b/internal/backend/cache/file.go similarity index 100% rename from internal/cache/file.go rename to internal/backend/cache/file.go diff --git a/internal/cache/file_test.go b/internal/backend/cache/file_test.go similarity index 100% rename from internal/cache/file_test.go rename to internal/backend/cache/file_test.go diff --git a/internal/cache/testing.go b/internal/backend/cache/testing.go similarity index 100% rename from internal/cache/testing.go rename to internal/backend/cache/testing.go diff --git a/internal/repository/raw_test.go b/internal/repository/raw_test.go index 28786dbcd8f..ac65a8dc8f1 100644 --- a/internal/repository/raw_test.go +++ b/internal/repository/raw_test.go @@ -7,9 +7,9 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/backend/mock" - "github.com/restic/restic/internal/cache" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 34a362c5560..7233c785432 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -14,8 +14,8 @@ import ( "github.com/klauspost/compress/zstd" "github.com/restic/chunker" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/backend/dryrun" - "github.com/restic/restic/internal/cache" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 92eb1bbaee4..679eaaaabf7 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -14,9 +14,9 @@ import ( "time" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/backend/local" "github.com/restic/restic/internal/backend/mem" - "github.com/restic/restic/internal/cache" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/index" From f20bf5eeba059015bd6ad208e2abaa443760ff35 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 25 May 2024 12:35:28 +0200 Subject: [PATCH 298/893] update golang.org/x packages --- go.mod | 18 ++++++++---------- go.sum | 42 ++++++++++++++++-------------------------- 2 files changed, 24 insertions(+), 36 deletions(-) diff --git a/go.mod b/go.mod index 8138f494ab5..7cdde8370d9 100644 --- a/go.mod +++ b/go.mod @@ -26,21 +26,20 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.22.0 - 
golang.org/x/net v0.24.0 - golang.org/x/oauth2 v0.18.0 - golang.org/x/sync v0.6.0 - golang.org/x/sys v0.19.0 - golang.org/x/term v0.19.0 - golang.org/x/text v0.14.0 + golang.org/x/crypto v0.23.0 + golang.org/x/net v0.25.0 + golang.org/x/oauth2 v0.20.0 + golang.org/x/sync v0.7.0 + golang.org/x/sys v0.20.0 + golang.org/x/term v0.20.0 + golang.org/x/text v0.15.0 golang.org/x/time v0.5.0 google.golang.org/api v0.170.0 ) require ( cloud.google.com/go v0.112.1 // indirect - cloud.google.com/go/compute v1.24.0 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/iam v1.1.7 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect @@ -77,7 +76,6 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 // indirect diff --git a/go.sum b/go.sum index fd1cd86ce4d..98d7b59db08 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= -cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= -cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= @@ -80,8 +78,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -90,7 +86,6 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -206,8 +201,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -227,19 +222,19 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -255,21 +250,20 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -289,8 +283,6 @@ google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= @@ -316,8 +308,6 @@ 
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From b2f7a878cdc089bac0cef27cc9b9927208bd3e1b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 25 May 2024 12:36:16 +0200 Subject: [PATCH 299/893] update backend libraries minio-go is kept at 7.0.67 as later versions require Go 1.21. --- go.mod | 30 +++++++++++++------------- go.sum | 66 +++++++++++++++++++++++++++++++--------------------------- 2 files changed, 51 insertions(+), 45 deletions(-) diff --git a/go.mod b/go.mod index 7cdde8370d9..b57acb1d5ba 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,9 @@ module github.com/restic/restic require ( - cloud.google.com/go/storage v1.40.0 + cloud.google.com/go/storage v1.41.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.2.0 @@ -14,7 +14,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/klauspost/compress v1.17.8 - github.com/minio/minio-go/v7 v7.0.66 + github.com/minio/minio-go/v7 v7.0.67 github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 github.com/peterbourgon/unixtransport v0.0.4 @@ -34,29 +34,31 @@ require ( golang.org/x/term v0.20.0 golang.org/x/text v0.15.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.170.0 + google.golang.org/api v0.181.0 ) require ( - cloud.google.com/go v0.112.1 // indirect + cloud.google.com/go v0.113.0 // indirect + cloud.google.com/go/auth v0.4.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect - cloud.google.com/go/iam v1.1.7 // indirect + cloud.google.com/go/iam v1.1.8 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v5 v5.2.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect 
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.3 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect @@ -76,11 +78,11 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 // indirect - google.golang.org/grpc v1.62.1 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/grpc v1.63.2 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 98d7b59db08..5d5bdd9c5de 100644 --- a/go.sum +++ b/go.sum @@ -1,23 +1,27 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= -cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= +cloud.google.com/go v0.113.0 h1:g3C70mn3lWfckKBiCVsAshabrDg01pQ0pnX1MNtnMkA= +cloud.google.com/go v0.113.0/go.mod h1:glEqlogERKYeePz6ZdkcLJ28Q2I6aERgDDErBg9GzO8= +cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg= +cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= -cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= -cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= -cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= +cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= +cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 
h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= github.com/Backblaze/blazer v0.6.1/go.mod h1:7/jrGx4O6OKOto6av+hLwelPR8rwZ+PLxQ5ZOiYAjwY= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -62,8 +66,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= -github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -89,7 +93,7 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= @@ -100,8 +104,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= -github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -124,8 +128,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= -github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= +github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8= +github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -176,7 +180,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -191,7 +195,7 @@ go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/automaxprocs v1.5.3 
h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= @@ -279,26 +283,26 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= -google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= +google.golang.org/api v0.181.0 h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4= +google.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= -google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= -google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc= -google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 h1:9IZDv+/GcI6u+a4jRFRLxQs0RUCfavGfoOgEW6jpkI0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= +google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod 
h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -308,8 +312,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= From c5260373caa10551dbd188fd44e110fa8efe02f0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 25 May 2024 12:37:14 +0200 Subject: [PATCH 300/893] update backoff/xxhash libraries --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index b57acb1d5ba..9a3d91a8a35 100644 --- a/go.mod +++ b/go.mod @@ -7,8 +7,8 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.2.0 - github.com/cenkalti/backoff/v4 v4.2.1 - github.com/cespare/xxhash/v2 v2.2.0 + github.com/cenkalti/backoff/v4 v4.3.0 + github.com/cespare/xxhash/v2 v2.3.0 github.com/elithrar/simple-scrypt v1.3.0 github.com/go-ole/go-ole v1.3.0 github.com/google/go-cmp v0.6.0 diff --git a/go.sum b/go.sum index 5d5bdd9c5de..1cd3ff0b9fb 100644 --- a/go.sum +++ b/go.sum @@ -28,11 +28,11 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74= github.com/anacrolix/fuse v0.2.0 h1:pc+To78kI2d/WUjIyrsdqeJQAesuwpGxlI3h1nAv3Do= github.com/anacrolix/fuse v0.2.0/go.mod h1:Kfu02xBwnySDpH3N23BmrP3MDfwAQGRLUCj6XyeOvBQ= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex 
v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= From 58bbb27b4c3e8b3915ff32ec8d696df5c10b5fa9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 25 May 2024 12:52:08 +0200 Subject: [PATCH 301/893] downgrade minio to keep go 1.19 support --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9a3d91a8a35..a19d84f0e7f 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/klauspost/compress v1.17.8 - github.com/minio/minio-go/v7 v7.0.67 + github.com/minio/minio-go/v7 v7.0.66 github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 github.com/peterbourgon/unixtransport v0.0.4 diff --git a/go.sum b/go.sum index 1cd3ff0b9fb..3ec38b3dad2 100644 --- a/go.sum +++ b/go.sum @@ -128,8 +128,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8= -github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A= +github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= +github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= From 50ec408302dabf43b00aab44bfa897f83193788f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 24 May 2024 23:06:44 +0200 Subject: [PATCH 302/893] index: move to repository package --- cmd/restic/cmd_debug.go | 2 +- cmd/restic/cmd_list.go | 2 +- cmd/restic/cmd_repair_index_integration_test.go | 2 +- internal/checker/checker.go | 2 +- internal/{ => repository}/index/associated_data.go | 0 internal/{ => repository}/index/associated_data_test.go | 0 internal/{ => repository}/index/index.go | 0 internal/{ => repository}/index/index_parallel.go | 0 internal/{ => repository}/index/index_parallel_test.go | 4 ++-- internal/{ => repository}/index/index_test.go | 2 +- internal/{ => repository}/index/indexmap.go | 0 internal/{ => repository}/index/indexmap_test.go | 0 internal/{ => repository}/index/master_index.go | 0 internal/{ => repository}/index/master_index_test.go | 2 +- internal/{ => repository}/index/testing.go | 0 internal/repository/prune.go | 2 +- internal/repository/repair_index.go | 2 +- internal/repository/repository.go | 2 +- internal/repository/repository_test.go | 2 +- 19 files changed, 12 insertions(+), 12 deletions(-) rename internal/{ => repository}/index/associated_data.go (100%) rename internal/{ => repository}/index/associated_data_test.go (100%) rename internal/{ => repository}/index/index.go (100%) rename internal/{ => repository}/index/index_parallel.go (100%) rename internal/{ => 
repository}/index/index_parallel_test.go (90%) rename internal/{ => repository}/index/index_test.go (99%) rename internal/{ => repository}/index/indexmap.go (100%) rename internal/{ => repository}/index/indexmap_test.go (100%) rename internal/{ => repository}/index/master_index.go (100%) rename internal/{ => repository}/index/master_index_test.go (99%) rename internal/{ => repository}/index/testing.go (100%) diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 7b0cdb53e1c..ff1e4c28bea 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -22,9 +22,9 @@ import ( "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" ) diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 27f59b4ab17..6f443042077 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -4,7 +4,7 @@ import ( "context" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" "github.com/spf13/cobra" diff --git a/cmd/restic/cmd_repair_index_integration_test.go b/cmd/restic/cmd_repair_index_integration_test.go index e11b2f91b6c..9bfc93b401a 100644 --- a/cmd/restic/cmd_repair_index_integration_test.go +++ b/cmd/restic/cmd_repair_index_integration_test.go @@ -10,7 +10,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/termstatus" diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 61c017414b7..fb6fbacd56b 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -12,9 +12,9 @@ import ( "github.com/restic/restic/internal/backend/s3" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" "golang.org/x/sync/errgroup" diff --git a/internal/index/associated_data.go b/internal/repository/index/associated_data.go similarity index 100% rename from internal/index/associated_data.go rename to internal/repository/index/associated_data.go diff --git a/internal/index/associated_data_test.go b/internal/repository/index/associated_data_test.go similarity index 100% rename from internal/index/associated_data_test.go rename to internal/repository/index/associated_data_test.go diff --git a/internal/index/index.go b/internal/repository/index/index.go similarity index 100% rename from internal/index/index.go rename to internal/repository/index/index.go diff --git a/internal/index/index_parallel.go b/internal/repository/index/index_parallel.go similarity index 100% rename from internal/index/index_parallel.go rename to internal/repository/index/index_parallel.go diff --git a/internal/index/index_parallel_test.go b/internal/repository/index/index_parallel_test.go similarity index 90% rename from 
internal/index/index_parallel_test.go rename to internal/repository/index/index_parallel_test.go index 61b0aad63c3..38dafb50750 100644 --- a/internal/index/index_parallel_test.go +++ b/internal/repository/index/index_parallel_test.go @@ -6,13 +6,13 @@ import ( "testing" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) -var repoFixture = filepath.Join("..", "repository", "testdata", "test-repo.tar.gz") +var repoFixture = filepath.Join("..", "testdata", "test-repo.tar.gz") func TestRepositoryForAllIndexes(t *testing.T) { repo, _, cleanup := repository.TestFromFixture(t, repoFixture) diff --git a/internal/index/index_test.go b/internal/repository/index/index_test.go similarity index 99% rename from internal/index/index_test.go rename to internal/repository/index/index_test.go index 1a487f82fa2..bf752d3d31f 100644 --- a/internal/index/index_test.go +++ b/internal/repository/index/index_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/restic/restic/internal/feature" - "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) diff --git a/internal/index/indexmap.go b/internal/repository/index/indexmap.go similarity index 100% rename from internal/index/indexmap.go rename to internal/repository/index/indexmap.go diff --git a/internal/index/indexmap_test.go b/internal/repository/index/indexmap_test.go similarity index 100% rename from internal/index/indexmap_test.go rename to internal/repository/index/indexmap_test.go diff --git a/internal/index/master_index.go b/internal/repository/index/master_index.go similarity index 100% rename from internal/index/master_index.go rename to internal/repository/index/master_index.go diff --git a/internal/index/master_index_test.go b/internal/repository/index/master_index_test.go similarity index 99% rename from internal/index/master_index_test.go rename to internal/repository/index/master_index_test.go index 7a2487cd431..23185962e7b 100644 --- a/internal/index/master_index_test.go +++ b/internal/repository/index/master_index_test.go @@ -10,8 +10,8 @@ import ( "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/crypto" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) diff --git a/internal/index/testing.go b/internal/repository/index/testing.go similarity index 100% rename from internal/index/testing.go rename to internal/repository/index/testing.go diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 1dae68c1531..6f2182ac53b 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -7,8 +7,8 @@ import ( "sort" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" ) diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index e011319232d..868b0a894a6 100644 --- 
a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -3,8 +3,8 @@ package repository import ( "context" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" ) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 7233c785432..97fc0f50620 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -19,8 +19,8 @@ import ( "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 679eaaaabf7..c06d4f25bda 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -19,8 +19,8 @@ import ( "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" From 5e0ea8fcfa2c762e56cd6846fe3f00a56945872f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 24 May 2024 23:09:58 +0200 Subject: [PATCH 303/893] pack: move to repository package --- cmd/restic/cmd_debug.go | 2 +- internal/checker/checker.go | 2 +- internal/repository/check.go | 2 +- internal/{ => repository}/pack/doc.go | 0 internal/{ => repository}/pack/pack.go | 0 internal/{ => repository}/pack/pack_internal_test.go | 0 internal/{ => repository}/pack/pack_test.go | 2 +- internal/repository/packer_manager.go | 2 +- internal/repository/prune.go | 2 +- internal/repository/repair_index.go | 2 +- internal/repository/repository.go | 2 +- 11 files changed, 8 insertions(+), 8 deletions(-) rename internal/{ => repository}/pack/doc.go (100%) rename internal/{ => repository}/pack/pack.go (100%) rename internal/{ => repository}/pack/pack_internal_test.go (100%) rename internal/{ => repository}/pack/pack_test.go (98%) diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index ff1e4c28bea..1a42995fdaa 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -22,9 +22,9 @@ import ( "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/repository/index" + "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" ) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index fb6fbacd56b..82348c7ea62 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -12,9 +12,9 @@ import ( "github.com/restic/restic/internal/backend/s3" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/repository/index" + 
"github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" "golang.org/x/sync/errgroup" diff --git a/internal/repository/check.go b/internal/repository/check.go index f16cd749230..0bfc390844d 100644 --- a/internal/repository/check.go +++ b/internal/repository/check.go @@ -14,7 +14,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/hashing" - "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" ) diff --git a/internal/pack/doc.go b/internal/repository/pack/doc.go similarity index 100% rename from internal/pack/doc.go rename to internal/repository/pack/doc.go diff --git a/internal/pack/pack.go b/internal/repository/pack/pack.go similarity index 100% rename from internal/pack/pack.go rename to internal/repository/pack/pack.go diff --git a/internal/pack/pack_internal_test.go b/internal/repository/pack/pack_internal_test.go similarity index 100% rename from internal/pack/pack_internal_test.go rename to internal/repository/pack/pack_internal_test.go diff --git a/internal/pack/pack_test.go b/internal/repository/pack/pack_test.go similarity index 98% rename from internal/pack/pack_test.go rename to internal/repository/pack/pack_test.go index 76ff5c12733..5ac14634834 100644 --- a/internal/pack/pack_test.go +++ b/internal/repository/pack/pack_test.go @@ -12,7 +12,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/crypto" - "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) diff --git a/internal/repository/packer_manager.go b/internal/repository/packer_manager.go index 76734fb87ef..8c7c0b9d57f 100644 --- a/internal/repository/packer_manager.go +++ b/internal/repository/packer_manager.go @@ -16,7 +16,7 @@ import ( "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/fs" - "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/repository/pack" "github.com/minio/sha256-simd" ) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 6f2182ac53b..b8d3ba97bd0 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -7,8 +7,8 @@ import ( "sort" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository/index" + "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" ) diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index 868b0a894a6..77080925461 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -3,8 +3,8 @@ package repository import ( "context" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository/index" + "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" ) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 97fc0f50620..f2c28cecee5 100644 --- a/internal/repository/repository.go +++ 
b/internal/repository/repository.go @@ -19,8 +19,8 @@ import ( "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository/index" + "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" From 496e57f956fd1804f798cf4f531048cac0fcd994 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 24 May 2024 23:11:53 +0200 Subject: [PATCH 304/893] hashing: move to repository package --- internal/checker/checker_test.go | 2 +- internal/repository/check.go | 2 +- internal/{ => repository}/hashing/reader.go | 0 internal/{ => repository}/hashing/reader_test.go | 0 internal/{ => repository}/hashing/writer.go | 0 internal/{ => repository}/hashing/writer_test.go | 0 internal/repository/packer_manager.go | 2 +- 7 files changed, 3 insertions(+), 3 deletions(-) rename internal/{ => repository}/hashing/reader.go (100%) rename internal/{ => repository}/hashing/reader_test.go (100%) rename internal/{ => repository}/hashing/writer.go (100%) rename internal/{ => repository}/hashing/writer_test.go (100%) diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index 1219f4e2bad..5eaf550ba31 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -17,8 +17,8 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/hashing" "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/repository/hashing" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" diff --git a/internal/repository/check.go b/internal/repository/check.go index 0bfc390844d..27eb11d716c 100644 --- a/internal/repository/check.go +++ b/internal/repository/check.go @@ -13,7 +13,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/hashing" + "github.com/restic/restic/internal/repository/hashing" "github.com/restic/restic/internal/repository/pack" "github.com/restic/restic/internal/restic" ) diff --git a/internal/hashing/reader.go b/internal/repository/hashing/reader.go similarity index 100% rename from internal/hashing/reader.go rename to internal/repository/hashing/reader.go diff --git a/internal/hashing/reader_test.go b/internal/repository/hashing/reader_test.go similarity index 100% rename from internal/hashing/reader_test.go rename to internal/repository/hashing/reader_test.go diff --git a/internal/hashing/writer.go b/internal/repository/hashing/writer.go similarity index 100% rename from internal/hashing/writer.go rename to internal/repository/hashing/writer.go diff --git a/internal/hashing/writer_test.go b/internal/repository/hashing/writer_test.go similarity index 100% rename from internal/hashing/writer_test.go rename to internal/repository/hashing/writer_test.go diff --git a/internal/repository/packer_manager.go b/internal/repository/packer_manager.go index 8c7c0b9d57f..213a3df4138 100644 --- a/internal/repository/packer_manager.go +++ b/internal/repository/packer_manager.go @@ -10,7 +10,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" - 
"github.com/restic/restic/internal/hashing" + "github.com/restic/restic/internal/repository/hashing" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/crypto" From 2c7ebf4c7fdb124132dfc9b2b8b15eac0eb88a7f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 25 May 2024 15:10:21 +0200 Subject: [PATCH 305/893] forget: explain oldest snapshot special case in command help --- cmd/restic/cmd_forget.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 9c40b1d093d..4fd931ff075 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -21,6 +21,9 @@ var cmdForget = &cobra.Command{ The "forget" command removes snapshots according to a policy. All snapshots are first divided into groups according to "--group-by", and after that the policy specified by the "--keep-*" options is applied to each group individually. +If there are not enough snapshots to keep one for each duration related +"--keep-{within-,}*" option, the oldest snapshot in the group is kept +additionally. Please note that this command really only deletes the snapshot object in the repository, which is a reference to data stored there. In order to remove the From 0271bb97f8dcab28eff41153d6c80093c9316582 Mon Sep 17 00:00:00 2001 From: Shivashis Padhi Date: Sat, 25 May 2024 19:15:42 +0530 Subject: [PATCH 306/893] check: enhance check command to create cache directory if it doesn't exist --- changelog/unreleased/issue-4437 | 9 +++++ cmd/restic/cmd_check.go | 6 +++ cmd/restic/cmd_check_test.go | 65 +++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 changelog/unreleased/issue-4437 diff --git a/changelog/unreleased/issue-4437 b/changelog/unreleased/issue-4437 new file mode 100644 index 00000000000..199c04cbf19 --- /dev/null +++ b/changelog/unreleased/issue-4437 @@ -0,0 +1,9 @@ +Enhancement: `check` command creates cache directory if it does not exist + +If a custom cache directory was specified for the `check` command but the directory did not exist, +then `check` continued with cache disabled. + +The `check` command now attempts to create the cache directory before initializing the cache. 
+ +https://github.com/restic/restic/issues/4437 +https://github.com/restic/restic/pull/4805 diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 671cab0e615..d953f1bc9a1 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -173,6 +173,12 @@ func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions) (cleanup func()) } // use a cache in a temporary directory + err := os.MkdirAll(cachedir, 0755) + if err != nil { + Warnf("unable to create cache directory %s, disabling cache: %v\n", cachedir, err) + gopts.NoCache = true + return cleanup + } tempdir, err := os.MkdirTemp(cachedir, "restic-check-cache-") if err != nil { // if an error occurs, don't use any cache diff --git a/cmd/restic/cmd_check_test.go b/cmd/restic/cmd_check_test.go index 4d54488cdd5..15003ee48ae 100644 --- a/cmd/restic/cmd_check_test.go +++ b/cmd/restic/cmd_check_test.go @@ -1,8 +1,11 @@ package main import ( + "io/fs" "math" + "os" "reflect" + "strings" "testing" "github.com/restic/restic/internal/restic" @@ -163,3 +166,65 @@ func TestSelectNoRandomPacksByFileSize(t *testing.T) { selectedPacks := selectRandomPacksByFileSize(testPacks, 10, 500) rtest.Assert(t, len(selectedPacks) == 0, "Expected 0 selected packs") } + +func checkIfFileWithSimilarNameExists(files []fs.DirEntry, fileName string) bool { + found := false + for _, file := range files { + if file.IsDir() { + dirName := file.Name() + if strings.Contains(dirName, fileName) { + found = true + } + } + } + return found +} + +func TestPrepareCheckCache(t *testing.T) { + // Create a temporary directory for the cache + tmpDirBase := t.TempDir() + + testCases := []struct { + opts CheckOptions + withValidCache bool + }{ + {CheckOptions{WithCache: true}, true}, // Shouldn't create temp directory + {CheckOptions{WithCache: false}, true}, // Should create temp directory + {CheckOptions{WithCache: false}, false}, // Should create cache directory first, then temp directory + } + + for _, testCase := range testCases { + t.Run("", func(t *testing.T) { + if !testCase.withValidCache { + // remove tmpDirBase to simulate non-existing cache directory + err := os.Remove(tmpDirBase) + rtest.OK(t, err) + } + gopts := GlobalOptions{CacheDir: tmpDirBase} + cleanup := prepareCheckCache(testCase.opts, &gopts) + files, err := os.ReadDir(tmpDirBase) + rtest.OK(t, err) + + if !testCase.opts.WithCache { + // If using a temporary cache directory, the cache directory should exist + // listing all directories inside tmpDirBase (cacheDir) + // one directory should be tmpDir created by prepareCheckCache with 'restic-check-cache-' in path + found := checkIfFileWithSimilarNameExists(files, "restic-check-cache-") + if !found { + t.Errorf("Expected temporary directory to exist, but it does not") + } + } else { + // If not using the cache, the temp directory should not exist + rtest.Assert(t, len(files) == 0, "expected cache directory not to exist, but it does: %v", files) + } + + // Call the cleanup function to remove the temporary cache directory + cleanup() + + // Verify that the cache directory has been removed + files, err = os.ReadDir(tmpDirBase) + rtest.OK(t, err) + rtest.Assert(t, len(files) == 0, "Expected cache directory to be removed, but it still exists: %v", files) + }) + } +} From 04c181dbd01a5b41acf01fa98ae7183f1ea0f26e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 25 May 2024 16:42:55 +0200 Subject: [PATCH 307/893] update-repair-packs-changelog --- changelog/unreleased/{pull-4644 => issue-828} | 1 + 1 file changed, 1 insertion(+) rename 
changelog/unreleased/{pull-4644 => issue-828} (91%) diff --git a/changelog/unreleased/pull-4644 b/changelog/unreleased/issue-828 similarity index 91% rename from changelog/unreleased/pull-4644 rename to changelog/unreleased/issue-828 index 8000bce7eda..6e13542258c 100644 --- a/changelog/unreleased/pull-4644 +++ b/changelog/unreleased/issue-828 @@ -6,5 +6,6 @@ on using the command if necessary to repair a repository. See the guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html for further instructions. +https://github.com/restic/restic/issues/828 https://github.com/restic/restic/pull/4644 https://github.com/restic/restic/pull/4655 From 18990dbed8bb3fca31be4f0c39f0cbdb52e2d924 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 25 May 2024 20:47:22 +0200 Subject: [PATCH 308/893] backup: refer to backed up data as backup source not target A backup reads from the source and writes it to the backup target (repository). The name "target" is kept within the source code as it's used there excessively. --- cmd/restic/cmd_backup.go | 6 +++--- doc/faq.rst | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 4890f82ff6c..e5369f7b936 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -101,7 +101,7 @@ func init() { f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time)") backupOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true} f.VarP(&backupOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')") - f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`) + f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the source files/directories (overrides the "parent" flag)`) initExcludePatternOptions(f, &backupOptions.excludePatternOptions) @@ -158,7 +158,7 @@ func filterExisting(items []string) (result []string, err error) { } if len(result) == 0 { - return nil, errors.Fatal("all target directories/files do not exist") + return nil, errors.Fatal("all source directories/files do not exist") } return @@ -403,7 +403,7 @@ func collectTargets(opts BackupOptions, args []string) (targets []string, err er // and have the ability to use both files-from and args at the same time. targets = append(targets, args...) if len(targets) == 0 && !opts.Stdin { - return nil, errors.Fatal("nothing to backup, please specify target files/dirs") + return nil, errors.Fatal("nothing to backup, please specify source files/dirs") } targets, err = filterExisting(targets) diff --git a/doc/faq.rst b/doc/faq.rst index 8e56b5d9e16..19879d8171e 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -100,7 +100,7 @@ Restic handles globbing and expansion in the following ways: - Globbing is only expanded for lines read via ``--files-from`` - Environment variables are not expanded in the file read via ``--files-from`` - ``*`` is expanded for paths read via ``--files-from`` -- e.g. For backup targets given to restic as arguments on the shell, neither glob expansion nor shell variable replacement is done. If restic is called as ``restic backup '*' '$HOME'``, it will try to backup the literal file(s)/dir(s) ``*`` and ``$HOME`` +- e.g. 
For backup sources given to restic as arguments on the shell, neither glob expansion nor shell variable replacement is done. If restic is called as ``restic backup '*' '$HOME'``, it will try to backup the literal file(s)/dir(s) ``*`` and ``$HOME`` - Double-asterisk ``**`` only works in exclude patterns as this is a custom extension built into restic; the shell must not expand it From 107f18849ceadfaa03a66815ab95cab1189e8b89 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 25 May 2024 23:16:22 +0200 Subject: [PATCH 309/893] doc: update help output in manual_rest.rst --- doc/manual_rest.rst | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index 34116fe86b5..188ebcb7099 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -13,6 +13,8 @@ Usage help is available: restic is a backup program which allows saving multiple revisions of files and directories in an encrypted repository stored on different backends. + The full documentation can be found at https://restic.readthedocs.io/ . + Usage: restic [command] @@ -47,17 +49,19 @@ Usage help is available: version Print version information Flags: - --cacert file file to load root certificates from (default: use system certificates) + --cacert file file to load root certificates from (default: use system certificates or $RESTIC_CACERT) --cache-dir directory set the cache directory. (default: use system default cache directory) --cleanup-cache auto remove old cache directories --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) (default auto) -h, --help help for restic + --insecure-no-password use an empty password for the repository, must be passed to every restic command (insecure) --insecure-tls skip TLS certificate verification when connecting to the repository (insecure) --json set output mode to JSON for commands that support it --key-hint key key ID of key to try decrypting first (default: $RESTIC_KEY_HINT) --limit-download rate limits downloads to a maximum rate in KiB/s. (default: unlimited) --limit-upload rate limits uploads to a maximum rate in KiB/s. (default: unlimited) --no-cache do not use a local cache + --no-extra-verify skip additional verification of data before upload (see documentation) --no-lock do not lock the repository, this allows some operations on read-only repositories -o, --option key=value set extended option (key=value, can be specified multiple times) --pack-size size set target pack size in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) @@ -67,7 +71,7 @@ Usage help is available: -r, --repo repository repository to backup to or restore from (default: $RESTIC_REPOSITORY) --repository-file file file to read the repository location from (default: $RESTIC_REPOSITORY_FILE) --retry-lock duration retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) - --tls-client-cert file path to a file containing PEM encoded TLS client certificate and private key + --tls-client-cert file path to a file containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) -v, --verbose be verbose (specify multiple times or a level using --verbose=n, max level/times is 2) Use "restic [command] --help" for more information about a command. 
@@ -105,10 +109,10 @@ command: --files-from file read the files to backup from file (can be combined with file args; can be specified multiple times) --files-from-raw file read the files to backup from file (can be combined with file args; can be specified multiple times) --files-from-verbatim file read the files to backup from file (can be combined with file args; can be specified multiple times) - -f, --force force re-reading the target files/directories (overrides the "parent" flag) + -f, --force force re-reading the source files/directories (overrides the "parent" flag) -g, --group-by group group snapshots by host, paths and/or tags, separated by comma (disable grouping with '') (default host,paths) -h, --help help for backup - -H, --host hostname set the hostname for the snapshot manually. To prevent an expensive rescan use the "parent" flag + -H, --host hostname set the hostname for the snapshot manually (default: $RESTIC_HOST). To prevent an expensive rescan use the "parent" flag --iexclude pattern same as --exclude pattern but ignores the casing of filenames --iexclude-file file same as --exclude-file but ignores casing of filenames in patterns --ignore-ctime ignore ctime changes when checking for modified files @@ -119,22 +123,25 @@ command: --read-concurrency n read n files concurrently (default: $RESTIC_READ_CONCURRENCY or 2) --stdin read backup from stdin --stdin-filename filename filename to use when reading from stdin (default "stdin") + --stdin-from-command interpret arguments as command to execute and store its stdout --tag tags add tags for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times) (default []) --time time time of the backup (ex. '2012-11-01 22:08:41') (default: now) --use-fs-snapshot use filesystem snapshot where possible (currently only Windows VSS) --with-atime store the atime for all files and directories Global Flags: - --cacert file file to load root certificates from (default: use system certificates) + --cacert file file to load root certificates from (default: use system certificates or $RESTIC_CACERT) --cache-dir directory set the cache directory. (default: use system default cache directory) --cleanup-cache auto remove old cache directories --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) (default auto) + --insecure-no-password use an empty password for the repository, must be passed to every restic command (insecure) --insecure-tls skip TLS certificate verification when connecting to the repository (insecure) --json set output mode to JSON for commands that support it --key-hint key key ID of key to try decrypting first (default: $RESTIC_KEY_HINT) --limit-download rate limits downloads to a maximum rate in KiB/s. (default: unlimited) --limit-upload rate limits uploads to a maximum rate in KiB/s. 
(default: unlimited) --no-cache do not use a local cache + --no-extra-verify skip additional verification of data before upload (see documentation) --no-lock do not lock the repository, this allows some operations on read-only repositories -o, --option key=value set extended option (key=value, can be specified multiple times) --pack-size size set target pack size in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) @@ -144,7 +151,7 @@ command: -r, --repo repository repository to backup to or restore from (default: $RESTIC_REPOSITORY) --repository-file file file to read the repository location from (default: $RESTIC_REPOSITORY_FILE) --retry-lock duration retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) - --tls-client-cert file path to a file containing PEM encoded TLS client certificate and private key + --tls-client-cert file path to a file containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) -v, --verbose be verbose (specify multiple times or a level using --verbose=n, max level/times is 2) Subcommands that support showing progress information such as ``backup``, From 0ad0b7ca7cf6044de04a70d7415666be204712a9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 26 May 2024 12:37:24 +0200 Subject: [PATCH 310/893] bloblru: fix race condition that can compute value multiple times --- internal/bloblru/cache.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/internal/bloblru/cache.go b/internal/bloblru/cache.go index 4477e37a94b..1ff52094b9a 100644 --- a/internal/bloblru/cache.go +++ b/internal/bloblru/cache.go @@ -114,10 +114,19 @@ func (c *Cache) GetOrCompute(id restic.ID, compute func() ([]byte, error)) ([]by if isDownloading { // wait for result of parallel download <-waitForResult - blob, ok := c.Get(id) - if ok { - return blob, nil - } + } + + // try again. This is necessary independent of whether isDownloading is true or not. + // The calls to `c.Get()` and checking/adding the entry in `c.inProgress` are not atomic, + // thus the item might have been computed in the meantime. + // The following scenario would compute() the value multiple times otherwise: + // Goroutine A does not find a value in the initial call to `c.Get`, then goroutine B + // takes over, caches the computed value and cleans up its channel in c.inProgress. + // Then goroutine A continues, does not detect a parallel computation and would try + // to call compute() again. 
+ blob, ok = c.Get(id) + if ok { + return blob, nil } // download it From 21ce03cff27d4460450237a3a52975f97ccf7974 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 26 May 2024 12:38:20 +0200 Subject: [PATCH 311/893] bloblru: move defer outside critical section --- internal/bloblru/cache.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/bloblru/cache.go b/internal/bloblru/cache.go index 1ff52094b9a..161f1537594 100644 --- a/internal/bloblru/cache.go +++ b/internal/bloblru/cache.go @@ -100,7 +100,13 @@ func (c *Cache) GetOrCompute(id restic.ID, compute func() ([]byte, error)) ([]by waitForResult, isDownloading := c.inProgress[id] if !isDownloading { c.inProgress[id] = finish + } + c.mu.Unlock() + if isDownloading { + // wait for result of parallel download + <-waitForResult + } else { // remove progress channel once finished here defer func() { c.mu.Lock() @@ -109,12 +115,6 @@ func (c *Cache) GetOrCompute(id restic.ID, compute func() ([]byte, error)) ([]by close(finish) }() } - c.mu.Unlock() - - if isDownloading { - // wait for result of parallel download - <-waitForResult - } // try again. This is necessary independent of whether isDownloading is true or not. // The calls to `c.Get()` and checking/adding the entry in `c.inProgress` are not atomic, From 1c6067d93d61b33778d25f17da12481bdf7a5b84 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 26 May 2024 12:38:41 +0200 Subject: [PATCH 312/893] bloblru: variable name cleanup --- internal/bloblru/cache.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/bloblru/cache.go b/internal/bloblru/cache.go index 161f1537594..9981f8a87be 100644 --- a/internal/bloblru/cache.go +++ b/internal/bloblru/cache.go @@ -97,13 +97,13 @@ func (c *Cache) GetOrCompute(id restic.ID, compute func() ([]byte, error)) ([]by // check for parallel download or start our own finish := make(chan struct{}) c.mu.Lock() - waitForResult, isDownloading := c.inProgress[id] - if !isDownloading { + waitForResult, isComputing := c.inProgress[id] + if !isComputing { c.inProgress[id] = finish } c.mu.Unlock() - if isDownloading { + if isComputing { // wait for result of parallel download <-waitForResult } else { @@ -116,7 +116,7 @@ func (c *Cache) GetOrCompute(id restic.ID, compute func() ([]byte, error)) ([]by }() } - // try again. This is necessary independent of whether isDownloading is true or not. + // try again. This is necessary independent of whether isComputing is true or not. // The calls to `c.Get()` and checking/adding the entry in `c.inProgress` are not atomic, // thus the item might have been computed in the meantime. // The following scenario would compute() the value multiple times otherwise: From a651af70d692a80108540a50610b947183cbcd8f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 26 May 2024 14:32:09 +0200 Subject: [PATCH 313/893] doc: fix warings --- doc/040_backup.rst | 6 +++--- doc/050_restore.rst | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 782aa703060..efaa73255eb 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -514,10 +514,10 @@ written, and the next backup needs to write new metadata again. If you really want to save the access time for files and directories, you can pass the ``--with-atime`` option to the ``backup`` command. -Backing up full security descriptors on Windows is only possible when the user -has ``SeBackupPrivilege``privilege or is running as admin. 
This is a restriction +Backing up full security descriptors on Windows is only possible when the user +has ``SeBackupPrivilege`` privilege or is running as admin. This is a restriction of Windows not restic. -If either of these conditions are not met, only the owner, group and DACL will +If either of these conditions are not met, only the owner, group and DACL will be backed up. Note that ``restic`` does not back up some metadata associated with files. Of diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 193a00870f3..ce17a1cf736 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -184,4 +184,5 @@ It is also possible to ``dump`` the contents of a selected snapshot and folder structure to a file using the ``--target`` flag. .. code-block:: console - $ restic -r /srv/restic-repo dump latest / --target /home/linux.user/output.tar -a tar \ No newline at end of file + + $ restic -r /srv/restic-repo dump latest / --target /home/linux.user/output.tar -a tar From 46f04bc5f0c884ccbf513b7a6d097649b62cc5fb Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Tue, 21 May 2024 12:15:22 +0200 Subject: [PATCH 314/893] termstatus: Do formatting in Message, not Terminal --- internal/ui/message.go | 14 +++++++++----- internal/ui/termstatus/status.go | 12 ------------ 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/internal/ui/message.go b/internal/ui/message.go index 75e54b01920..38cdaf30117 100644 --- a/internal/ui/message.go +++ b/internal/ui/message.go @@ -1,6 +1,10 @@ package ui -import "github.com/restic/restic/internal/ui/termstatus" +import ( + "fmt" + + "github.com/restic/restic/internal/ui/termstatus" +) // Message reports progress with messages of different verbosity. type Message struct { @@ -19,27 +23,27 @@ func NewMessage(term *termstatus.Terminal, verbosity uint) *Message { // E reports an error func (m *Message) E(msg string, args ...interface{}) { - m.term.Errorf(msg, args...) + m.term.Error(fmt.Sprintf(msg, args...)) } // P prints a message if verbosity >= 1, this is used for normal messages which // are not errors. func (m *Message) P(msg string, args ...interface{}) { if m.v >= 1 { - m.term.Printf(msg, args...) + m.term.Print(fmt.Sprintf(msg, args...)) } } // V prints a message if verbosity >= 2, this is used for verbose messages. func (m *Message) V(msg string, args ...interface{}) { if m.v >= 2 { - m.term.Printf(msg, args...) + m.term.Print(fmt.Sprintf(msg, args...)) } } // VV prints a message if verbosity >= 3, this is used for debug messages. func (m *Message) VV(msg string, args ...interface{}) { if m.v >= 3 { - m.term.Printf(msg, args...) + m.term.Print(fmt.Sprintf(msg, args...)) } } diff --git a/internal/ui/termstatus/status.go b/internal/ui/termstatus/status.go index fc731b02339..4a73ce5b3fb 100644 --- a/internal/ui/termstatus/status.go +++ b/internal/ui/termstatus/status.go @@ -252,23 +252,11 @@ func (t *Terminal) Print(line string) { t.print(line, false) } -// Printf uses fmt.Sprintf to write a line to the terminal. -func (t *Terminal) Printf(msg string, args ...interface{}) { - s := fmt.Sprintf(msg, args...) - t.Print(s) -} - // Error writes an error to the terminal. func (t *Terminal) Error(line string) { t.print(line, true) } -// Errorf uses fmt.Sprintf to write an error line to the terminal. -func (t *Terminal) Errorf(msg string, args ...interface{}) { - s := fmt.Sprintf(msg, args...) - t.Error(s) -} - // Truncate s to fit in width (number of terminal cells) w. 
// If w is negative, returns the empty string. func Truncate(s string, w int) string { From 939b537c8037c49a888670932927754750dde5a6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 25 May 2024 21:20:23 +0200 Subject: [PATCH 315/893] check/migrate: convert to use termstatus to prevent mangled output Errors reported by check would result in corrupted output. --- cmd/restic/cmd_check.go | 84 +++++++++++++----------- cmd/restic/cmd_check_integration_test.go | 8 ++- cmd/restic/cmd_check_test.go | 3 +- cmd/restic/cmd_migrate.go | 39 ++++++----- cmd/restic/cmd_prune_integration_test.go | 8 ++- cmd/restic/integration_test.go | 5 +- 6 files changed, 85 insertions(+), 62 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index c4ae1c94179..e5aebc6e4c9 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -18,6 +18,8 @@ import ( "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" + "github.com/restic/restic/internal/ui/progress" + "github.com/restic/restic/internal/ui/termstatus" ) var cmdCheck = &cobra.Command{ @@ -37,7 +39,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return runCheck(cmd.Context(), checkOptions, globalOptions, args) + term, cancel := setupTermstatus() + defer cancel() + return runCheck(cmd.Context(), checkOptions, globalOptions, args, term) }, PreRunE: func(_ *cobra.Command, _ []string) error { return checkFlags(checkOptions) @@ -155,7 +159,7 @@ func parsePercentage(s string) (float64, error) { // - if the user explicitly requested --no-cache, we don't use any cache // - if the user provides --cache-dir, we use a cache in a temporary sub-directory of the specified directory and the sub-directory is deleted after the check // - by default, we use a cache in a temporary directory that is deleted after the check -func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions) (cleanup func()) { +func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions, printer progress.Printer) (cleanup func()) { cleanup = func() {} if opts.WithCache { // use the default cache, no setup needed @@ -182,34 +186,36 @@ func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions) (cleanup func()) tempdir, err := os.MkdirTemp(cachedir, "restic-check-cache-") if err != nil { // if an error occurs, don't use any cache - Warnf("unable to create temporary directory for cache during check, disabling cache: %v\n", err) + printer.E("unable to create temporary directory for cache during check, disabling cache: %v\n", err) gopts.NoCache = true return cleanup } gopts.CacheDir = tempdir - Verbosef("using temporary cache in %v\n", tempdir) + printer.P("using temporary cache in %v\n", tempdir) cleanup = func() { err := fs.RemoveAll(tempdir) if err != nil { - Warnf("error removing temporary cache directory: %v\n", err) + printer.E("error removing temporary cache directory: %v\n", err) } } return cleanup } -func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args []string) error { +func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args []string, term *termstatus.Terminal) error { if len(args) != 0 { return errors.Fatal("the check command expects no arguments, only options - please see `restic help check` for usage and flags") } - cleanup := prepareCheckCache(opts, &gopts) + printer := 
newTerminalProgressPrinter(gopts.verbosity, term) + + cleanup := prepareCheckCache(opts, &gopts, printer) defer cleanup() if !gopts.NoLock { - Verbosef("create exclusive lock for repository\n") + printer.P("create exclusive lock for repository\n") } ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, gopts.NoLock) if err != nil { @@ -223,8 +229,8 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args return err } - Verbosef("load indexes\n") - bar := newIndexProgress(gopts.Quiet, gopts.JSON) + printer.P("load indexes\n") + bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) hints, errs := chkr.LoadIndex(ctx, bar) if ctx.Err() != nil { return ctx.Err() @@ -237,34 +243,34 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args for _, hint := range hints { switch hint.(type) { case *checker.ErrDuplicatePacks: - Printf("%v\n", hint) + term.Print(hint.Error()) suggestIndexRebuild = true case *checker.ErrOldIndexFormat: - Warnf("error: %v\n", hint) + printer.E("error: %v\n", hint) suggestLegacyIndexRebuild = true errorsFound = true case *checker.ErrMixedPack: - Printf("%v\n", hint) + term.Print(hint.Error()) mixedFound = true default: - Warnf("error: %v\n", hint) + printer.E("error: %v\n", hint) errorsFound = true } } if suggestIndexRebuild { - Printf("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n") + term.Print("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n") } if suggestLegacyIndexRebuild { - Warnf("Found indexes using the legacy format, you must run `restic repair index' to correct this.\n") + printer.E("Found indexes using the legacy format, you must run `restic repair index' to correct this.\n") } if mixedFound { - Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n") + term.Print("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n") } if len(errs) > 0 { for _, err := range errs { - Warnf("error: %v\n", err) + printer.E("error: %v\n", err) } return errors.Fatal("LoadIndex returned errors") } @@ -272,36 +278,36 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args orphanedPacks := 0 errChan := make(chan error) - Verbosef("check all packs\n") + printer.P("check all packs\n") go chkr.Packs(ctx, errChan) for err := range errChan { if checker.IsOrphanedPack(err) { orphanedPacks++ - Verbosef("%v\n", err) + printer.P("%v\n", err) } else if err == checker.ErrLegacyLayout { - Verbosef("repository still uses the S3 legacy layout\nPlease run `restic migrate s3legacy` to correct this.\n") + printer.P("repository still uses the S3 legacy layout\nPlease run `restic migrate s3legacy` to correct this.\n") } else { errorsFound = true - Warnf("%v\n", err) + printer.E("%v\n", err) } } if orphanedPacks > 0 { - Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) + printer.P("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) } if ctx.Err() != nil { return ctx.Err() } - Verbosef("check snapshots, trees and blobs\n") + printer.P("check snapshots, trees and blobs\n") errChan = make(chan error) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - bar := newProgressMax(!gopts.Quiet, 
0, "snapshots") + bar := newTerminalProgressMax(!gopts.Quiet, 0, "snapshots", term) defer bar.Done() chkr.Structure(ctx, bar, errChan) }() @@ -313,12 +319,12 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args if stdoutCanUpdateStatus() { clean = clearLine(0) } - Warnf(clean+"error for tree %v:\n", e.ID.Str()) + printer.E(clean+"error for tree %v:\n", e.ID.Str()) for _, treeErr := range e.Errors { - Warnf(" %v\n", treeErr) + printer.E(" %v\n", treeErr) } } else { - Warnf("error: %v\n", err) + printer.E("error: %v\n", err) } } @@ -336,7 +342,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args return err } for _, id := range unused { - Verbosef("unused blob %v\n", id) + printer.P("unused blob %v\n", id) errorsFound = true } } @@ -344,7 +350,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args doReadData := func(packs map[restic.ID]int64) { packCount := uint64(len(packs)) - p := newProgressMax(!gopts.Quiet, packCount, "packs") + p := newTerminalProgressMax(!gopts.Quiet, packCount, "packs", term) errChan := make(chan error) go chkr.ReadPacks(ctx, packs, p, errChan) @@ -353,7 +359,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args for err := range errChan { errorsFound = true - Warnf("%v\n", err) + printer.E("%v\n", err) if err, ok := err.(*repository.ErrPackData); ok { salvagePacks = append(salvagePacks, err.PackID) } @@ -361,19 +367,19 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args p.Done() if len(salvagePacks) > 0 { - Warnf("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands. Please read the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html first.\n\n") + printer.E("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands. Please read the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html first.\n\n") var strIDs []string for _, id := range salvagePacks { strIDs = append(strIDs, id.String()) } - Warnf("restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " ")) - Warnf("Corrupted blobs are either caused by hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") + printer.E("restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " ")) + printer.E("Corrupted blobs are either caused by hardware problems or bugs in restic. 
Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") } } switch { case opts.ReadData: - Verbosef("read all data\n") + printer.P("read all data\n") doReadData(selectPacksByBucket(chkr.GetPacks(), 1, 1)) case opts.ReadDataSubset != "": var packs map[restic.ID]int64 @@ -383,12 +389,12 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args totalBuckets := dataSubset[1] packs = selectPacksByBucket(chkr.GetPacks(), bucket, totalBuckets) packCount := uint64(len(packs)) - Verbosef("read group #%d of %d data packs (out of total %d packs in %d groups)\n", bucket, packCount, chkr.CountPacks(), totalBuckets) + printer.P("read group #%d of %d data packs (out of total %d packs in %d groups)\n", bucket, packCount, chkr.CountPacks(), totalBuckets) } else if strings.HasSuffix(opts.ReadDataSubset, "%") { percentage, err := parsePercentage(opts.ReadDataSubset) if err == nil { packs = selectRandomPacksByPercentage(chkr.GetPacks(), percentage) - Verbosef("read %.1f%% of data packs\n", percentage) + printer.P("read %.1f%% of data packs\n", percentage) } } else { repoSize := int64(0) @@ -404,7 +410,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args subsetSize = repoSize } packs = selectRandomPacksByFileSize(chkr.GetPacks(), subsetSize, repoSize) - Verbosef("read %d bytes of data packs\n", subsetSize) + printer.P("read %d bytes of data packs\n", subsetSize) } if packs == nil { return errors.Fatal("internal error: failed to select packs to check") @@ -419,7 +425,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args if errorsFound { return errors.Fatal("repository contains errors") } - Verbosef("no errors were found\n") + printer.P("no errors were found\n") return nil } diff --git a/cmd/restic/cmd_check_integration_test.go b/cmd/restic/cmd_check_integration_test.go index 9eb4fec62a1..f1e6517e093 100644 --- a/cmd/restic/cmd_check_integration_test.go +++ b/cmd/restic/cmd_check_integration_test.go @@ -1,10 +1,12 @@ package main import ( + "bytes" "context" "testing" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunCheck(t testing.TB, gopts GlobalOptions) { @@ -23,12 +25,14 @@ func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) { } func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) { - buf, err := withCaptureStdout(func() error { + buf := bytes.NewBuffer(nil) + gopts.stdout = buf + err := withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { opts := CheckOptions{ ReadData: true, CheckUnused: checkUnused, } - return runCheck(context.TODO(), opts, gopts, nil) + return runCheck(context.TODO(), opts, gopts, nil, term) }) return buf.String(), err } diff --git a/cmd/restic/cmd_check_test.go b/cmd/restic/cmd_check_test.go index 15003ee48ae..9516ce4203f 100644 --- a/cmd/restic/cmd_check_test.go +++ b/cmd/restic/cmd_check_test.go @@ -10,6 +10,7 @@ import ( "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" ) func TestParsePercentage(t *testing.T) { @@ -201,7 +202,7 @@ func TestPrepareCheckCache(t *testing.T) { rtest.OK(t, err) } gopts := GlobalOptions{CacheDir: tmpDirBase} - cleanup := prepareCheckCache(testCase.opts, &gopts) + cleanup := prepareCheckCache(testCase.opts, &gopts, &progress.NoopPrinter{}) files, err := os.ReadDir(tmpDirBase) rtest.OK(t, err) diff --git 
a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index c3f82b8dd91..0f9bdde1ac3 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -5,6 +5,8 @@ import ( "github.com/restic/restic/internal/migrations" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" ) @@ -24,7 +26,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return runMigrate(cmd.Context(), migrateOptions, globalOptions, args) + term, cancel := setupTermstatus() + defer cancel() + return runMigrate(cmd.Context(), migrateOptions, globalOptions, args, term) }, } @@ -41,8 +45,8 @@ func init() { f.BoolVarP(&migrateOptions.Force, "force", "f", false, `apply a migration a second time`) } -func checkMigrations(ctx context.Context, repo restic.Repository) error { - Printf("available migrations:\n") +func checkMigrations(ctx context.Context, repo restic.Repository, printer progress.Printer) error { + printer.P("available migrations:\n") found := false for _, m := range migrations.All { @@ -52,19 +56,19 @@ func checkMigrations(ctx context.Context, repo restic.Repository) error { } if ok { - Printf(" %v\t%v\n", m.Name(), m.Desc()) + printer.P(" %v\t%v\n", m.Name(), m.Desc()) found = true } } if !found { - Printf("no migrations found\n") + printer.P("no migrations found\n") } return nil } -func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, repo restic.Repository, args []string) error { +func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, repo restic.Repository, args []string, term *termstatus.Terminal, printer progress.Printer) error { var firsterr error for _, name := range args { for _, m := range migrations.All { @@ -79,36 +83,37 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptio if reason == "" { reason = "check failed" } - Warnf("migration %v cannot be applied: %v\nIf you want to apply this migration anyway, re-run with option --force\n", m.Name(), reason) + printer.E("migration %v cannot be applied: %v\nIf you want to apply this migration anyway, re-run with option --force\n", m.Name(), reason) continue } - Warnf("check for migration %v failed, continuing anyway\n", m.Name()) + printer.E("check for migration %v failed, continuing anyway\n", m.Name()) } if m.RepoCheck() { - Printf("checking repository integrity...\n") + printer.P("checking repository integrity...\n") checkOptions := CheckOptions{} checkGopts := gopts // the repository is already locked checkGopts.NoLock = true - err = runCheck(ctx, checkOptions, checkGopts, []string{}) + + err = runCheck(ctx, checkOptions, checkGopts, []string{}, term) if err != nil { return err } } - Printf("applying migration %v...\n", m.Name()) + printer.P("applying migration %v...\n", m.Name()) if err = m.Apply(ctx, repo); err != nil { - Warnf("migration %v failed: %v\n", m.Name(), err) + printer.E("migration %v failed: %v\n", m.Name(), err) if firsterr == nil { firsterr = err } continue } - Printf("migration %v: success\n", m.Name()) + printer.P("migration %v: success\n", m.Name()) } } } @@ -116,7 +121,9 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptio return firsterr } -func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, args []string) error { +func runMigrate(ctx context.Context, 
opts MigrateOptions, gopts GlobalOptions, args []string, term *termstatus.Terminal) error { + printer := newTerminalProgressPrinter(gopts.verbosity, term) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err @@ -124,8 +131,8 @@ func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, a defer unlock() if len(args) == 0 { - return checkMigrations(ctx, repo) + return checkMigrations(ctx, repo, printer) } - return applyMigrations(ctx, opts, gopts, repo, args) + return applyMigrations(ctx, opts, gopts, repo, args, term, printer) } diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index 5eb16a2ea0e..19c430716fc 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -111,7 +111,9 @@ func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) { createPrunableRepo(t, env) testRunPrune(t, env.gopts, pruneOpts) - rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil)) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runCheck(context.TODO(), checkOpts, env.gopts, nil, term) + })) } var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"} @@ -218,7 +220,9 @@ func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, o if checkOK { testRunCheck(t, env.gopts) } else { - rtest.Assert(t, runCheck(context.TODO(), optionsCheck, env.gopts, nil) != nil, + rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runCheck(context.TODO(), optionsCheck, env.gopts, nil, term) + }) != nil, "check should have reported an error") } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index a7b66add895..4cecec6bc06 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -87,8 +87,9 @@ func TestListOnce(t *testing.T) { createPrunableRepo(t, env) testRunPrune(t, env.gopts, pruneOpts) - rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil)) - + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runCheck(context.TODO(), checkOpts, env.gopts, nil, term) + })) rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term) })) From 13127f3f6bf2e88683ae5795a90415974e68f901 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 25 May 2024 21:29:18 +0200 Subject: [PATCH 316/893] check: remove output mangling workaround --- cmd/restic/cmd_check.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index e5aebc6e4c9..642a46c0006 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -315,11 +315,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args for err := range errChan { errorsFound = true if e, ok := err.(*checker.TreeError); ok { - var clean string - if stdoutCanUpdateStatus() { - clean = clearLine(0) - } - printer.E(clean+"error for tree %v:\n", e.ID.Str()) + printer.E("error for tree %v:\n", e.ID.Str()) for _, treeErr := range e.Errors { printer.E(" %v\n", treeErr) } From 6869bdaaa8b86ae99afd7ce2dc3d467c00e186fa Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 22 May 2024 16:38:00 +0200 Subject: [PATCH 317/893] backup: implement --skip-if-unchanged --- 
cmd/restic/cmd_backup.go | 20 ++++++++++---------- cmd/restic/cmd_backup_integration_test.go | 15 +++++++++++++++ doc/075_scripting.rst | 3 ++- internal/archiver/archiver.go | 9 +++++++++ internal/ui/backup/json.go | 9 +++++++-- internal/ui/backup/text.go | 10 +++++++++- 6 files changed, 52 insertions(+), 14 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index e5369f7b936..434469683a0 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -87,6 +87,7 @@ type BackupOptions struct { DryRun bool ReadConcurrency uint NoScan bool + SkipIfUnchanged bool } var backupOptions BackupOptions @@ -133,6 +134,7 @@ func init() { if runtime.GOOS == "windows" { f.BoolVar(&backupOptions.UseFsSnapshot, "use-fs-snapshot", false, "use filesystem snapshot where possible (currently only Windows VSS)") } + f.BoolVar(&backupOptions.SkipIfUnchanged, "skip-if-unchanged", false, "skip snapshot creation if identical to parent snapshot") // parse read concurrency from env, on error the default value will be used readConcurrency, _ := strconv.ParseUint(os.Getenv("RESTIC_READ_CONCURRENCY"), 10, 32) @@ -638,13 +640,14 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } snapshotOpts := archiver.SnapshotOptions{ - Excludes: opts.Excludes, - Tags: opts.Tags.Flatten(), - BackupStart: backupStart, - Time: timeStamp, - Hostname: opts.Host, - ParentSnapshot: parentSnapshot, - ProgramVersion: "restic " + version, + Excludes: opts.Excludes, + Tags: opts.Tags.Flatten(), + BackupStart: backupStart, + Time: timeStamp, + Hostname: opts.Host, + ParentSnapshot: parentSnapshot, + ProgramVersion: "restic " + version, + SkipIfUnchanged: opts.SkipIfUnchanged, } if !gopts.JSON { @@ -665,9 +668,6 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter // Report finished execution progressReporter.Finish(id, summary, opts.DryRun) - if !gopts.JSON && !opts.DryRun { - progressPrinter.P("snapshot %s saved\n", id.Str()) - } if !success { return ErrInvalidSourceData } diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index f7372851f42..5e00b84b0e5 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -641,3 +641,18 @@ func TestBackupEmptyPassword(t *testing.T) { testListSnapshots(t, env.gopts, 1) testRunCheck(t, env.gopts) } + +func TestBackupSkipIfUnchanged(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{SkipIfUnchanged: true} + + for i := 0; i < 3; i++ { + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testListSnapshots(t, env.gopts, 1) + } + + testRunCheck(t, env.gopts) +} diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 28419c29273..e413e349f90 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -173,7 +173,8 @@ Summary is the last output line in a successful backup. +---------------------------+---------------------------------------------------------+ | ``total_duration`` | Total time it took for the operation to complete | +---------------------------+---------------------------------------------------------+ -| ``snapshot_id`` | ID of the new snapshot | +| ``snapshot_id`` | ID of the new snapshot. 
Field is omitted if snapshot | +| | creation was skipped | +---------------------------+---------------------------------------------------------+ diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 86b329a9a20..9a31911b972 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -767,6 +767,8 @@ type SnapshotOptions struct { Time time.Time ParentSnapshot *restic.Snapshot ProgramVersion string + // SkipIfUnchanged omits the snapshot creation if it is identical to the parent snapshot. + SkipIfUnchanged bool } // loadParentTree loads a tree referenced by snapshot id. If id is null, nil is returned. @@ -880,6 +882,13 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps return nil, restic.ID{}, nil, err } + if opts.ParentSnapshot != nil && opts.SkipIfUnchanged { + ps := opts.ParentSnapshot + if ps.Tree != nil && rootTreeID.Equal(*ps.Tree) { + return nil, restic.ID{}, arch.summary, nil + } + } + sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time) if err != nil { return nil, restic.ID{}, nil, err diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index a14c7ccecaf..64b5de13bd1 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -164,6 +164,11 @@ func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) { // Finish prints the finishing messages. func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { + id := "" + // empty if snapshot creation was skipped + if !snapshotID.IsNull() { + id = snapshotID.String() + } b.print(summaryOutput{ MessageType: "summary", FilesNew: summary.Files.New, @@ -179,7 +184,7 @@ func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *ar TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged, TotalBytesProcessed: summary.ProcessedBytes, TotalDuration: time.Since(start).Seconds(), - SnapshotID: snapshotID.String(), + SnapshotID: id, DryRun: dryRun, }) } @@ -235,6 +240,6 @@ type summaryOutput struct { TotalFilesProcessed uint `json:"total_files_processed"` TotalBytesProcessed uint64 `json:"total_bytes_processed"` TotalDuration float64 `json:"total_duration"` // in seconds - SnapshotID string `json:"snapshot_id"` + SnapshotID string `json:"snapshot_id,omitempty"` DryRun bool `json:"dry_run,omitempty"` } diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go index 00d025e51f8..43e963b82d5 100644 --- a/internal/ui/backup/text.go +++ b/internal/ui/backup/text.go @@ -126,7 +126,7 @@ func (b *TextProgress) Reset() { } // Finish prints the finishing messages. 
-func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { +func (b *TextProgress) Finish(id restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { b.P("\n") b.P("Files: %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged) b.P("Dirs: %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged) @@ -145,4 +145,12 @@ func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *archiver.Su ui.FormatBytes(summary.ProcessedBytes), ui.FormatDuration(time.Since(start)), ) + + if !dryRun { + if id.IsNull() { + b.P("skipped creating snapshot\n") + } else { + b.P("snapshot %s saved\n", id.Str()) + } + } } From 5649334099ec4b9113182085f0eb70ac82706e05 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 22 May 2024 16:38:39 +0200 Subject: [PATCH 318/893] backup: document --skip-if-unchanged --- doc/040_backup.rst | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index efaa73255eb..88b8962808d 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -231,6 +231,40 @@ On **Windows**, a file is considered unchanged when its path, size and modification time match, and only ``--force`` has any effect. The other options are recognized but ignored. +Skip creating snapshots if unchanged +************************************ + +By default, restic always creates a new snapshot even if nothing has changed +compared to the parent snapshot. To omit the creation of a new snapshot in this +case, specify the ``--skip-if-unchanged`` option. + +Note that when using absolute paths to specify the backup target, then also +changes to the parent folders result in a changed snapshot. For example, a backup +of ``/home/user/work`` will create a new snapshot if the metadata of either + ``/``, ``/home`` or ``/home/user`` change. To avoid this problem run restic from +the corresponding folder and use relative paths. + +.. code-block:: console + + $ cd /home/user/work && restic -r /srv/restic-repo backup . --skip-if-unchanged + + open repository + enter password for repository: + repository a14e5863 opened (version 2, compression level auto) + load index files + using parent snapshot 40dc1520 + start scan on [.] + start backup on [.] 
+ scan finished in 1.814s: 5307 files, 1.200GiB + + Files: 0 new, 0 changed, 5307 unmodified + Dirs: 0 new, 0 changed, 1867 unmodified + Added to the repository: 0 B (0 B stored) + + processed 5307 files, 1.720 GiB in 0:03 + skipped creating snapshot + + Dry Runs ******** From 513135b2389309429dedbb1a03bb7823eb2e2555 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 22 May 2024 16:39:10 +0200 Subject: [PATCH 319/893] doc: slightly modernize backup examples --- doc/040_backup.rst | 38 +++++++++++++++++----------------- doc/045_working_with_repos.rst | 8 +++---- doc/060_forget.rst | 6 +++--- doc/manual_rest.rst | 3 --- 4 files changed, 26 insertions(+), 29 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 88b8962808d..a07413ad9c5 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -24,16 +24,17 @@ again: $ restic -r /srv/restic-repo --verbose backup ~/work open repository enter password for repository: - password is correct - lock repository + repository a14e5863 opened (version 2, compression level auto) load index files - start scan - start backup - scan finished in 1.837s - processed 1.720 GiB in 0:12 + start scan on [/home/user/work] + start backup on [/home/user/work] + scan finished in 1.837s: 5307 files, 1.720 GiB + Files: 5307 new, 0 changed, 0 unmodified Dirs: 1867 new, 0 changed, 0 unmodified - Added: 1.200 GiB + Added to the repository: 1.200 GiB (1.103 GiB stored) + + processed 5307 files, 1.720 GiB in 0:12 snapshot 40dc1520 saved As you can see, restic created a backup of the directory and was pretty @@ -44,6 +45,7 @@ You can see that restic tells us it processed 1.720 GiB of data, this is the size of the files and directories in ``~/work`` on the local file system. It also tells us that only 1.200 GiB was added to the repository. This means that some of the data was duplicate and restic was able to efficiently reduce it. +The data compression also managed to compress the data down to 1.103 GiB. If you don't pass the ``--verbose`` option, restic will print less data. You'll still get a nice live status display. Be aware that the live status shows the @@ -109,17 +111,18 @@ repository (since all data is already there). This is de-duplication at work! 
$ restic -r /srv/restic-repo --verbose backup ~/work open repository enter password for repository: - password is correct - lock repository + repository a14e5863 opened (version 2, compression level auto) load index files - using parent snapshot d875ae93 - start scan - start backup - scan finished in 1.881s - processed 1.720 GiB in 0:03 + using parent snapshot 40dc1520 + start scan on [/home/user/work] + start backup on [/home/user/work] + scan finished in 1.881s: 5307 files, 1.720 GiB + Files: 0 new, 0 changed, 5307 unmodified Dirs: 0 new, 0 changed, 1867 unmodified - Added: 0 B + Added to the repository: 0 B (0 B stored) + + processed 5307 files, 1.720 GiB in 0:03 snapshot 79766175 saved You can even backup individual files in the same repository (not passing @@ -129,7 +132,6 @@ You can even backup individual files in the same repository (not passing $ restic -r /srv/restic-repo backup ~/work.txt enter password for repository: - password is correct snapshot 249d0210 saved If you're interested in what restic does, pass ``--verbose`` twice (or @@ -143,7 +145,6 @@ restic encounters: $ restic -r /srv/restic-repo --verbose --verbose backup ~/work.txt open repository enter password for repository: - password is correct lock repository load index files using parent snapshot f3f8d56b @@ -255,7 +256,7 @@ the corresponding folder and use relative paths. using parent snapshot 40dc1520 start scan on [.] start backup on [.] - scan finished in 1.814s: 5307 files, 1.200GiB + scan finished in 1.814s: 5307 files, 1.720 GiB Files: 0 new, 0 changed, 5307 unmodified Dirs: 0 new, 0 changed, 1867 unmodified @@ -503,7 +504,6 @@ and displays a small statistic, just pass the command two snapshot IDs: .. code-block:: console $ restic -r /srv/restic-repo diff 5845b002 2ab627a6 - password is correct comparing snapshot ea657ce5 to 2ab627a6: C /restic/cmd_diff.go diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 85c022580cc..9d6167895cd 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -163,8 +163,8 @@ example from a local to a remote repository, you can use the ``copy`` command: .. code-block:: console $ restic -r /srv/restic-repo-copy copy --from-repo /srv/restic-repo - repository d6504c63 opened successfully, password is correct - repository 3dd0878c opened successfully, password is correct + repository d6504c63 opened successfully + repository 3dd0878c opened successfully snapshot 410b18a2 of [/home/user/work] at 2020-06-09 23:15:57.305305 +0200 CEST by user@kasimir copy started, this may take a while... @@ -263,7 +263,7 @@ the unwanted files from affected snapshots by rewriting them using the .. 
code-block:: console $ restic -r /srv/restic-repo rewrite --exclude secret-file - repository c881945a opened (repository version 2) successfully, password is correct + repository c881945a opened (repository version 2) successfully snapshot 6160ddb2 of [/home/user/work] at 2022-06-12 16:01:28.406630608 +0200 CEST by user@kasimir excluding /home/user/work/secret-file @@ -274,7 +274,7 @@ the unwanted files from affected snapshots by rewriting them using the modified 1 snapshots $ restic -r /srv/restic-repo rewrite --exclude secret-file 6160ddb2 - repository c881945a opened (repository version 2) successfully, password is correct + repository c881945a opened (repository version 2) successfully snapshot 6160ddb2 of [/home/user/work] at 2022-06-12 16:01:28.406630608 +0200 CEST by user@kasimir excluding /home/user/work/secret-file diff --git a/doc/060_forget.rst b/doc/060_forget.rst index b8d206c97c1..fe0236f124d 100644 --- a/doc/060_forget.rst +++ b/doc/060_forget.rst @@ -80,7 +80,7 @@ command must be run: $ restic -r /srv/restic-repo prune enter password for repository: - repository 33002c5e opened successfully, password is correct + repository 33002c5e opened successfully loading all snapshots... loading indexes... finding data that is still in use for 4 snapshots @@ -265,7 +265,7 @@ Sunday for 12 weeks: .. code-block:: console $ restic snapshots - repository f00c6e2a opened successfully, password is correct + repository f00c6e2a opened successfully ID Time Host Tags Paths --------------------------------------------------------------- 0a1f9759 2019-09-01 11:00:00 mopped /home/user/work @@ -289,7 +289,7 @@ four Sundays, and remove the other snapshots: .. code-block:: console $ restic forget --keep-daily 4 --dry-run - repository f00c6e2a opened successfully, password is correct + repository f00c6e2a opened successfully Applying Policy: keep the last 4 daily snapshots keep 4 snapshots: ID Time Host Tags Reasons Paths diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index 188ebcb7099..3f8b3a2c7da 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -329,7 +329,6 @@ required to restore the latest snapshot (from any host that made it): .. code-block:: console $ restic stats latest - password is correct Total File Count: 10538 Total Size: 37.824 GiB @@ -340,7 +339,6 @@ host by using the ``--host`` flag: .. code-block:: console $ restic stats --host myserver latest - password is correct Total File Count: 21766 Total Size: 481.783 GiB @@ -357,7 +355,6 @@ has restic's deduplication helped? We can check: .. code-block:: console $ restic stats --host myserver --mode raw-data latest - password is correct Total Blob Count: 340847 Total Size: 458.663 GiB From c103c0830e41a7fe9aba599094c87b33f27be517 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 22 May 2024 16:45:09 +0200 Subject: [PATCH 320/893] add changelog for skip-if-unchanged --- changelog/unreleased/issue-662 | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 changelog/unreleased/issue-662 diff --git a/changelog/unreleased/issue-662 b/changelog/unreleased/issue-662 new file mode 100644 index 00000000000..e5a7c6fe80d --- /dev/null +++ b/changelog/unreleased/issue-662 @@ -0,0 +1,11 @@ +Enhancement: `backup` can omit snapshot creation if there was no change + +The `backup` command always created a snapshot even if nothing changed +compared to the parent snapshot. 
+ +Restic now supports the `--skip-if-unchanged` option for the `backup` +command to omit creating a snapshot if the new snapshot's content would +be identical to that of the parent snapshot. + +https://github.com/restic/restic/issues/662 +https://github.com/restic/restic/pull/4816 From de7b418bbe2b351e0d7c159428edc4daaace12be Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Mon, 27 May 2024 03:33:11 +0530 Subject: [PATCH 321/893] http: allow custom `User-Agent` for outgoing HTTP requests --- changelog/unreleased/issue-4768 | 8 +++ cmd/restic/global.go | 5 ++ doc/manual_rest.rst | 2 + internal/backend/http_transport.go | 10 ++++ .../backend/httpuseragent_roundtripper.go | 25 ++++++++++ .../httpuseragent_roundtripper_test.go | 50 +++++++++++++++++++ 6 files changed, 100 insertions(+) create mode 100644 changelog/unreleased/issue-4768 create mode 100644 internal/backend/httpuseragent_roundtripper.go create mode 100644 internal/backend/httpuseragent_roundtripper_test.go diff --git a/changelog/unreleased/issue-4768 b/changelog/unreleased/issue-4768 new file mode 100644 index 00000000000..dd52089e1d3 --- /dev/null +++ b/changelog/unreleased/issue-4768 @@ -0,0 +1,8 @@ +Enhancement: Allow custom User-Agent to be specified for outgoing requests + +Restic now permits setting a custom `User-Agent` for outgoing HTTP requests +using the global flag `--http-user-agent` or the `RESTIC_HTTP_USER_AGENT` +environment variable. + +https://github.com/restic/restic/issues/4768 +https://github.com/restic/restic/pull/4810 \ No newline at end of file diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 144445cc0a4..a5250ca38ec 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -135,6 +135,7 @@ func init() { f.IntVar(&globalOptions.Limits.DownloadKb, "limit-download", 0, "limits downloads to a maximum `rate` in KiB/s. 
(default: unlimited)") f.UintVar(&globalOptions.PackSize, "pack-size", 0, "set target pack `size` in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)") f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)") + f.StringVar(&globalOptions.HTTPUserAgent, "http-user-agent", "", "set a http user agent for outgoing http requests") // Use our "generate" command instead of the cobra provided "completion" command cmdRoot.CompletionOptions.DisableDefaultCmd = true @@ -155,6 +156,10 @@ func init() { // parse target pack size from env, on error the default value will be used targetPackSize, _ := strconv.ParseUint(os.Getenv("RESTIC_PACK_SIZE"), 10, 32) globalOptions.PackSize = uint(targetPackSize) + + if os.Getenv("RESTIC_HTTP_USER_AGENT") != "" { + globalOptions.HTTPUserAgent = os.Getenv("RESTIC_HTTP_USER_AGENT") + } } func stdinIsTerminal() bool { diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index 188ebcb7099..3b342f86876 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -54,6 +54,7 @@ Usage help is available: --cleanup-cache auto remove old cache directories --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) (default auto) -h, --help help for restic + --http-user-agent value set a custom user agent for outgoing http requests --insecure-no-password use an empty password for the repository, must be passed to every restic command (insecure) --insecure-tls skip TLS certificate verification when connecting to the repository (insecure) --json set output mode to JSON for commands that support it @@ -134,6 +135,7 @@ command: --cache-dir directory set the cache directory. 
(default: use system default cache directory) --cleanup-cache auto remove old cache directories --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) (default auto) + --http-user-agent value set a custom user agent for outgoing http requests --insecure-no-password use an empty password for the repository, must be passed to every restic command (insecure) --insecure-tls skip TLS certificate verification when connecting to the repository (insecure) --json set output mode to JSON for commands that support it diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go index 97fd521e358..19613e8105c 100644 --- a/internal/backend/http_transport.go +++ b/internal/backend/http_transport.go @@ -28,6 +28,9 @@ type TransportOptions struct { // Skip TLS certificate verification InsecureTLS bool + + // Specify Custom User-Agent for the http Client + HTTPUserAgent string } // readPEMCertKey reads a file and returns the PEM encoded certificate and key @@ -132,6 +135,13 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { } rt := http.RoundTripper(tr) + + // if the userAgent is set in the Transport Options, wrap the + // http.RoundTripper + if opts.HTTPUserAgent != "" { + rt = newCustomUserAgentRoundTripper(rt, opts.HTTPUserAgent) + } + if feature.Flag.Enabled(feature.BackendErrorRedesign) { rt = newWatchdogRoundtripper(rt, 120*time.Second, 128*1024) } diff --git a/internal/backend/httpuseragent_roundtripper.go b/internal/backend/httpuseragent_roundtripper.go new file mode 100644 index 00000000000..6272aa41ace --- /dev/null +++ b/internal/backend/httpuseragent_roundtripper.go @@ -0,0 +1,25 @@ +package backend + +import "net/http" + +// httpUserAgentRoundTripper is a custom http.RoundTripper that modifies the User-Agent header +// of outgoing HTTP requests. +type httpUserAgentRoundTripper struct { + userAgent string + rt http.RoundTripper +} + +func newCustomUserAgentRoundTripper(rt http.RoundTripper, userAgent string) *httpUserAgentRoundTripper { + return &httpUserAgentRoundTripper{ + rt: rt, + userAgent: userAgent, + } +} + +// RoundTrip modifies the User-Agent header of the request and then delegates the request +// to the underlying RoundTripper. 
+func (c *httpUserAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = req.Clone(req.Context()) + req.Header.Set("User-Agent", c.userAgent) + return c.rt.RoundTrip(req) +} diff --git a/internal/backend/httpuseragent_roundtripper_test.go b/internal/backend/httpuseragent_roundtripper_test.go new file mode 100644 index 00000000000..0a81c418af1 --- /dev/null +++ b/internal/backend/httpuseragent_roundtripper_test.go @@ -0,0 +1,50 @@ +package backend + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestCustomUserAgentTransport(t *testing.T) { + // Create a mock HTTP handler that checks the User-Agent header + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + userAgent := r.Header.Get("User-Agent") + if userAgent != "TestUserAgent" { + t.Errorf("Expected User-Agent: TestUserAgent, got: %s", userAgent) + } + w.WriteHeader(http.StatusOK) + }) + + // Create a test server with the mock handler + server := httptest.NewServer(handler) + defer server.Close() + + // Create a custom user agent transport + customUserAgent := "TestUserAgent" + transport := &httpUserAgentRoundTripper{ + userAgent: customUserAgent, + rt: http.DefaultTransport, + } + + // Create an HTTP client with the custom transport + client := &http.Client{ + Transport: transport, + } + + // Make a request to the test server + resp, err := client.Get(server.URL) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer func() { + if err := resp.Body.Close(); err != nil { + t.Log("failed to close response body") + } + }() + + // Check the response status code + if resp.StatusCode != http.StatusOK { + t.Errorf("Expected status code: %d, got: %d", http.StatusOK, resp.StatusCode) + } +} From 38654a3bd7a740fb363eff68223f10072af035c6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 30 May 2024 18:48:52 +0200 Subject: [PATCH 322/893] backend/retry: do not log final error if context was canceled Calls to `List(ctx, ...)` are usually stopped by canceling the context once no further entries are required by the caller. Thus, don't log the final error if the used context was canceled. --- internal/backend/retry/backend_retry.go | 6 ++--- internal/backend/retry/backend_retry_test.go | 25 +++++++++++++++++--- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index d5134d43338..8d0f42bfdc3 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -43,7 +43,7 @@ func New(be backend.Backend, maxElapsedTime time.Duration, report func(string, e // retryNotifyErrorWithSuccess is an extension of backoff.RetryNotify with notification of success after an error. // success is NOT notified on the first run of operation (only after an error). 
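To see why a canceled context is the expected way for a List call to end, consider a caller that only needs the first entry. The sketch below uses a minimal stand-in interface rather than restic's actual backend types, so the names and signatures are illustrative only:

    package example

    import (
        "context"
        "errors"
    )

    // lister stands in for the backend List method that the retry logic wraps.
    type lister interface {
        List(ctx context.Context, fn func(name string) error) error
    }

    // firstEntry cancels the context as soon as one entry has been seen. With
    // the change above, the retry wrapper no longer reports this deliberate
    // cancellation as a final backend error.
    func firstEntry(ctx context.Context, be lister) (string, error) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()

        var found string
        err := be.List(ctx, func(name string) error {
            found = name
            cancel() // the caller has what it needs, stop listing
            return nil
        })
        if err != nil && !errors.Is(err, context.Canceled) {
            return "", err
        }
        return found, nil
    }
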
-func retryNotifyErrorWithSuccess(operation backoff.Operation, b backoff.BackOff, notify backoff.Notify, success func(retries int)) error { +func retryNotifyErrorWithSuccess(operation backoff.Operation, b backoff.BackOffContext, notify backoff.Notify, success func(retries int)) error { var operationWrapper backoff.Operation if success == nil { operationWrapper = operation @@ -61,8 +61,8 @@ func retryNotifyErrorWithSuccess(operation backoff.Operation, b backoff.BackOff, } err := backoff.RetryNotify(operationWrapper, b, notify) - if err != nil && notify != nil { - // log final error + if err != nil && notify != nil && b.Context().Err() == nil { + // log final error, unless the context was canceled notify(err, -1) } return err diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go index cd0c4d48b8a..fd76200d406 100644 --- a/internal/backend/retry/backend_retry_test.go +++ b/internal/backend/retry/backend_retry_test.go @@ -460,7 +460,7 @@ func TestNotifyWithSuccessIsNotCalled(t *testing.T) { t.Fatal("Success should not have been called") } - err := retryNotifyErrorWithSuccess(operation, &backoff.ZeroBackOff{}, notify, success) + err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(&backoff.ZeroBackOff{}, context.Background()), notify, success) if err != nil { t.Fatal("retry should not have returned an error") } @@ -486,7 +486,7 @@ func TestNotifyWithSuccessIsCalled(t *testing.T) { successCalled++ } - err := retryNotifyErrorWithSuccess(operation, &backoff.ZeroBackOff{}, notify, success) + err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(&backoff.ZeroBackOff{}, context.Background()), notify, success) if err != nil { t.Fatal("retry should not have returned an error") } @@ -515,12 +515,31 @@ func TestNotifyWithSuccessFinalError(t *testing.T) { successCalled++ } - err := retryNotifyErrorWithSuccess(operation, backoff.WithMaxRetries(&backoff.ZeroBackOff{}, 5), notify, success) + err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(backoff.WithMaxRetries(&backoff.ZeroBackOff{}, 5), context.Background()), notify, success) test.Assert(t, err.Error() == "expected error in test", "wrong error message %v", err) test.Equals(t, 6, notifyCalled, "notify should have been called 6 times") test.Equals(t, 0, successCalled, "success should not have been called") } +func TestNotifyWithCancelError(t *testing.T) { + operation := func() error { + return errors.New("expected error in test") + } + + notify := func(error, time.Duration) { + t.Error("unexpected call to notify") + } + + success := func(retries int) { + t.Error("unexpected call to success") + } + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + err := retryNotifyErrorWithSuccess(operation, backoff.WithContext(&backoff.ZeroBackOff{}, ctx), notify, success) + test.Assert(t, err == context.Canceled, "wrong error message %v", err) +} + type testClock struct { Time time.Time } From 05d7885a8706292ca31c8435aff815b7a7b674bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jun 2024 01:02:01 +0000 Subject: [PATCH 323/893] build(deps): bump google.golang.org/api from 0.181.0 to 0.182.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.181.0 to 0.182.0. 
- [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.181.0...v0.182.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index a19d84f0e7f..93f00d28f91 100644 --- a/go.mod +++ b/go.mod @@ -34,12 +34,12 @@ require ( golang.org/x/term v0.20.0 golang.org/x/text v0.15.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.181.0 + google.golang.org/api v0.182.0 ) require ( - cloud.google.com/go v0.113.0 // indirect - cloud.google.com/go/auth v0.4.1 // indirect + cloud.google.com/go v0.114.0 // indirect + cloud.google.com/go/auth v0.4.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/iam v1.1.8 // indirect @@ -79,9 +79,9 @@ require ( go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 // indirect - google.golang.org/grpc v1.63.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect + google.golang.org/grpc v1.64.0 // indirect google.golang.org/protobuf v1.34.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 3ec38b3dad2..b0f71f05c09 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.113.0 h1:g3C70mn3lWfckKBiCVsAshabrDg01pQ0pnX1MNtnMkA= -cloud.google.com/go v0.113.0/go.mod h1:glEqlogERKYeePz6ZdkcLJ28Q2I6aERgDDErBg9GzO8= -cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg= -cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro= +cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= +cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= +cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg= +cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= @@ -283,8 +283,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -google.golang.org/api v0.181.0 
h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4= -google.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k= +google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE= +google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -292,17 +292,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 h1:W5Xj/70xIA4x60O/IFyXivR5MGqblAb8R3w26pnD6No= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From fced29b4797f27dc8b8f90146587ae7776fa5a35 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jun 2024 01:48:43 +0000 Subject: [PATCH 324/893] build(deps): bump docker/login-action from 3.1.0 to 3.2.0 Bumps [docker/login-action](https://github.com/docker/login-action) from 3.1.0 to 3.2.0. 
- [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/e92390c5fb421da1463c202d546fed0ec5c39f20...0d4c9c5ea7693da7b068278f7b52bda2a190a446) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 71cfe1691e3..771bf9f9a2a 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -25,7 +25,7 @@ jobs: uses: actions/checkout@v4 - name: Log in to the Container registry - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} From 46b0fac11e24a1a3e933f5a083a32f7fe69befa4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Jun 2024 01:48:46 +0000 Subject: [PATCH 325/893] build(deps): bump golangci/golangci-lint-action from 5 to 6 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5 to 6. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v5...v6) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 43480f9e257..796a614b18e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -261,7 +261,7 @@ jobs: uses: actions/checkout@v4 - name: golangci-lint - uses: golangci/golangci-lint-action@v5 + uses: golangci/golangci-lint-action@v6 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. version: v1.57.1 From 4a874000b7129330416ffb4ed322d6ba64e54c7c Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Sat, 1 Jun 2024 15:04:35 +0200 Subject: [PATCH 326/893] gs: Replace some errors.Wrap calls The first one in Create is already a WithStack error. The rest were referencing code that hasn't existed for quite some time. Note that errors from Google SDKs tends to start with "google:" or "googleapi:". Also, use restic/internal/errors. 
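The distinction described above is easy to see in a small, self-contained sketch. This is illustrative only and not part of the patch: it assumes the semantics of github.com/pkg/errors (the import being removed here), whose Wrap and WithStack functions restic's internal errors package also exposes. Wrap prefixes the message with a label, while WithStack only attaches a stack trace, so an SDK error that already identifies itself (e.g. "googleapi: ...") is not decorated with a stale label such as "service.Buckets.Get".

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	// A stand-in for an error returned by the Google SDK.
	sdkErr := fmt.Errorf("googleapi: Error 403: forbidden")

	// Wrap adds a prefix (and a stack trace):
	fmt.Println(errors.Wrap(sdkErr, "service.Buckets.Get"))
	// service.Buckets.Get: googleapi: Error 403: forbidden

	// WithStack keeps the message untouched and only records the stack:
	fmt.Println(errors.WithStack(sdkErr))
	// googleapi: Error 403: forbidden
}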
--- internal/backend/gs/gs.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go index 305e9b9c108..0af226f5d5f 100644 --- a/internal/backend/gs/gs.go +++ b/internal/backend/gs/gs.go @@ -12,12 +12,13 @@ import ( "strings" "cloud.google.com/go/storage" - "github.com/pkg/errors" + "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/layout" "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" "golang.org/x/oauth2" "golang.org/x/oauth2/google" @@ -134,7 +135,7 @@ func Open(_ context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) { be, err := open(cfg, rt) if err != nil { - return nil, errors.Wrap(err, "open") + return nil, err } // Try to determine if the bucket exists. If it does not, try to create it. @@ -145,7 +146,7 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Back // however, the client doesn't have storage.bucket.get permission return be, nil } - return nil, errors.Wrap(err, "service.Buckets.Get") + return nil, errors.WithStack(err) } if !exists { @@ -155,7 +156,7 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Back // Bucket doesn't exist, try to create it. if err := be.bucket.Create(ctx, be.projectID, bucketAttrs); err != nil { // Always an error, as the bucket definitely doesn't exist. - return nil, errors.Wrap(err, "service.Buckets.Insert") + return nil, errors.WithStack(err) } } @@ -251,7 +252,7 @@ func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.Rewind } if err != nil { - return errors.Wrap(err, "service.Objects.Insert") + return errors.WithStack(err) } // sanity check @@ -298,7 +299,7 @@ func (be *Backend) Stat(ctx context.Context, h backend.Handle) (bi backend.FileI attr, err := be.bucket.Object(objName).Attrs(ctx) if err != nil { - return backend.FileInfo{}, errors.Wrap(err, "service.Objects.Get") + return backend.FileInfo{}, errors.WithStack(err) } return backend.FileInfo{Size: attr.Size, Name: h.Name}, nil @@ -314,7 +315,7 @@ func (be *Backend) Remove(ctx context.Context, h backend.Handle) error { err = nil } - return errors.Wrap(err, "client.RemoveObject") + return errors.WithStack(err) } // List runs fn for each file in the backend which has the type t. When an From 6bf3d4859fd935aff1a1ba10bf5375a02cede2ad Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 1 Jun 2024 18:52:39 +0200 Subject: [PATCH 327/893] backend: improve error on http request timeout Now yields a "request timeout" error instead of "context canceled". --- internal/backend/watchdog_roundtriper.go | 21 ++++++++++++++++--- internal/backend/watchdog_roundtriper_test.go | 7 +++++-- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/internal/backend/watchdog_roundtriper.go b/internal/backend/watchdog_roundtriper.go index fb7863002c8..e3e10d7fedf 100644 --- a/internal/backend/watchdog_roundtriper.go +++ b/internal/backend/watchdog_roundtriper.go @@ -2,11 +2,16 @@ package backend import ( "context" + "errors" + "fmt" "io" "net/http" + "sync/atomic" "time" ) +var errRequestTimeout = fmt.Errorf("request timeout") + // watchdogRoundtripper cancels an http request if an upload or download did not make progress // within timeout. 
The time between fully sending the request and receiving an response is also // limited by this timeout. This ensures that stuck requests are cancelled after some time. @@ -32,12 +37,14 @@ func newWatchdogRoundtripper(rt http.RoundTripper, timeout time.Duration, chunkS func (w *watchdogRoundtripper) RoundTrip(req *http.Request) (*http.Response, error) { timer := time.NewTimer(w.timeout) ctx, cancel := context.WithCancel(req.Context()) + timedOut := &atomic.Bool{} // cancel context if timer expires go func() { defer timer.Stop() select { case <-timer.C: + timedOut.Store(true) cancel() case <-ctx.Done(): } @@ -46,11 +53,14 @@ func (w *watchdogRoundtripper) RoundTrip(req *http.Request) (*http.Response, err kick := func() { timer.Reset(w.timeout) } + isTimeout := func(err error) bool { + return timedOut.Load() && errors.Is(err, context.Canceled) + } req = req.Clone(ctx) if req.Body != nil { // kick watchdog timer as long as uploading makes progress - req.Body = newWatchdogReadCloser(req.Body, w.chunkSize, kick, nil) + req.Body = newWatchdogReadCloser(req.Body, w.chunkSize, kick, nil, isTimeout) } resp, err := w.rt.RoundTrip(req) @@ -60,16 +70,17 @@ func (w *watchdogRoundtripper) RoundTrip(req *http.Request) (*http.Response, err // kick watchdog timer as long as downloading makes progress // cancel context to stop goroutine once response body is closed - resp.Body = newWatchdogReadCloser(resp.Body, w.chunkSize, kick, cancel) + resp.Body = newWatchdogReadCloser(resp.Body, w.chunkSize, kick, cancel, isTimeout) return resp, nil } -func newWatchdogReadCloser(rc io.ReadCloser, chunkSize int, kick func(), close func()) *watchdogReadCloser { +func newWatchdogReadCloser(rc io.ReadCloser, chunkSize int, kick func(), close func(), isTimeout func(err error) bool) *watchdogReadCloser { return &watchdogReadCloser{ rc: rc, chunkSize: chunkSize, kick: kick, close: close, + isTimeout: isTimeout, } } @@ -78,6 +89,7 @@ type watchdogReadCloser struct { chunkSize int kick func() close func() + isTimeout func(err error) bool } var _ io.ReadCloser = &watchdogReadCloser{} @@ -93,6 +105,9 @@ func (w *watchdogReadCloser) Read(p []byte) (n int, err error) { n, err = w.rc.Read(p) w.kick() + if err != nil && w.isTimeout(err) { + err = errRequestTimeout + } return n, err } diff --git a/internal/backend/watchdog_roundtriper_test.go b/internal/backend/watchdog_roundtriper_test.go index b1f589bc01b..bc43447e192 100644 --- a/internal/backend/watchdog_roundtriper_test.go +++ b/internal/backend/watchdog_roundtriper_test.go @@ -23,8 +23,11 @@ func TestRead(t *testing.T) { onClose := func() { closed = true } + isTimeout := func(err error) bool { + return false + } - wd := newWatchdogReadCloser(io.NopCloser(bytes.NewReader(data)), 1, kick, onClose) + wd := newWatchdogReadCloser(io.NopCloser(bytes.NewReader(data)), 1, kick, onClose, isTimeout) out, err := io.ReadAll(wd) rtest.OK(t, err) @@ -196,6 +199,6 @@ func TestDownloadTimeout(t *testing.T) { rtest.Equals(t, 200, resp.StatusCode, "unexpected status code") _, err = io.ReadAll(resp.Body) - rtest.Equals(t, context.Canceled, err, "response download not canceled") + rtest.Equals(t, errRequestTimeout, err, "response download not canceled") rtest.OK(t, resp.Body.Close()) } From db2398f35ba05d08edeb0cf7a4d340d394ac46a5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 1 Jun 2024 18:54:01 +0200 Subject: [PATCH 328/893] backend: increase request progress timeout to 5 minutes Apparently, 2 minutes are too short in some cases and can result in canceled List requests. 
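For context, the "progress timeout" enforced by the watchdog round tripper is an inactivity limit, not a cap on total transfer time: the timer is reset whenever a chunk of request or response data moves, so only a stalled transfer is cancelled. The following minimal sketch illustrates that idea with hypothetical names; it is not restic's actual implementation.

package main

import (
	"bytes"
	"fmt"
	"io"
	"time"
)

// progressReader resets a timer whenever data arrives, so a stalled
// transfer trips the timeout while a slow-but-steady one does not.
type progressReader struct {
	r     io.Reader
	reset func()
}

func (p *progressReader) Read(buf []byte) (int, error) {
	n, err := p.r.Read(buf)
	if n > 0 {
		p.reset()
	}
	return n, err
}

func main() {
	timeout := 5 * time.Minute
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	body := &progressReader{
		r:     bytes.NewReader([]byte("some response body")),
		reset: func() { timer.Reset(timeout) },
	}

	data, err := io.ReadAll(body)
	fmt.Println(len(data), err) // 18 <nil>
}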
--- changelog/unreleased/issue-4627 | 1 + internal/backend/http_transport.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/changelog/unreleased/issue-4627 b/changelog/unreleased/issue-4627 index 6f6a00098e6..e9c66c17544 100644 --- a/changelog/unreleased/issue-4627 +++ b/changelog/unreleased/issue-4627 @@ -29,3 +29,4 @@ https://github.com/restic/restic/issues/1523 https://github.com/restic/restic/pull/4520 https://github.com/restic/restic/pull/4800 https://github.com/restic/restic/pull/4784 +https://github.com/restic/restic/pull/4844 diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go index 19613e8105c..5162d35715e 100644 --- a/internal/backend/http_transport.go +++ b/internal/backend/http_transport.go @@ -143,7 +143,7 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { } if feature.Flag.Enabled(feature.BackendErrorRedesign) { - rt = newWatchdogRoundtripper(rt, 120*time.Second, 128*1024) + rt = newWatchdogRoundtripper(rt, 5*time.Minute, 128*1024) } // wrap in the debug round tripper (if active) From 10fdb914df657048a8ed04c9c745785efe418e70 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Sat, 1 Jun 2024 15:15:06 +0200 Subject: [PATCH 329/893] cmd: Return error in readPassword The returned error was always nil. Replaced Wrap by WithStack because the function name was stale. --- cmd/restic/global.go | 2 +- cmd/restic/global_test.go | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 6920caa8d82..cc24b74c2bf 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -280,7 +280,7 @@ func readPassword(in io.Reader) (password string, err error) { sc := bufio.NewScanner(in) sc.Scan() - return sc.Text(), errors.Wrap(err, "Scan") + return sc.Text(), errors.WithStack(sc.Err()) } // readPasswordTerminal reads the password from the given reader which must be a diff --git a/cmd/restic/global_test.go b/cmd/restic/global_test.go index 4f5c29e9a77..b43fdd1f37d 100644 --- a/cmd/restic/global_test.go +++ b/cmd/restic/global_test.go @@ -5,6 +5,7 @@ import ( "path/filepath" "testing" + "github.com/restic/restic/internal/errors" rtest "github.com/restic/restic/internal/test" ) @@ -22,6 +23,16 @@ func Test_PrintFunctionsRespectsGlobalStdout(t *testing.T) { } } +type errorReader struct{ err error } + +func (r *errorReader) Read([]byte) (int, error) { return 0, r.err } + +func TestReadPassword(t *testing.T) { + want := errors.New("foo") + _, err := readPassword(&errorReader{want}) + rtest.Assert(t, errors.Is(err, want), "wrong error %v", err) +} + func TestReadRepo(t *testing.T) { tempDir := rtest.TempDir(t) From b2bbbe805f68d4b125488e6719f76cb1ed815fb6 Mon Sep 17 00:00:00 2001 From: "Leo R. 
Lundgren" Date: Mon, 3 Jun 2024 23:37:17 +0200 Subject: [PATCH 330/893] azure: Improve error message in azure.Create() --- internal/backend/azure/azure.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 92f64243bbb..737cf0e1479 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -161,7 +161,7 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, er return nil, errors.Wrap(err, "container.Create") } } else if err != nil { - return be, err + return be, errors.Wrap(err, "container.GetProperties") } return be, nil From e9de9684f4da9638450c2e5088a365e020097ea3 Mon Sep 17 00:00:00 2001 From: Florian Thoma Date: Wed, 5 Jun 2024 09:33:15 +0200 Subject: [PATCH 331/893] Use character display width for table padding Using len(...) for table cell padding produced wrong results for unicode chracters leading to misaligned tables. Implementation changed to take the actual terminal display width into consideration. --- internal/ui/format.go | 23 +++++++++++++++++++++++ internal/ui/format_test.go | 18 ++++++++++++++++++ internal/ui/table/table.go | 14 ++++++++------ internal/ui/table/table_test.go | 4 ++-- 4 files changed, 51 insertions(+), 8 deletions(-) diff --git a/internal/ui/format.go b/internal/ui/format.go index d2e0a4d2b43..de650607d5b 100644 --- a/internal/ui/format.go +++ b/internal/ui/format.go @@ -8,6 +8,8 @@ import ( "math/bits" "strconv" "time" + + "golang.org/x/text/width" ) func FormatBytes(c uint64) string { @@ -105,3 +107,24 @@ func ToJSONString(status interface{}) string { } return buf.String() } + +// TerminalDisplayWidth returns the number of terminal cells needed to display s +func TerminalDisplayWidth(s string) int { + width := 0 + for _, r := range s { + width += terminalDisplayRuneWidth(r) + } + + return width +} + +func terminalDisplayRuneWidth(r rune) int { + switch width.LookupRune(r).Kind() { + case width.EastAsianWide, width.EastAsianFullwidth: + return 2 + case width.EastAsianNarrow, width.EastAsianHalfwidth, width.EastAsianAmbiguous, width.Neutral: + return 1 + default: + return 0 + } +} diff --git a/internal/ui/format_test.go b/internal/ui/format_test.go index 4223d4e20bc..d595026c407 100644 --- a/internal/ui/format_test.go +++ b/internal/ui/format_test.go @@ -84,3 +84,21 @@ func TestParseBytesInvalid(t *testing.T) { test.Equals(t, int64(0), v) } } + +func TestTerminalDisplayWidth(t *testing.T) { + for _, c := range []struct { + input string + want int + }{ + {"foo", 3}, + {"aéb", 3}, + {"ab", 3}, + {"a’b", 3}, + {"aあb", 4}, + } { + if got := TerminalDisplayWidth(c.input); got != c.want { + t.Errorf("wrong display width for '%s', want %d, got %d", c.input, c.want, got) + } + } + +} diff --git a/internal/ui/table/table.go b/internal/ui/table/table.go index c3ae47f5462..ae09063be01 100644 --- a/internal/ui/table/table.go +++ b/internal/ui/table/table.go @@ -6,6 +6,8 @@ import ( "strings" "text/template" + + "github.com/restic/restic/internal/ui" ) // Table contains data for a table to be printed. 
@@ -89,7 +91,7 @@ func printLine(w io.Writer, print func(io.Writer, string) error, sep string, dat } // apply padding - pad := widths[fieldNum] - len(v) + pad := widths[fieldNum] - ui.TerminalDisplayWidth(v) if pad > 0 { v += strings.Repeat(" ", pad) } @@ -139,16 +141,16 @@ func (t *Table) Write(w io.Writer) error { columnWidths := make([]int, columns) for i, desc := range t.columns { for _, line := range strings.Split(desc, "\n") { - if columnWidths[i] < len(line) { - columnWidths[i] = len(desc) + if columnWidths[i] < ui.TerminalDisplayWidth(line) { + columnWidths[i] = ui.TerminalDisplayWidth(desc) } } } for _, line := range lines { for i, content := range line { for _, l := range strings.Split(content, "\n") { - if columnWidths[i] < len(l) { - columnWidths[i] = len(l) + if columnWidths[i] < ui.TerminalDisplayWidth(l) { + columnWidths[i] = ui.TerminalDisplayWidth(l) } } } @@ -159,7 +161,7 @@ func (t *Table) Write(w io.Writer) error { for _, width := range columnWidths { totalWidth += width } - totalWidth += (columns - 1) * len(t.CellSeparator) + totalWidth += (columns - 1) * ui.TerminalDisplayWidth(t.CellSeparator) // write header if len(t.columns) > 0 { diff --git a/internal/ui/table/table_test.go b/internal/ui/table/table_test.go index db116bbc512..7a94b7f9bca 100644 --- a/internal/ui/table/table_test.go +++ b/internal/ui/table/table_test.go @@ -126,7 +126,7 @@ foo 2018-08-19 22:22:22 xxx other /home/user/other Time string Tags, Dirs []string } - table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"work", "go"}, []string{"/home/user/work", "/home/user/go"}}) + table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"work", "go’s"}, []string{"/home/user/work", "/home/user/go"}}) table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other"}, []string{"/home/user/other"}}) table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other", "bar"}, []string{"/home/user/other"}}) return table @@ -135,7 +135,7 @@ foo 2018-08-19 22:22:22 xxx other /home/user/other host name time zz tags dirs ------------------------------------------------------------ foo 2018-08-19 22:22:22 xxx work /home/user/work - go /home/user/go + go’s /home/user/go foo 2018-08-19 22:22:22 xxx other /home/user/other foo 2018-08-19 22:22:22 xxx other /home/user/other bar From c55665be2c85de74fd58118ab6e14a38abcb50b3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 5 Jun 2024 22:14:45 +0200 Subject: [PATCH 332/893] key add/passwd: handle UTF-16 encoding correctly Just use the exact some function for load a password from a file everywhere. --- changelog/unreleased/issue-4850 | 8 ++++++++ cmd/restic/cmd_key_add.go | 10 ---------- cmd/restic/global.go | 16 +++++++++++----- 3 files changed, 19 insertions(+), 15 deletions(-) create mode 100644 changelog/unreleased/issue-4850 diff --git a/changelog/unreleased/issue-4850 b/changelog/unreleased/issue-4850 new file mode 100644 index 00000000000..ad065cc2188 --- /dev/null +++ b/changelog/unreleased/issue-4850 @@ -0,0 +1,8 @@ +Bugfix: correctly handle UTF-16 password files in `key add/passwd` + +`key add` and `key passwd` did not properly decode UTF-16 encoded password read +from a password file. This has been fix to match the decoding when opening a +repository. 
+ +https://github.com/restic/restic/issues/4850 +https://github.com/restic/restic/pull/4851 diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index 9e50aa67d5a..d38991f090c 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -3,8 +3,6 @@ package main import ( "context" "fmt" - "os" - "strings" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" @@ -123,14 +121,6 @@ func getNewPassword(ctx context.Context, gopts GlobalOptions, newPasswordFile st "enter password again: ") } -func loadPasswordFromFile(pwdFile string) (string, error) { - s, err := os.ReadFile(pwdFile) - if os.IsNotExist(err) { - return "", errors.Fatalf("%s does not exist", pwdFile) - } - return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile") -} - func switchToNewKeyAndRemoveIfBroken(ctx context.Context, repo *repository.Repository, key *repository.Key, pw string) error { // Verify new key to make sure it really works. A broken key can render the // whole repository inaccessible diff --git a/cmd/restic/global.go b/cmd/restic/global.go index a5250ca38ec..44b427b304a 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -268,11 +268,7 @@ func resolvePassword(opts GlobalOptions, envStr string) (string, error) { return (strings.TrimSpace(string(output))), nil } if opts.PasswordFile != "" { - s, err := textfile.Read(opts.PasswordFile) - if errors.Is(err, os.ErrNotExist) { - return "", errors.Fatalf("%s does not exist", opts.PasswordFile) - } - return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile") + return loadPasswordFromFile(opts.PasswordFile) } if pwd := os.Getenv(envStr); pwd != "" { @@ -282,6 +278,16 @@ func resolvePassword(opts GlobalOptions, envStr string) (string, error) { return "", nil } +// loadPasswordFromFile loads a password from a file while stripping a BOM and +// converting the password to UTF-8. +func loadPasswordFromFile(pwdFile string) (string, error) { + s, err := textfile.Read(pwdFile) + if errors.Is(err, os.ErrNotExist) { + return "", errors.Fatalf("%s does not exist", pwdFile) + } + return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile") +} + // readPassword reads the password from the given reader directly. func readPassword(in io.Reader) (password string, err error) { sc := bufio.NewScanner(in) From b82ecc64252b442c9b330fa33b9ff8b3e90477f9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 5 Jun 2024 22:33:20 +0200 Subject: [PATCH 333/893] crypto: cleanup ciphertext verification error --- internal/crypto/crypto.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/crypto/crypto.go b/internal/crypto/crypto.go index 58c82e78cc5..d7ac9c3d4a8 100644 --- a/internal/crypto/crypto.go +++ b/internal/crypto/crypto.go @@ -27,7 +27,7 @@ const ( var ( // ErrUnauthenticated is returned when ciphertext verification has failed. - ErrUnauthenticated = errors.New("ciphertext verification failed") + ErrUnauthenticated = fmt.Errorf("ciphertext verification failed") ) // Key holds encryption and message authentication keys for a repository. 
It is stored From 7a48c9ebd7191124ce13b4c73804840f0e2c15c1 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Wed, 5 Jun 2024 16:06:57 -0600 Subject: [PATCH 334/893] Fix review comments --- internal/fs/ea_windows.go | 7 +- internal/fs/ea_windows_test.go | 149 +++++++++++++-------------- internal/restic/node_test.go | 6 +- internal/restic/node_windows.go | 83 ++++++--------- internal/restic/node_windows_test.go | 24 ++--- 5 files changed, 119 insertions(+), 150 deletions(-) diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go index e4b23d35a6c..08466c33f0e 100644 --- a/internal/fs/ea_windows.go +++ b/internal/fs/ea_windows.go @@ -211,8 +211,9 @@ var ( ) const ( - // noExtendedAttribsStatus is a constant value which indicates no extended attributes were found - noExtendedAttribsStatus = -1073741742 + // STATUS_NO_EAS_ON_FILE is a constant value which indicates EAs were requested for the file but it has no EAs. + // Windows NTSTATUS value: STATUS_NO_EAS_ON_FILE=0xC0000052 + STATUS_NO_EAS_ON_FILE = -1073741742 ) // GetFileEA retrieves the extended attributes for the file represented by `handle`. The @@ -228,7 +229,7 @@ func GetFileEA(handle windows.Handle) ([]ExtendedAttribute, error) { for { status := getFileEA(handle, &iosb, &buf[0], uint32(bufLen), false, 0, 0, nil, true) - if status == noExtendedAttribsStatus { + if status == STATUS_NO_EAS_ON_FILE { //If status is -1073741742, no extended attributes were found return nil, nil } diff --git a/internal/fs/ea_windows_test.go b/internal/fs/ea_windows_test.go index f9a4d9adf58..14b7a245289 100644 --- a/internal/fs/ea_windows_test.go +++ b/internal/fs/ea_windows_test.go @@ -135,114 +135,103 @@ func TestSetFileEa(t *testing.T) { } } +// The code below was refactored from github.com/Microsoft/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea_test.go +// under MIT license. func TestSetGetFileEA(t *testing.T) { - tempDir := t.TempDir() - testfilePath := filepath.Join(tempDir, "testfile.txt") - // create temp file - testfile, err := os.Create(testfilePath) - if err != nil { - t.Fatalf("failed to create temporary file: %s", err) - } - defer func() { - err := testfile.Close() - if err != nil { - t.Logf("Error closing file %s: %v\n", testfile.Name(), err) - } - }() + testfilePath := setupTestFile(t) + defer cleanupTestFile(t, testfilePath) - nAttrs := 3 - testEAs := make([]ExtendedAttribute, 3) - // generate random extended attributes for test - for i := 0; i < nAttrs; i++ { - // EA name is automatically converted to upper case before storing, so - // when reading it back it returns the upper case name. To avoid test - // failures because of that keep the name upper cased. 
- testEAs[i].Name = fmt.Sprintf("TESTEA%d", i+1) - testEAs[i].Value = make([]byte, getRandomInt()) - _, err := rand.Read(testEAs[i].Value) - if err != nil { - t.Logf("Error reading rand for file %s: %v\n", testfilePath, err) - } - } + testEAs := generateTestEAs(t, 3, testfilePath) + fileHandle := openFile(t, testfilePath, windows.FILE_ATTRIBUTE_NORMAL) + defer closeFileHandle(t, fileHandle, testfilePath) - utf16Path := windows.StringToUTF16Ptr(testfilePath) - fileAccessRightReadWriteEA := (0x8 | 0x10) - fileHandle, err := windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) - if err != nil { - t.Fatalf("open file failed with: %s", err) - } - defer func() { - err := windows.Close(fileHandle) - if err != nil { - t.Logf("Error closing file handle %s: %v\n", testfilePath, err) - } - }() + testSetGetEA(t, fileHandle, testEAs, testfilePath) +} - if err := SetFileEA(fileHandle, testEAs); err != nil { - t.Fatalf("set EA for file failed: %s", err) - } +// The code is new code and reuses code refactored from github.com/Microsoft/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea_test.go +// under MIT license. +func TestSetGetFolderEA(t *testing.T) { + testfolderPath := setupTestFolder(t) + defer cleanupTestFolder(t, testfolderPath) - var readEAs []ExtendedAttribute - if readEAs, err = GetFileEA(fileHandle); err != nil { - t.Fatalf("get EA for file failed: %s", err) - } + testEAs := generateTestEAs(t, 3, testfolderPath) + fileHandle := openFile(t, testfolderPath, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS) + defer closeFileHandle(t, fileHandle, testfolderPath) - if !reflect.DeepEqual(readEAs, testEAs) { - t.Logf("expected: %+v, found: %+v\n", testEAs, readEAs) - t.Fatalf("EAs read from testfile don't match") + testSetGetEA(t, fileHandle, testEAs, testfolderPath) +} + +func setupTestFile(t *testing.T) string { + tempDir := t.TempDir() + testfilePath := filepath.Join(tempDir, "testfile.txt") + if _, err := os.Create(testfilePath); err != nil { + t.Fatalf("failed to create temporary file: %s", err) } + return testfilePath } -func TestSetGetFolderEA(t *testing.T) { +func setupTestFolder(t *testing.T) string { tempDir := t.TempDir() testfolderPath := filepath.Join(tempDir, "testfolder") - // create temp folder - err := os.Mkdir(testfolderPath, os.ModeDir) - if err != nil { - t.Fatalf("failed to create temporary file: %s", err) + if err := os.Mkdir(testfolderPath, os.ModeDir); err != nil { + t.Fatalf("failed to create temporary folder: %s", err) } + return testfolderPath +} - nAttrs := 3 - testEAs := make([]ExtendedAttribute, 3) - // generate random extended attributes for test +func generateTestEAs(t *testing.T, nAttrs int, path string) []ExtendedAttribute { + testEAs := make([]ExtendedAttribute, nAttrs) for i := 0; i < nAttrs; i++ { - // EA name is automatically converted to upper case before storing, so - // when reading it back it returns the upper case name. To avoid test - // failures because of that keep the name upper cased. 
testEAs[i].Name = fmt.Sprintf("TESTEA%d", i+1) testEAs[i].Value = make([]byte, getRandomInt()) - _, err := rand.Read(testEAs[i].Value) - if err != nil { - t.Logf("Error reading rand for file %s: %v\n", testfolderPath, err) + if _, err := rand.Read(testEAs[i].Value); err != nil { + t.Logf("Error reading rand for path %s: %v\n", path, err) } } + return testEAs +} - utf16Path := windows.StringToUTF16Ptr(testfolderPath) - fileAccessRightReadWriteEA := (0x8 | 0x10) - fileHandle, err := windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - +func openFile(t *testing.T, path string, attributes uint32) windows.Handle { + utf16Path := windows.StringToUTF16Ptr(path) + fileAccessRightReadWriteEA := uint32(0x8 | 0x10) + fileHandle, err := windows.CreateFile(utf16Path, fileAccessRightReadWriteEA, 0, nil, windows.OPEN_EXISTING, attributes, 0) if err != nil { - t.Fatalf("open folder failed with: %s", err) + t.Fatalf("open file failed with: %s", err) } - defer func() { - err := windows.Close(fileHandle) - if err != nil { - t.Logf("Error closing file handle %s: %v\n", testfolderPath, err) - } - }() + return fileHandle +} + +func closeFileHandle(t *testing.T, handle windows.Handle, path string) { + if err := windows.Close(handle); err != nil { + t.Logf("Error closing file handle %s: %v\n", path, err) + } +} - if err := SetFileEA(fileHandle, testEAs); err != nil { - t.Fatalf("set EA for folder failed: %s", err) +func testSetGetEA(t *testing.T, handle windows.Handle, testEAs []ExtendedAttribute, path string) { + if err := SetFileEA(handle, testEAs); err != nil { + t.Fatalf("set EA for path %s failed: %s", path, err) } - var readEAs []ExtendedAttribute - if readEAs, err = GetFileEA(fileHandle); err != nil { - t.Fatalf("get EA for folder failed: %s", err) + readEAs, err := GetFileEA(handle) + if err != nil { + t.Fatalf("get EA for path %s failed: %s", path, err) } if !reflect.DeepEqual(readEAs, testEAs) { t.Logf("expected: %+v, found: %+v\n", testEAs, readEAs) - t.Fatalf("EAs read from test folder don't match") + t.Fatalf("EAs read from path %s don't match", path) + } +} + +func cleanupTestFile(t *testing.T, path string) { + if err := os.Remove(path); err != nil { + t.Logf("Error removing file %s: %v\n", path, err) + } +} + +func cleanupTestFolder(t *testing.T, path string) { + if err := os.Remove(path); err != nil { + t.Logf("Error removing folder %s: %v\n", path, err) } } diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index 99ea48bbb00..6e0f31e2136 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -212,11 +212,7 @@ func TestNodeRestoreAt(t *testing.T) { extAttrArr := test.ExtendedAttributes // Iterate through the array using pointers for i := 0; i < len(extAttrArr); i++ { - // Get the pointer to the current element - namePtr := &extAttrArr[i].Name - - // Modify the value through the pointer - *namePtr = strings.ToUpper(*namePtr) + extAttrArr[i].Name = strings.ToUpper(extAttrArr[i].Name) } } diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 881c394be75..3e1dcae18c0 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -35,12 +35,12 @@ var ( ) // mknod is not supported on Windows. 
-func mknod(_ string, mode uint32, dev uint64) (err error) { +func mknod(_ string, _ uint32, _ uint64) (err error) { return errors.New("device nodes cannot be created on windows") } // Windows doesn't need lchown -func lchown(_ string, uid int, gid int) (err error) { +func lchown(_ string, _ int, _ int) (err error) { return nil } @@ -72,14 +72,12 @@ func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespe // restore extended attributes for windows func (node Node) restoreExtendedAttributes(path string) (err error) { - eas := []fs.ExtendedAttribute{} - for _, attr := range node.ExtendedAttributes { - extr := new(fs.ExtendedAttribute) - extr.Name = attr.Name - extr.Value = attr.Value - eas = append(eas, *extr) - } - if len(eas) > 0 { + count := len(node.ExtendedAttributes) + if count > 0 { + eas := make([]fs.ExtendedAttribute, count) + for i, attr := range node.ExtendedAttributes { + eas[i] = fs.ExtendedAttribute{Name: attr.Name, Value: attr.Value} + } if errExt := restoreExtendedAttributes(node.Type, path, eas); errExt != nil { return errExt } @@ -90,25 +88,9 @@ func (node Node) restoreExtendedAttributes(path string) (err error) { // fill extended attributes in the node. This also includes the Generic attributes for windows. func (node *Node) fillExtendedAttributes(path string, _ bool) (err error) { var fileHandle windows.Handle - - //Get file handle for file or dir - if node.Type == "file" { - if strings.HasSuffix(filepath.Clean(path), `\`) { - return nil - } - utf16Path := windows.StringToUTF16Ptr(path) - fileAccessRightReadWriteEA := (0x8 | 0x10) - fileHandle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) - } else if node.Type == "dir" { - utf16Path := windows.StringToUTF16Ptr(path) - fileAccessRightReadWriteEA := (0x8 | 0x10) - fileHandle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - } else { - return nil - } + fileHandle, err = getFileHandleForEA(node.Type, path) if err != nil { - err = errors.Errorf("open file failed for path: %s, with: %v", path, err) - return err + return errors.Errorf("get EA failed while opening file handle for path %v, with: %v", path, err) } defer func() { err := windows.CloseHandle(fileHandle) @@ -116,23 +98,19 @@ func (node *Node) fillExtendedAttributes(path string, _ bool) (err error) { debug.Log("Error closing file handle for %s: %v\n", path, err) } }() - //Get the windows Extended Attributes using the file handle - extAtts, err := fs.GetFileEA(fileHandle) + var extAtts []fs.ExtendedAttribute + extAtts, err = fs.GetFileEA(fileHandle) debug.Log("fillExtendedAttributes(%v) %v", path, extAtts) if err != nil { - debug.Log("open file failed for path: %s : %v", path, err) - return err - } else if len(extAtts) == 0 { + return errors.Errorf("get EA failed for path %v, with: %v", path, err) + } + if len(extAtts) == 0 { return nil } //Fill the ExtendedAttributes in the node using the name/value pairs in the windows EA for _, attr := range extAtts { - if err != nil { - err = errors.Errorf("can not obtain extended attribute for path %v, attr: %v, err: %v\n,", path, attr, err) - continue - } extendedAttr := ExtendedAttribute{ Name: attr.Name, Value: attr.Value, @@ -143,21 +121,30 @@ func (node *Node) fillExtendedAttributes(path string, _ bool) (err error) { return nil } -// restoreExtendedAttributes handles restore of the Windows 
Extended Attributes to the specified path. -// The Windows API requires setting of all the Extended Attributes in one call. -func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute) (err error) { - var fileHandle windows.Handle +// Get file handle for file or dir for setting/getting EAs +func getFileHandleForEA(nodeType, path string) (handle windows.Handle, err error) { switch nodeType { case "file": utf16Path := windows.StringToUTF16Ptr(path) fileAccessRightReadWriteEA := (0x8 | 0x10) - fileHandle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) + handle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) case "dir": utf16Path := windows.StringToUTF16Ptr(path) fileAccessRightReadWriteEA := (0x8 | 0x10) - fileHandle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + handle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) default: - return nil + return 0, nil + } + return handle, err +} + +// restoreExtendedAttributes handles restore of the Windows Extended Attributes to the specified path. +// The Windows API requires setting of all the Extended Attributes in one call. +func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute) (err error) { + var fileHandle windows.Handle + fileHandle, err = getFileHandleForEA(nodeType, path) + if err != nil { + return errors.Errorf("set EA failed while opening file handle for path %v, with: %v", path, err) } defer func() { err := windows.CloseHandle(fileHandle) @@ -165,12 +152,10 @@ func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute debug.Log("Error closing file handle for %s: %v\n", path, err) } }() - if err != nil { - err = errors.Errorf("open file failed for path %v, with: %v:\n", path, err) - } else if err = fs.SetFileEA(fileHandle, eas); err != nil { - err = errors.Errorf("set EA failed for path %v, with: %v:\n", path, err) + if err = fs.SetFileEA(fileHandle, eas); err != nil { + return errors.Errorf("set EA failed for path %v, with: %v", path, err) } - return err + return nil } type statT syscall.Win32FileAttributeData diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go index f89e2aeee77..29a42e9e246 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/restic/node_windows_test.go @@ -311,23 +311,21 @@ func TestRestoreExtendedAttributes(t *testing.T) { test.OK(t, errors.Wrapf(err, "Error closing file for: %s", testPath)) }() - if len(node.ExtendedAttributes) > 0 { - extAttr, err := fs.GetFileEA(handle) - test.OK(t, errors.Wrapf(err, "Error getting extended attributes for: %s", testPath)) - test.Equals(t, len(node.ExtendedAttributes), len(extAttr)) + extAttr, err := fs.GetFileEA(handle) + test.OK(t, errors.Wrapf(err, "Error getting extended attributes for: %s", testPath)) + test.Equals(t, len(node.ExtendedAttributes), len(extAttr)) - for _, expectedExtAttr := range node.ExtendedAttributes { - var foundExtAttr *fs.ExtendedAttribute - for _, ea := range extAttr { - if strings.EqualFold(ea.Name, expectedExtAttr.Name) { - foundExtAttr = &ea - break + for _, expectedExtAttr := range node.ExtendedAttributes { + var 
foundExtAttr *fs.ExtendedAttribute + for _, ea := range extAttr { + if strings.EqualFold(ea.Name, expectedExtAttr.Name) { + foundExtAttr = &ea + break - } } - test.Assert(t, foundExtAttr != nil, "Expected extended attribute not found") - test.Equals(t, expectedExtAttr.Value, foundExtAttr.Value) } + test.Assert(t, foundExtAttr != nil, "Expected extended attribute not found") + test.Equals(t, expectedExtAttr.Value, foundExtAttr.Value) } } } From 2101dfe448b739fc284c29718447036edf991179 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Wed, 5 Jun 2024 22:40:21 -0600 Subject: [PATCH 335/893] Add missing return to fix failing test --- internal/restic/node_windows.go | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 3e1dcae18c0..9117c6a1071 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -88,16 +88,13 @@ func (node Node) restoreExtendedAttributes(path string) (err error) { // fill extended attributes in the node. This also includes the Generic attributes for windows. func (node *Node) fillExtendedAttributes(path string, _ bool) (err error) { var fileHandle windows.Handle - fileHandle, err = getFileHandleForEA(node.Type, path) + if fileHandle, err = getFileHandleForEA(node.Type, path); fileHandle == 0 { + return nil + } if err != nil { return errors.Errorf("get EA failed while opening file handle for path %v, with: %v", path, err) } - defer func() { - err := windows.CloseHandle(fileHandle) - if err != nil { - debug.Log("Error closing file handle for %s: %v\n", path, err) - } - }() + defer closeFileHandle(fileHandle, path) // Replaced inline defer with named function call //Get the windows Extended Attributes using the file handle var extAtts []fs.ExtendedAttribute extAtts, err = fs.GetFileEA(fileHandle) @@ -138,20 +135,26 @@ func getFileHandleForEA(nodeType, path string) (handle windows.Handle, err error return handle, err } +// closeFileHandle safely closes a file handle and logs any errors. +func closeFileHandle(fileHandle windows.Handle, path string) { + err := windows.CloseHandle(fileHandle) + if err != nil { + debug.Log("Error closing file handle for %s: %v\n", path, err) + } +} + // restoreExtendedAttributes handles restore of the Windows Extended Attributes to the specified path. // The Windows API requires setting of all the Extended Attributes in one call. 
func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute) (err error) { var fileHandle windows.Handle - fileHandle, err = getFileHandleForEA(nodeType, path) + if fileHandle, err = getFileHandleForEA(nodeType, path); fileHandle == 0 { + return nil + } if err != nil { return errors.Errorf("set EA failed while opening file handle for path %v, with: %v", path, err) } - defer func() { - err := windows.CloseHandle(fileHandle) - if err != nil { - debug.Log("Error closing file handle for %s: %v\n", path, err) - } - }() + defer closeFileHandle(fileHandle, path) // Replaced inline defer with named function call + if err = fs.SetFileEA(fileHandle, eas); err != nil { return errors.Errorf("set EA failed for path %v, with: %v", path, err) } From 4f053da06a8c69767e3783e1affe2849836e4777 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Wed, 5 Jun 2024 23:10:29 -0600 Subject: [PATCH 336/893] Fix test case by correcting cleanup order --- internal/fs/ea_windows_test.go | 87 +++++++++++++++++----------------- 1 file changed, 43 insertions(+), 44 deletions(-) diff --git a/internal/fs/ea_windows_test.go b/internal/fs/ea_windows_test.go index 14b7a245289..b249f43c405 100644 --- a/internal/fs/ea_windows_test.go +++ b/internal/fs/ea_windows_test.go @@ -138,36 +138,34 @@ func TestSetFileEa(t *testing.T) { // The code below was refactored from github.com/Microsoft/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea_test.go // under MIT license. func TestSetGetFileEA(t *testing.T) { - testfilePath := setupTestFile(t) - defer cleanupTestFile(t, testfilePath) + testFilePath, testFile := setupTestFile(t) + testEAs := generateTestEAs(t, 3, testFilePath) + fileHandle := openFile(t, testFilePath, windows.FILE_ATTRIBUTE_NORMAL) + defer closeFileHandle(t, testFilePath, testFile, fileHandle) - testEAs := generateTestEAs(t, 3, testfilePath) - fileHandle := openFile(t, testfilePath, windows.FILE_ATTRIBUTE_NORMAL) - defer closeFileHandle(t, fileHandle, testfilePath) - - testSetGetEA(t, fileHandle, testEAs, testfilePath) + testSetGetEA(t, testFilePath, fileHandle, testEAs) } // The code is new code and reuses code refactored from github.com/Microsoft/go-winio/blob/a7564fd482feb903f9562a135f1317fd3b480739/ea_test.go // under MIT license. 
func TestSetGetFolderEA(t *testing.T) { - testfolderPath := setupTestFolder(t) - defer cleanupTestFolder(t, testfolderPath) + testFolderPath := setupTestFolder(t) - testEAs := generateTestEAs(t, 3, testfolderPath) - fileHandle := openFile(t, testfolderPath, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS) - defer closeFileHandle(t, fileHandle, testfolderPath) + testEAs := generateTestEAs(t, 3, testFolderPath) + fileHandle := openFile(t, testFolderPath, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS) + defer closeFileHandle(t, testFolderPath, nil, fileHandle) - testSetGetEA(t, fileHandle, testEAs, testfolderPath) + testSetGetEA(t, testFolderPath, fileHandle, testEAs) } -func setupTestFile(t *testing.T) string { +func setupTestFile(t *testing.T) (testFilePath string, testFile *os.File) { tempDir := t.TempDir() - testfilePath := filepath.Join(tempDir, "testfile.txt") - if _, err := os.Create(testfilePath); err != nil { + testFilePath = filepath.Join(tempDir, "testfile.txt") + var err error + if testFile, err = os.Create(testFilePath); err != nil { t.Fatalf("failed to create temporary file: %s", err) } - return testfilePath + return testFilePath, testFile } func setupTestFolder(t *testing.T) string { @@ -191,6 +189,18 @@ func generateTestEAs(t *testing.T, nAttrs int, path string) []ExtendedAttribute return testEAs } +func getRandomInt() int64 { + nBig, err := rand.Int(rand.Reader, big.NewInt(27)) + if err != nil { + panic(err) + } + n := nBig.Int64() + if n == 0 { + n = getRandomInt() + } + return n +} + func openFile(t *testing.T, path string, attributes uint32) windows.Handle { utf16Path := windows.StringToUTF16Ptr(path) fileAccessRightReadWriteEA := uint32(0x8 | 0x10) @@ -201,13 +211,26 @@ func openFile(t *testing.T, path string, attributes uint32) windows.Handle { return fileHandle } -func closeFileHandle(t *testing.T, handle windows.Handle, path string) { +func closeFileHandle(t *testing.T, testfilePath string, testFile *os.File, handle windows.Handle) { + if testFile != nil { + err := testFile.Close() + if err != nil { + t.Logf("Error closing file %s: %v\n", testFile.Name(), err) + } + } if err := windows.Close(handle); err != nil { - t.Logf("Error closing file handle %s: %v\n", path, err) + t.Logf("Error closing file handle %s: %v\n", testfilePath, err) + } + cleanupTestFile(t, testfilePath) +} + +func cleanupTestFile(t *testing.T, path string) { + if err := os.Remove(path); err != nil { + t.Logf("Error removing file/folder %s: %v\n", path, err) } } -func testSetGetEA(t *testing.T, handle windows.Handle, testEAs []ExtendedAttribute, path string) { +func testSetGetEA(t *testing.T, path string, handle windows.Handle, testEAs []ExtendedAttribute) { if err := SetFileEA(handle, testEAs); err != nil { t.Fatalf("set EA for path %s failed: %s", path, err) } @@ -222,27 +245,3 @@ func testSetGetEA(t *testing.T, handle windows.Handle, testEAs []ExtendedAttribu t.Fatalf("EAs read from path %s don't match", path) } } - -func cleanupTestFile(t *testing.T, path string) { - if err := os.Remove(path); err != nil { - t.Logf("Error removing file %s: %v\n", path, err) - } -} - -func cleanupTestFolder(t *testing.T, path string) { - if err := os.Remove(path); err != nil { - t.Logf("Error removing folder %s: %v\n", path, err) - } -} - -func getRandomInt() int64 { - nBig, err := rand.Int(rand.Reader, big.NewInt(27)) - if err != nil { - panic(err) - } - n := nBig.Int64() - if n == 0 { - n = getRandomInt() - } - return n -} From 08c43d2c7e851b61720ccdfd0f179f142cb9717e Mon Sep 
17 00:00:00 2001 From: Michael Eischer Date: Fri, 7 Jun 2024 21:00:10 +0200 Subject: [PATCH 337/893] update release verification script for latest docker --- doc/developer_information.rst | 2 +- helpers/verify-release-binaries.sh | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/developer_information.rst b/doc/developer_information.rst index c7757e087e8..f0fe28c32b2 100644 --- a/doc/developer_information.rst +++ b/doc/developer_information.rst @@ -123,7 +123,7 @@ The specified go compiler version must match the one used to build the official binaries. For example, for restic 0.16.2 the command would be ``helpers/verify-release-binaries.sh 0.16.2 1.21.3``. -The script requires bash, curl, docker, git, gpg, shasum and tar. +The script requires bash, curl, docker (version >= 25.0), git, gpg, shasum and tar. The script first downloads all release binaries, checks the SHASUM256 file and its signature. Afterwards it checks that the tarball matches the restic git repository diff --git a/helpers/verify-release-binaries.sh b/helpers/verify-release-binaries.sh index 4e80528e2ba..5ac57027bbb 100755 --- a/helpers/verify-release-binaries.sh +++ b/helpers/verify-release-binaries.sh @@ -89,13 +89,14 @@ extract_docker() { restic_platform=$3 out=restic_${restic_version}_linux_${restic_platform}.bz2 + # requires at least docker 25.0 docker image pull --platform "linux/${docker_platform}" ${image}:${restic_version} > /dev/null docker image save ${image}:${restic_version} -o docker.tar mkdir img - tar xvf docker.tar -C img --wildcards \*/layer.tar > /dev/null + tar xvf docker.tar -C img --wildcards blobs/sha256/\* > /dev/null rm docker.tar - for i in img/*/layer.tar; do + for i in img/blobs/sha256/*; do tar -xvf "$i" -C img usr/bin/restic 2> /dev/null 1>&2 || true if [[ -f img/usr/bin/restic ]]; then if [[ -f restic-docker ]]; then From edd3e214c2337ae69758c46506a1e94c81b67d68 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 7 Jun 2024 21:44:49 +0200 Subject: [PATCH 338/893] ui/table: fix width calculation of multi-line column headers --- internal/ui/table/table.go | 2 +- internal/ui/table/table_test.go | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/internal/ui/table/table.go b/internal/ui/table/table.go index ae09063be01..0423ddb4838 100644 --- a/internal/ui/table/table.go +++ b/internal/ui/table/table.go @@ -142,7 +142,7 @@ func (t *Table) Write(w io.Writer) error { for i, desc := range t.columns { for _, line := range strings.Split(desc, "\n") { if columnWidths[i] < ui.TerminalDisplayWidth(line) { - columnWidths[i] = ui.TerminalDisplayWidth(desc) + columnWidths[i] = ui.TerminalDisplayWidth(line) } } } diff --git a/internal/ui/table/table_test.go b/internal/ui/table/table_test.go index 7a94b7f9bca..2902860b90e 100644 --- a/internal/ui/table/table_test.go +++ b/internal/ui/table/table_test.go @@ -29,6 +29,21 @@ first column ---------------------- data: first data field ---------------------- +`, + }, + { + func(t testing.TB) *Table { + table := New() + table.AddColumn("first\ncolumn", "{{.First}}") + table.AddRow(struct{ First string }{"data"}) + return table + }, + ` +first +column +------ +data +------ `, }, { From 7cd324fe2638a99c4fc1853782bcdc838eacf8a2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 7 Jun 2024 21:45:40 +0200 Subject: [PATCH 339/893] ui/table: avoid duplicate table cell width calculation --- internal/ui/table/table.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git 
a/internal/ui/table/table.go b/internal/ui/table/table.go index 0423ddb4838..1c535cadbec 100644 --- a/internal/ui/table/table.go +++ b/internal/ui/table/table.go @@ -141,16 +141,18 @@ func (t *Table) Write(w io.Writer) error { columnWidths := make([]int, columns) for i, desc := range t.columns { for _, line := range strings.Split(desc, "\n") { - if columnWidths[i] < ui.TerminalDisplayWidth(line) { - columnWidths[i] = ui.TerminalDisplayWidth(line) + width := ui.TerminalDisplayWidth(line) + if columnWidths[i] < width { + columnWidths[i] = width } } } for _, line := range lines { for i, content := range line { for _, l := range strings.Split(content, "\n") { - if columnWidths[i] < ui.TerminalDisplayWidth(l) { - columnWidths[i] = ui.TerminalDisplayWidth(l) + width := ui.TerminalDisplayWidth(l) + if columnWidths[i] < width { + columnWidths[i] = width } } } From b02117ef0b9aeb06781e470b45480e0705be938a Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 19 May 2024 23:30:14 +0530 Subject: [PATCH 340/893] restore: read includes, insensitive includes, excludes and insensitive excludes from a file feature for gh-4781 --- cmd/restic/cmd_restore.go | 60 +++++++++++++++++++++++++++++++++++---- cmd/restic/exclude.go | 20 ++++++------- 2 files changed, 65 insertions(+), 15 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 5161be50d68..f8a62c36061 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -45,11 +45,15 @@ Exit status is 0 if the command was successful, and non-zero if there was any er // RestoreOptions collects all options for the restore command. type RestoreOptions struct { - Exclude []string - InsensitiveExclude []string - Include []string - InsensitiveInclude []string - Target string + Exclude []string + ExcludeFiles []string + InsensitiveExclude []string + InsensitiveExcludeFiles []string + Include []string + IncludeFiles []string + InsensitiveInclude []string + InsensitiveIncludeFiles []string + Target string restic.SnapshotFilter Sparse bool Verify bool @@ -66,6 +70,10 @@ func init() { flags.StringArrayVarP(&restoreOptions.Include, "include", "i", nil, "include a `pattern`, exclude everything else (can be specified multiple times)") flags.StringArrayVar(&restoreOptions.InsensitiveInclude, "iinclude", nil, "same as --include but ignores the casing of `pattern`") flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to") + flags.StringArrayVar(&restoreOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)") + flags.StringArrayVar(&restoreOptions.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns") + flags.StringArrayVar(&restoreOptions.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)") + flags.StringArrayVar(&restoreOptions.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of `file`names in patterns") initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter) flags.BoolVar(&restoreOptions.Sparse, "sparse", false, "restore files as sparse") @@ -176,6 +184,27 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, excludePatterns := filter.ParsePatterns(opts.Exclude) insensitiveExcludePatterns := filter.ParsePatterns(opts.InsensitiveExclude) + + if len(opts.ExcludeFiles) > 0 { + patternsFromFile, err := readPatternsFromFiles(opts.ExcludeFiles) + if 
err != nil { + return err + } + + excludePatternsFromFile := filter.ParsePatterns(patternsFromFile) + excludePatterns = append(excludePatterns, excludePatternsFromFile...) + } + + if len(opts.InsensitiveExcludeFiles) > 0 { + patternsFromFile, err := readPatternsFromFiles(opts.ExcludeFiles) + if err != nil { + return err + } + + iexcludePatternsFromFile := filter.ParsePatterns(patternsFromFile) + insensitiveExcludePatterns = append(insensitiveExcludePatterns, iexcludePatternsFromFile...) + } + selectExcludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { matched, err := filter.List(excludePatterns, item) if err != nil { @@ -199,6 +228,27 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, includePatterns := filter.ParsePatterns(opts.Include) insensitiveIncludePatterns := filter.ParsePatterns(opts.InsensitiveInclude) + + if len(opts.IncludeFiles) > 0 { + patternsFromFile, err := readPatternsFromFiles(opts.IncludeFiles) + if err != nil { + return err + } + + includePatternsFromFile := filter.ParsePatterns(patternsFromFile) + includePatterns = append(includePatterns, includePatternsFromFile...) + } + + if len(opts.InsensitiveIncludeFiles) > 0 { + patternsFromFile, err := readPatternsFromFiles(opts.InsensitiveIncludeFiles) + if err != nil { + return err + } + + iincludePatternsFromFile := filter.ParsePatterns(patternsFromFile) + insensitiveIncludePatterns = append(insensitiveIncludePatterns, iincludePatternsFromFile...) + } + selectIncludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { matched, childMayMatch, err := filter.ListWithChild(includePatterns, item) if err != nil { diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go index d9bb63aebfa..4657e4915dd 100644 --- a/cmd/restic/exclude.go +++ b/cmd/restic/exclude.go @@ -385,12 +385,12 @@ func rejectBySize(maxSizeStr string) (RejectFunc, error) { }, nil } -// readExcludePatternsFromFiles reads all exclude files and returns the list of -// exclude patterns. For each line, leading and trailing white space is removed +// readPatternsFromFiles reads all files and returns the list of +// patterns. For each line, leading and trailing white space is removed // and comment lines are ignored. For each remaining pattern, environment // variables are resolved. For adding a literal dollar sign ($), write $$ to // the file. 
-func readExcludePatternsFromFiles(excludeFiles []string) ([]string, error) { +func readPatternsFromFiles(files []string) ([]string, error) { getenvOrDollar := func(s string) string { if s == "$" { return "$" @@ -398,8 +398,8 @@ func readExcludePatternsFromFiles(excludeFiles []string) ([]string, error) { return os.Getenv(s) } - var excludes []string - for _, filename := range excludeFiles { + var patterns []string + for _, filename := range files { err := func() (err error) { data, err := textfile.Read(filename) if err != nil { @@ -421,15 +421,15 @@ func readExcludePatternsFromFiles(excludeFiles []string) ([]string, error) { } line = os.Expand(line, getenvOrDollar) - excludes = append(excludes, line) + patterns = append(patterns, line) } return scanner.Err() }() if err != nil { - return nil, fmt.Errorf("failed to read excludes from file %q: %w", filename, err) + return nil, fmt.Errorf("failed to read patterns from file %q: %w", filename, err) } } - return excludes, nil + return patterns, nil } type excludePatternOptions struct { @@ -454,7 +454,7 @@ func (opts excludePatternOptions) CollectPatterns() ([]RejectByNameFunc, error) var fs []RejectByNameFunc // add patterns from file if len(opts.ExcludeFiles) > 0 { - excludePatterns, err := readExcludePatternsFromFiles(opts.ExcludeFiles) + excludePatterns, err := readPatternsFromFiles(opts.ExcludeFiles) if err != nil { return nil, err } @@ -467,7 +467,7 @@ func (opts excludePatternOptions) CollectPatterns() ([]RejectByNameFunc, error) } if len(opts.InsensitiveExcludeFiles) > 0 { - excludes, err := readExcludePatternsFromFiles(opts.InsensitiveExcludeFiles) + excludes, err := readPatternsFromFiles(opts.InsensitiveExcludeFiles) if err != nil { return nil, err } From 7d5dd6db669ddd99dd7c58a8621f9e0ed3def400 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 19 May 2024 23:40:20 +0530 Subject: [PATCH 341/893] fix: add strings.ToLower for insensitive includes and excludes read from file --- cmd/restic/cmd_restore.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index f8a62c36061..f71133a0a76 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -201,6 +201,10 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, return err } + for i, str := range patternsFromFile { + patternsFromFile[i] = strings.ToLower(str) + } + iexcludePatternsFromFile := filter.ParsePatterns(patternsFromFile) insensitiveExcludePatterns = append(insensitiveExcludePatterns, iexcludePatternsFromFile...) } @@ -235,6 +239,10 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, return err } + for i, str := range patternsFromFile { + patternsFromFile[i] = strings.ToLower(str) + } + includePatternsFromFile := filter.ParsePatterns(patternsFromFile) includePatterns = append(includePatterns, includePatternsFromFile...)
} From fdf2e4ed0ebe5c1af33b0b23fb9aade3060ef075 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 1 Jun 2024 17:49:15 +0530 Subject: [PATCH 342/893] restore: refactor include and exclude - added includePatternOptions similar to excludePatternOptions - followed similar approach to backup for selecting files for restore --- cmd/restic/cmd_restore.go | 153 ++++-------------- cmd/restic/cmd_restore_integration_test.go | 59 ++++++- cmd/restic/include.go | 100 ++++++++++++ cmd/restic/integration_filter_pattern_test.go | 8 +- 4 files changed, 186 insertions(+), 134 deletions(-) create mode 100644 cmd/restic/include.go diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index f71133a0a76..e833f7c830f 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -2,12 +2,10 @@ package main import ( "context" - "strings" "time" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/restorer" "github.com/restic/restic/internal/ui" @@ -45,15 +43,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er // RestoreOptions collects all options for the restore command. type RestoreOptions struct { - Exclude []string - ExcludeFiles []string - InsensitiveExclude []string - InsensitiveExcludeFiles []string - Include []string - IncludeFiles []string - InsensitiveInclude []string - InsensitiveIncludeFiles []string - Target string + excludePatternOptions + includePatternOptions + Target string restic.SnapshotFilter Sparse bool Verify bool @@ -65,15 +57,10 @@ func init() { cmdRoot.AddCommand(cmdRestore) flags := cmdRestore.Flags() - flags.StringArrayVarP(&restoreOptions.Exclude, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") - flags.StringArrayVar(&restoreOptions.InsensitiveExclude, "iexclude", nil, "same as --exclude but ignores the casing of `pattern`") - flags.StringArrayVarP(&restoreOptions.Include, "include", "i", nil, "include a `pattern`, exclude everything else (can be specified multiple times)") - flags.StringArrayVar(&restoreOptions.InsensitiveInclude, "iinclude", nil, "same as --include but ignores the casing of `pattern`") flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to") - flags.StringArrayVar(&restoreOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)") - flags.StringArrayVar(&restoreOptions.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns") - flags.StringArrayVar(&restoreOptions.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)") - flags.StringArrayVar(&restoreOptions.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of `file`names in patterns") + + initExcludePatternOptions(flags, &restoreOptions.excludePatternOptions) + initIncludePatternOptions(flags, &restoreOptions.includePatternOptions) initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter) flags.BoolVar(&restoreOptions.Sparse, "sparse", false, "restore files as sparse") @@ -83,38 +70,8 @@ func init() { func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - hasExcludes := len(opts.Exclude) > 0 || len(opts.InsensitiveExclude) > 0 - hasIncludes := 
len(opts.Include) > 0 || len(opts.InsensitiveInclude) > 0 - - // Validate provided patterns - if len(opts.Exclude) > 0 { - if err := filter.ValidatePatterns(opts.Exclude); err != nil { - return errors.Fatalf("--exclude: %s", err) - } - } - if len(opts.InsensitiveExclude) > 0 { - if err := filter.ValidatePatterns(opts.InsensitiveExclude); err != nil { - return errors.Fatalf("--iexclude: %s", err) - } - } - if len(opts.Include) > 0 { - if err := filter.ValidatePatterns(opts.Include); err != nil { - return errors.Fatalf("--include: %s", err) - } - } - if len(opts.InsensitiveInclude) > 0 { - if err := filter.ValidatePatterns(opts.InsensitiveInclude); err != nil { - return errors.Fatalf("--iinclude: %s", err) - } - } - - for i, str := range opts.InsensitiveExclude { - opts.InsensitiveExclude[i] = strings.ToLower(str) - } - - for i, str := range opts.InsensitiveInclude { - opts.InsensitiveInclude[i] = strings.ToLower(str) - } + hasExcludes := len(opts.Excludes) > 0 || len(opts.InsensitiveExcludes) > 0 + hasIncludes := len(opts.Includes) > 0 || len(opts.InsensitiveIncludes) > 0 switch { case len(args) == 0: @@ -182,94 +139,38 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, msg.E("Warning: %s\n", message) } - excludePatterns := filter.ParsePatterns(opts.Exclude) - insensitiveExcludePatterns := filter.ParsePatterns(opts.InsensitiveExclude) - - if len(opts.ExcludeFiles) > 0 { - patternsFromFile, err := readPatternsFromFiles(opts.ExcludeFiles) - if err != nil { - return err - } - - excludePatternsFromFile := filter.ParsePatterns(patternsFromFile) - excludePatterns = append(excludePatterns, excludePatternsFromFile...) - } - - if len(opts.InsensitiveExcludeFiles) > 0 { - patternsFromFile, err := readPatternsFromFiles(opts.ExcludeFiles) - if err != nil { - return err - } - - for i, str := range patternsFromFile { - patternsFromFile[i] = strings.ToLower(str) - } - - iexcludePatternsFromFile := filter.ParsePatterns(patternsFromFile) - insensitiveExcludePatterns = append(insensitiveExcludePatterns, iexcludePatternsFromFile...) 
+ excludePatterns, err := opts.excludePatternOptions.CollectPatterns() + if err != nil { + return err } selectExcludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - matched, err := filter.List(excludePatterns, item) - if err != nil { - msg.E("error for exclude pattern: %v", err) - } - - matchedInsensitive, err := filter.List(insensitiveExcludePatterns, strings.ToLower(item)) - if err != nil { - msg.E("error for iexclude pattern: %v", err) - } + for _, rejectFn := range excludePatterns { + matched := rejectFn(item) - // An exclude filter is basically a 'wildcard but foo', - // so even if a childMayMatch, other children of a dir may not, - // therefore childMayMatch does not matter, but we should not go down - // unless the dir is selected for restore - selectedForRestore = !matched && !matchedInsensitive - childMayBeSelected = selectedForRestore && node.Type == "dir" + // An exclude filter is basically a 'wildcard but foo', + // so even if a childMayMatch, other children of a dir may not, + // therefore childMayMatch does not matter, but we should not go down + // unless the dir is selected for restore + selectedForRestore = !matched + childMayBeSelected = selectedForRestore && node.Type == "dir" - return selectedForRestore, childMayBeSelected - } - - includePatterns := filter.ParsePatterns(opts.Include) - insensitiveIncludePatterns := filter.ParsePatterns(opts.InsensitiveInclude) - - if len(opts.IncludeFiles) > 0 { - patternsFromFile, err := readPatternsFromFiles(opts.IncludeFiles) - if err != nil { - return err + return selectedForRestore, childMayBeSelected } - - for i, str := range patternsFromFile { - patternsFromFile[i] = strings.ToLower(str) - } - - includePatternsFromFile := filter.ParsePatterns(patternsFromFile) - includePatterns = append(includePatterns, includePatternsFromFile...) + return selectedForRestore, childMayBeSelected } - if len(opts.InsensitiveIncludeFiles) > 0 { - patternsFromFile, err := readPatternsFromFiles(opts.InsensitiveIncludeFiles) - if err != nil { - return err - } - - iincludePatternsFromFile := filter.ParsePatterns(patternsFromFile) - insensitiveIncludePatterns = append(insensitiveIncludePatterns, iincludePatternsFromFile...) 
+ includePatterns, err := opts.includePatternOptions.CollectPatterns() + if err != nil { + return err } selectIncludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - matched, childMayMatch, err := filter.ListWithChild(includePatterns, item) - if err != nil { - msg.E("error for include pattern: %v", err) - } - - matchedInsensitive, childMayMatchInsensitive, err := filter.ListWithChild(insensitiveIncludePatterns, strings.ToLower(item)) - if err != nil { - msg.E("error for iexclude pattern: %v", err) + for _, includeFn := range includePatterns { + selectedForRestore, childMayBeSelected = includeFn(item) } - selectedForRestore = matched || matchedInsensitive - childMayBeSelected = (childMayMatch || childMayMatchInsensitive) && node.Type == "dir" + childMayBeSelected = childMayBeSelected && node.Type == "dir" return selectedForRestore, childMayBeSelected } diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 8da6f522a2f..483cacc7f04 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -24,9 +24,9 @@ func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID res func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) { opts := RestoreOptions{ - Target: dir, - Exclude: excludes, + Target: dir, } + opts.Excludes = excludes rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) } @@ -51,13 +51,64 @@ func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths [ func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) { opts := RestoreOptions{ - Target: dir, - Include: includes, + Target: dir, } + opts.Includes = includes rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) } +func TestRestoreIncludesComplex(t *testing.T) { + testfiles := []struct { + path string + size uint + include bool // Whether this file should be included in the restore + }{ + {"dir1/include_me.txt", 100, true}, + {"dir1/something_else.txt", 200, false}, + {"dir2/also_include_me.txt", 150, true}, + {"dir2/important_file.txt", 150, true}, + {"dir3/not_included.txt", 180, false}, + {"dir4/subdir/should_include_me.txt", 120, true}, + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + // Create test files and directories + for _, testFile := range testfiles { + fullPath := filepath.Join(env.testdata, testFile.path) + rtest.OK(t, os.MkdirAll(filepath.Dir(fullPath), 0755)) + rtest.OK(t, appendRandomData(fullPath, testFile.size)) + } + + opts := BackupOptions{} + + // Perform backup + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + snapshotID := testListSnapshots(t, env.gopts, 1)[0] + + // Restore using includes + includePatterns := []string{"dir1/*include_me.txt", "dir2/**", "dir4/**/*_me.txt"} + restoredir := filepath.Join(env.base, "restore") + testRunRestoreIncludes(t, env.gopts, restoredir, snapshotID, includePatterns) + + // Check that only the included files are restored + for _, testFile := range testfiles { + restoredFilePath := filepath.Join(restoredir, "testdata", testFile.path) + _, err := os.Stat(restoredFilePath) + if testFile.include { + rtest.OK(t, err) + } else { + rtest.Assert(t, os.IsNotExist(err), "File %s should not have 
been restored", testFile.path) + } + } +} + func TestRestoreFilter(t *testing.T) { testfiles := []struct { name string diff --git a/cmd/restic/include.go b/cmd/restic/include.go new file mode 100644 index 00000000000..dcc4c7f3727 --- /dev/null +++ b/cmd/restic/include.go @@ -0,0 +1,100 @@ +package main + +import ( + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/filter" + "github.com/spf13/pflag" +) + +// IncludeByNameFunc is a function that takes a filename that should be included +// in the restore process and returns whether it should be included. +type IncludeByNameFunc func(item string) (matched bool, childMayMatch bool) + +type includePatternOptions struct { + Includes []string + InsensitiveIncludes []string + IncludeFiles []string + InsensitiveIncludeFiles []string +} + +func initIncludePatternOptions(f *pflag.FlagSet, opts *includePatternOptions) { + f.StringArrayVarP(&opts.Includes, "include", "i", nil, "include a `pattern` (can be specified multiple times)") + f.StringArrayVar(&opts.InsensitiveIncludes, "iinclude", nil, "same as --include `pattern` but ignores the casing of filenames") + f.StringArrayVar(&opts.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)") + f.StringArrayVar(&opts.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of `file`names in patterns") +} + +func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error) { + var fs []IncludeByNameFunc + if len(opts.IncludeFiles) > 0 { + includePatterns, err := readPatternsFromFiles(opts.IncludeFiles) + if err != nil { + return nil, err + } + + if err := filter.ValidatePatterns(includePatterns); err != nil { + return nil, errors.Fatalf("--include-file: %s", err) + } + + opts.Includes = append(opts.Includes, includePatterns...) + } + + if len(opts.InsensitiveIncludeFiles) > 0 { + includePatterns, err := readPatternsFromFiles(opts.InsensitiveIncludeFiles) + if err != nil { + return nil, err + } + + if err := filter.ValidatePatterns(includePatterns); err != nil { + return nil, errors.Fatalf("--iinclude-file: %s", err) + } + + opts.InsensitiveIncludes = append(opts.InsensitiveIncludes, includePatterns...) + } + + if len(opts.InsensitiveIncludes) > 0 { + if err := filter.ValidatePatterns(opts.InsensitiveIncludes); err != nil { + return nil, errors.Fatalf("--iinclude: %s", err) + } + + fs = append(fs, includeByInsensitivePattern(opts.InsensitiveIncludes)) + } + + if len(opts.Includes) > 0 { + if err := filter.ValidatePatterns(opts.Includes); err != nil { + return nil, errors.Fatalf("--include: %s", err) + } + + fs = append(fs, includeByPattern(opts.Includes)) + } + return fs, nil +} + +// includeByPattern returns a IncludeByNameFunc which includes files that match +// one of the patterns. +func includeByPattern(patterns []string) IncludeByNameFunc { + parsedPatterns := filter.ParsePatterns(patterns) + return func(item string) (matched bool, childMayMatch bool) { + matched, childMayMatch, err := filter.ListWithChild(parsedPatterns, item) + if err != nil { + Warnf("error for include pattern: %v", err) + } + + return matched, childMayMatch + } +} + +// includeByInsensitivePattern returns a IncludeByNameFunc which includes files that match +// one of the patterns, ignoring the casing of the filenames. 
+func includeByInsensitivePattern(patterns []string) IncludeByNameFunc { + for index, path := range patterns { + patterns[index] = strings.ToLower(path) + } + + includeFunc := includeByPattern(patterns) + return func(item string) (matched bool, childMayMatch bool) { + return includeFunc(strings.ToLower(item)) + } +} diff --git a/cmd/restic/integration_filter_pattern_test.go b/cmd/restic/integration_filter_pattern_test.go index 2eacdeea98b..f8da7592eb1 100644 --- a/cmd/restic/integration_filter_pattern_test.go +++ b/cmd/restic/integration_filter_pattern_test.go @@ -70,28 +70,28 @@ func TestRestoreFailsWhenUsingInvalidPatterns(t *testing.T) { var err error // Test --exclude - err = testRunRestoreAssumeFailure("latest", RestoreOptions{Exclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --iexclude - err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveExclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --include - err = testRunRestoreAssumeFailure("latest", RestoreOptions{Include: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{Includes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --include: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --iinclude - err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveInclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{InsensitiveIncludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --iinclude: invalid pattern(s) provided: *[._]log[.-][0-9] From 14d2799b44b08587ab1695ae42a5e602ec7dae04 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 1 Jun 2024 18:04:14 +0530 Subject: [PATCH 343/893] fix: move include and exclude pattern validations to top --- cmd/restic/cmd_restore.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index e833f7c830f..44394769c0b 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -73,6 +73,16 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, hasExcludes := len(opts.Excludes) > 0 || len(opts.InsensitiveExcludes) > 0 hasIncludes := len(opts.Includes) > 0 || len(opts.InsensitiveIncludes) > 0 + excludePatterns, err := opts.excludePatternOptions.CollectPatterns() + if err != nil { + return err + } + + includePatterns, err := opts.includePatternOptions.CollectPatterns() + if err != nil { + return err + } + switch { case len(args) == 0: return errors.Fatal("no snapshot ID specified") @@ -139,11 +149,6 @@ func runRestore(ctx 
context.Context, opts RestoreOptions, gopts GlobalOptions, msg.E("Warning: %s\n", message) } - excludePatterns, err := opts.excludePatternOptions.CollectPatterns() - if err != nil { - return err - } - selectExcludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { for _, rejectFn := range excludePatterns { matched := rejectFn(item) @@ -160,11 +165,6 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, return selectedForRestore, childMayBeSelected } - includePatterns, err := opts.includePatternOptions.CollectPatterns() - if err != nil { - return err - } - selectIncludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { for _, includeFn := range includePatterns { selectedForRestore, childMayBeSelected = includeFn(item) From 4e449ffaff483092c421b8a3a3d2b5137620bf13 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 1 Jun 2024 19:46:12 +0530 Subject: [PATCH 344/893] test: add tests for reading patterns from file --- cmd/restic/cmd_restore.go | 8 +-- cmd/restic/integration_filter_pattern_test.go | 55 +++++++++++++++++++ 2 files changed, 59 insertions(+), 4 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 44394769c0b..3ce2c8649a9 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -73,12 +73,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, hasExcludes := len(opts.Excludes) > 0 || len(opts.InsensitiveExcludes) > 0 hasIncludes := len(opts.Includes) > 0 || len(opts.InsensitiveIncludes) > 0 - excludePatterns, err := opts.excludePatternOptions.CollectPatterns() + excludePatternFns, err := opts.excludePatternOptions.CollectPatterns() if err != nil { return err } - includePatterns, err := opts.includePatternOptions.CollectPatterns() + includePatternFns, err := opts.includePatternOptions.CollectPatterns() if err != nil { return err } @@ -150,7 +150,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, } selectExcludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - for _, rejectFn := range excludePatterns { + for _, rejectFn := range excludePatternFns { matched := rejectFn(item) // An exclude filter is basically a 'wildcard but foo', @@ -166,7 +166,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, } selectIncludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - for _, includeFn := range includePatterns { + for _, includeFn := range includePatternFns { selectedForRestore, childMayBeSelected = includeFn(item) } diff --git a/cmd/restic/integration_filter_pattern_test.go b/cmd/restic/integration_filter_pattern_test.go index f8da7592eb1..0f547bdc252 100644 --- a/cmd/restic/integration_filter_pattern_test.go +++ b/cmd/restic/integration_filter_pattern_test.go @@ -97,3 +97,58 @@ func TestRestoreFailsWhenUsingInvalidPatterns(t *testing.T) { *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) } + +func TestRestoreFailsWhenUsingInvalidPatternsFromFile(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + // Create an include file with some invalid patterns + includeFile := env.base + "/includefile" + fileErr := os.WriteFile(includeFile, []byte("*.go\n*[._]log[.-][0-9]\n!*[._]log[.-][0-9]"), 0644) + if fileErr != nil { + 
t.Fatalf("Could not write include file: %v", fileErr) + } + + err := testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{IncludeFiles: []string{includeFile}}}, env.gopts) + rtest.Equals(t, `Fatal: --include-file: invalid pattern(s) provided: +*[._]log[.-][0-9] +!*[._]log[.-][0-9]`, err.Error()) + + // Create an exclude file with some invalid patterns + excludeFile := env.base + "/excludefile" + fileErr = os.WriteFile(excludeFile, []byte("*.go\n*[._]log[.-][0-9]\n!*[._]log[.-][0-9]"), 0644) + if fileErr != nil { + t.Fatalf("Could not write exclude file: %v", fileErr) + } + + err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{ExcludeFiles: []string{excludeFile}}}, env.gopts) + rtest.Equals(t, `Fatal: --exclude-file: invalid pattern(s) provided: +*[._]log[.-][0-9] +!*[._]log[.-][0-9]`, err.Error()) + + // Create an insentive include file with some invalid patterns + insensitiveIncludeFile := env.base + "/insensitiveincludefile" + fileErr = os.WriteFile(insensitiveIncludeFile, []byte("*.go\n*[._]log[.-][0-9]\n!*[._]log[.-][0-9]"), 0644) + if fileErr != nil { + t.Fatalf("Could not write insensitive include file: %v", fileErr) + } + + err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{InsensitiveIncludeFiles: []string{insensitiveIncludeFile}}}, env.gopts) + rtest.Equals(t, `Fatal: --iinclude-file: invalid pattern(s) provided: +*[._]log[.-][0-9] +!*[._]log[.-][0-9]`, err.Error()) + + // Create an insensitive exclude file with some invalid patterns + insensitiveExcludeFile := env.base + "/insensitiveexcludefile" + fileErr = os.WriteFile(insensitiveExcludeFile, []byte("*.go\n*[._]log[.-][0-9]\n!*[._]log[.-][0-9]"), 0644) + if fileErr != nil { + t.Fatalf("Could not write insensitive exclude file: %v", fileErr) + } + + err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludeFiles: []string{insensitiveExcludeFile}}}, env.gopts) + rtest.Equals(t, `Fatal: --iexclude-file: invalid pattern(s) provided: +*[._]log[.-][0-9] +!*[._]log[.-][0-9]`, err.Error()) +} From 1a7574e4b4555c25a548a60d1df2be0235bc840f Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 1 Jun 2024 20:02:16 +0530 Subject: [PATCH 345/893] test: add tests for include By pattern --- cmd/restic/include_test.go | 59 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 cmd/restic/include_test.go diff --git a/cmd/restic/include_test.go b/cmd/restic/include_test.go new file mode 100644 index 00000000000..751bfbb7685 --- /dev/null +++ b/cmd/restic/include_test.go @@ -0,0 +1,59 @@ +package main + +import ( + "testing" +) + +func TestIncludeByPattern(t *testing.T) { + var tests = []struct { + filename string + include bool + }{ + {filename: "/home/user/foo.go", include: true}, + {filename: "/home/user/foo.c", include: false}, + {filename: "/home/user/foobar", include: false}, + {filename: "/home/user/foobar/x", include: false}, + {filename: "/home/user/README", include: false}, + {filename: "/home/user/README.md", include: true}, + } + + patterns := []string{"*.go", "README.md"} + + for _, tc := range tests { + t.Run(tc.filename, func(t *testing.T) { + includeFunc := includeByPattern(patterns) + matched, _ := includeFunc(tc.filename) + if matched != tc.include { + t.Fatalf("wrong result for filename %v: want %v, got %v", + tc.filename, tc.include, matched) + } + }) + } +} + +func 
TestIncludeByInsensitivePattern(t *testing.T) { + var tests = []struct { + filename string + include bool + }{ + {filename: "/home/user/foo.GO", include: true}, + {filename: "/home/user/foo.c", include: false}, + {filename: "/home/user/foobar", include: false}, + {filename: "/home/user/FOObar/x", include: false}, + {filename: "/home/user/README", include: false}, + {filename: "/home/user/readme.MD", include: true}, + } + + patterns := []string{"*.go", "README.md"} + + for _, tc := range tests { + t.Run(tc.filename, func(t *testing.T) { + includeFunc := includeByInsensitivePattern(patterns) + matched, _ := includeFunc(tc.filename) + if matched != tc.include { + t.Fatalf("wrong result for filename %v: want %v, got %v", + tc.filename, tc.include, matched) + } + }) + } +} From 2a2c09e6660e55b1859392236c30b00b939cde92 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 1 Jun 2024 20:03:01 +0530 Subject: [PATCH 346/893] changelog: update changelog for gh-4781 --- changelog/unreleased/issue-4781 | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 changelog/unreleased/issue-4781 diff --git a/changelog/unreleased/issue-4781 b/changelog/unreleased/issue-4781 new file mode 100644 index 00000000000..b4af20885a4 --- /dev/null +++ b/changelog/unreleased/issue-4781 @@ -0,0 +1,8 @@ +Enhancement: Add restore flags to read include and exclude patterns from files + +Restic now supports reading include and exclude patterns from files using the +`--include-file`, `--exclude-file`, `--iinclude-file` and `--iexclude-file` +flags. + +https://github.com/restic/restic/issues/4781 +https://github.com/restic/restic/pull/4811 \ No newline at end of file From e579dfe72aba472256b9733909ef8021361caec7 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 1 Jun 2024 20:24:53 +0530 Subject: [PATCH 347/893] doc: update documentation for restore command --- doc/050_restore.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/050_restore.rst b/doc/050_restore.rst index ce17a1cf736..5fc94c145ff 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -68,6 +68,9 @@ There are case insensitive variants of ``--exclude`` and ``--include`` called ``--iexclude`` and ``--iinclude``. These options will behave the same way but ignore the casing of paths. +There are also ``--include-file``, ``--exclude-file``, ``--iinclude-file`` and + ``--iexclude-file`` flags that read the include and exclude patterns from a file. + Restoring symbolic links on windows is only possible when the user has ``SeCreateSymbolicLinkPrivilege`` privilege or is running as admin. This is a restriction of windows not restic. 
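As a rough illustration of the pattern-file handling behind the ``--include-file``/``--exclude-file`` flags documented above (they reuse the readPatternsFromFiles helper shown earlier in this series), the following standalone Go sketch mirrors the documented behaviour: surrounding whitespace is trimmed, blank and '#' comment lines are skipped, environment variables are expanded, and $$ yields a literal dollar sign. The helper name and the sample file content are illustrative only.

package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

// normalizePatternLines sketches how pattern lines read from a file are cleaned up
// before being parsed as filter patterns.
func normalizePatternLines(sc *bufio.Scanner) []string {
    getenvOrDollar := func(s string) string {
        if s == "$" {
            return "$" // "$$" in the file becomes a literal dollar sign
        }
        return os.Getenv(s)
    }

    var patterns []string
    for sc.Scan() {
        line := strings.TrimSpace(sc.Text())
        if line == "" || strings.HasPrefix(line, "#") {
            continue // drop blank lines and comments
        }
        patterns = append(patterns, os.Expand(line, getenvOrDollar))
    }
    return patterns
}

func main() {
    input := "# patterns for restic restore --include-file\n*.go\n  $HOME/docs/**  \nprice$$list.txt\n"
    sc := bufio.NewScanner(strings.NewReader(input))
    fmt.Println(normalizePatternLines(sc))
}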
From 3a521761214995dcefedabdb329dacd67cbf217d Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 1 Jun 2024 20:37:52 +0530 Subject: [PATCH 348/893] restore: accumulate results of multiple pattern checks addressing review comments --- cmd/restic/cmd_restore.go | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 3ce2c8649a9..1a342800a37 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -150,24 +150,27 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, } selectExcludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + matched := false for _, rejectFn := range excludePatternFns { - matched := rejectFn(item) - - // An exclude filter is basically a 'wildcard but foo', - // so even if a childMayMatch, other children of a dir may not, - // therefore childMayMatch does not matter, but we should not go down - // unless the dir is selected for restore - selectedForRestore = !matched - childMayBeSelected = selectedForRestore && node.Type == "dir" - - return selectedForRestore, childMayBeSelected + matched = matched || rejectFn(item) } + // An exclude filter is basically a 'wildcard but foo', + // so even if a childMayMatch, other children of a dir may not, + // therefore childMayMatch does not matter, but we should not go down + // unless the dir is selected for restore + selectedForRestore = !matched + childMayBeSelected = selectedForRestore && node.Type == "dir" + return selectedForRestore, childMayBeSelected } selectIncludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + selectedForRestore = false + childMayBeSelected = false for _, includeFn := range includePatternFns { - selectedForRestore, childMayBeSelected = includeFn(item) + matched, childMayMatch := includeFn(item) + selectedForRestore = selectedForRestore || matched + childMayBeSelected = childMayBeSelected || childMayMatch } childMayBeSelected = childMayBeSelected && node.Type == "dir" From 24a247a0dc26cb2fa397d67e398633e610c75945 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 8 Jun 2024 13:21:34 +0530 Subject: [PATCH 349/893] test: update test case name --- cmd/restic/cmd_restore_integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 483cacc7f04..27272fe40d7 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -58,7 +58,7 @@ func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snaps rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) } -func TestRestoreIncludesComplex(t *testing.T) { +func TestRestoreIncludes(t *testing.T) { testfiles := []struct { path string size uint From bab760369f52125e177659ccffca1fd52ce16fce Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 5 Jun 2024 22:46:34 +0200 Subject: [PATCH 350/893] repository: double check that there is no repository before init Apparently, calling `Stat` on the config file can be unreliable for some backends.
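The hunk below implements this guard by listing key files before a new config is written. A minimal sketch of the same idea against a hypothetical in-memory backend (the lister interface, ListKeys method and memBackend type are invented for illustration and are not restic APIs):

package main

import (
    "context"
    "errors"
    "fmt"
)

// lister stands in for a backend that can enumerate stored key files.
type lister interface {
    ListKeys(ctx context.Context, fn func(id string) error) error
}

// ensureNotInitialized refuses to (re)initialize as soon as any key file exists,
// even if an earlier existence check on the config file reported nothing.
func ensureNotInitialized(ctx context.Context, be lister) error {
    return be.ListKeys(ctx, func(id string) error {
        return errors.New("repository already contains keys")
    })
}

type memBackend struct{ keys []string }

func (m memBackend) ListKeys(_ context.Context, fn func(id string) error) error {
    for _, k := range m.keys {
        if err := fn(k); err != nil {
            return err
        }
    }
    return nil
}

func main() {
    fmt.Println(ensureNotInitialized(context.Background(), memBackend{}))                     // <nil>: safe to init
    fmt.Println(ensureNotInitialized(context.Background(), memBackend{keys: []string{"k1"}})) // error: already initialized
}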
--- internal/repository/repository.go | 8 ++++++++ internal/repository/repository_test.go | 17 +++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index f2c28cecee5..775ff796417 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -772,6 +772,14 @@ func (r *Repository) Init(ctx context.Context, version uint, password string, ch if err == nil { return errors.New("repository master key and config already initialized") } + // double check to make sure that a repository is not accidentally reinitialized + // if the backend somehow fails to stat the config file. An initialized repository + // must always contain at least one key file. + if err := r.List(ctx, restic.KeyFile, func(_ restic.ID, _ int64) error { + return errors.New("repository already contains keys") + }); err != nil { + return err + } cfg, err := restic.CreateConfig(version) if err != nil { diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index c06d4f25bda..0c04319d66c 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -9,6 +9,7 @@ import ( "math/rand" "os" "path/filepath" + "strings" "sync" "testing" "time" @@ -457,3 +458,19 @@ func TestListPack(t *testing.T) { rtest.Assert(t, !c.Has(backend.Handle{Type: restic.PackFile, Name: packID.String()}), "tree pack should no longer be cached as ListPack does not set IsMetadata in the backend.Handle") } + +func TestNoDoubleInit(t *testing.T) { + r, be := repository.TestRepositoryWithVersion(t, restic.StableRepoVersion) + + repo, err := repository.New(be, repository.Options{}) + rtest.OK(t, err) + + pol := r.Config().ChunkerPolynomial + err = repo.Init(context.TODO(), r.Config().Version, test.TestPassword, &pol) + rtest.Assert(t, strings.Contains(err.Error(), "repository master key and config already initialized"), "expected config exist error, got %q", err) + + // must also prevent init if only keys exist + rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: backend.ConfigFile})) + err = repo.Init(context.TODO(), r.Config().Version, test.TestPassword, &pol) + rtest.Assert(t, strings.Contains(err.Error(), "repository already contains keys"), "expected already contains keys error, got %q", err) +} From 188684ee9e7c119d9e3abd07ba2c210fc1a680a0 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 9 Jun 2024 19:07:23 +0530 Subject: [PATCH 351/893] fix: include and exclude logic, add tests for include file and exclude file --- cmd/restic/cmd_restore.go | 20 ++- cmd/restic/cmd_restore_integration_test.go | 134 +++++++++++++++--- cmd/restic/exclude.go | 2 +- cmd/restic/include.go | 2 +- cmd/restic/integration_filter_pattern_test.go | 33 +---- 5 files changed, 139 insertions(+), 52 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 1a342800a37..03a990cc6a8 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -70,9 +70,6 @@ func init() { func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - hasExcludes := len(opts.Excludes) > 0 || len(opts.InsensitiveExcludes) > 0 - hasIncludes := len(opts.Includes) > 0 || len(opts.InsensitiveIncludes) > 0 - excludePatternFns, err := opts.excludePatternOptions.CollectPatterns() if err != nil { return err @@ -83,6 +80,9 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, return err } + 
hasExcludes := len(excludePatternFns) > 0 + hasIncludes := len(includePatternFns) > 0 + switch { case len(args) == 0: return errors.Fatal("no snapshot ID specified") @@ -153,6 +153,13 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, matched := false for _, rejectFn := range excludePatternFns { matched = matched || rejectFn(item) + + // implementing a short-circuit here to improve the performance + // to prevent additional pattern matching once the first pattern + // matches. + if matched { + break + } } // An exclude filter is basically a 'wildcard but foo', // so even if a childMayMatch, other children of a dir may not, @@ -171,9 +178,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, matched, childMayMatch := includeFn(item) selectedForRestore = selectedForRestore || matched childMayBeSelected = childMayBeSelected || childMayMatch - } + childMayBeSelected = childMayBeSelected && node.Type == "dir" - childMayBeSelected = childMayBeSelected && node.Type == "dir" + if selectedForRestore || childMayBeSelected { + break + } + } return selectedForRestore, childMayBeSelected } diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 27272fe40d7..0e1620f7e99 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -7,6 +7,7 @@ import ( "math/rand" "os" "path/filepath" + "strings" "syscall" "testing" "time" @@ -58,6 +59,67 @@ func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snaps rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) } +func testRunRestoreIncludesFromFile(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includesFile string) { + opts := RestoreOptions{ + Target: dir, + } + opts.IncludeFiles = []string{includesFile} + + rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) +} + +func testRunRestoreExcludesFromFile(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludesFile string) { + opts := RestoreOptions{ + Target: dir, + } + opts.ExcludeFiles = []string{excludesFile} + + rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) +} + +func TestRestoreMustFailWhenUsingBothIncludesAndExcludes(t *testing.T) { + testfiles := []struct { + path string + size uint + }{ + {"dir1/include_me.txt", 100}, + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + // Create test files and directories + for _, testFile := range testfiles { + fullPath := filepath.Join(env.testdata, testFile.path) + rtest.OK(t, os.MkdirAll(filepath.Dir(fullPath), 0755)) + rtest.OK(t, appendRandomData(fullPath, testFile.size)) + } + + opts := BackupOptions{} + // Perform backup + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + snapshotID := testListSnapshots(t, env.gopts, 1)[0] + + // Add both include and exclude patterns + includePatterns := []string{"dir1/*include_me.txt", "dir2/**", "dir4/**/*_me.txt"} + excludePatterns := []string{"dir1/*include_me.txt", "dir2/**", "dir4/**/*_me.txt"} + + restoredir := filepath.Join(env.base, "restore") + + restoreOpts := RestoreOptions{ + Target: restoredir, + } + restoreOpts.Includes = includePatterns + restoreOpts.Excludes = excludePatterns + + err := testRunRestoreAssumeFailure(snapshotID.String(), restoreOpts, env.gopts) + rtest.Assert(t, err != nil, 
"restore must fail if include and exclude patterns are provided") +} + func TestRestoreIncludes(t *testing.T) { testfiles := []struct { path string @@ -97,16 +159,33 @@ func TestRestoreIncludes(t *testing.T) { restoredir := filepath.Join(env.base, "restore") testRunRestoreIncludes(t, env.gopts, restoredir, snapshotID, includePatterns) - // Check that only the included files are restored - for _, testFile := range testfiles { - restoredFilePath := filepath.Join(restoredir, "testdata", testFile.path) - _, err := os.Stat(restoredFilePath) - if testFile.include { - rtest.OK(t, err) - } else { - rtest.Assert(t, os.IsNotExist(err), "File %s should not have been restored", testFile.path) + testRestoreFileInclusions := func(t *testing.T, env *testEnvironment, includePatterns []string) { + // Check that only the included files are restored + for _, testFile := range testfiles { + restoredFilePath := filepath.Join(restoredir, "testdata", testFile.path) + _, err := os.Stat(restoredFilePath) + if testFile.include { + rtest.OK(t, err) + } else { + rtest.Assert(t, os.IsNotExist(err), "File %s should not have been restored", testFile.path) + } } } + + testRestoreFileInclusions(t, env, includePatterns) + + // Create an include file with some patterns + patternsFile := env.base + "/patternsFile" + fileErr := os.WriteFile(patternsFile, []byte(strings.Join(includePatterns, "\n")), 0644) + if fileErr != nil { + t.Fatalf("Could not write include file: %v", fileErr) + } + + restoredir = filepath.Join(env.base, "restore-include-from-file") + + testRunRestoreIncludesFromFile(t, env.gopts, restoredir, snapshotID, patternsFile) + + testRestoreFileInclusions(t, env, includePatterns) } func TestRestoreFilter(t *testing.T) { @@ -144,19 +223,38 @@ func TestRestoreFilter(t *testing.T) { rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size))) } - for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} { - base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1)) - testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat}) - for _, testFile := range testfiles { - err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size)) - if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok { - rtest.OK(t, err) - } else { - rtest.Assert(t, os.IsNotExist(err), - "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err) + excludePatterns := []string{"*.c", "*.exe", "*", "*file3*"} + + testRestoreFileExclusions := func(t *testing.T, env *testEnvironment, excludePatterns []string) { + for i, pat := range excludePatterns { + base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1)) + testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat}) + for _, testFile := range testfiles { + err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size)) + if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok { + rtest.OK(t, err) + } else { + rtest.Assert(t, os.IsNotExist(err), + "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err) + } } } } + + testRestoreFileExclusions(t, env, excludePatterns) + + // Create an include file with some patterns + patternsFile := env.base + "/patternsFile" + fileErr := os.WriteFile(patternsFile, []byte(strings.Join(excludePatterns, "\n")), 0644) + if fileErr != nil { + t.Fatalf("Could not write include file: %v", fileErr) + } + + restoredir := filepath.Join(env.base, 
"restore-exclude-from-file") + + testRunRestoreExcludesFromFile(t, env.gopts, restoredir, snapshotID, patternsFile) + + testRestoreFileExclusions(t, env, excludePatterns) } func TestRestore(t *testing.T) { diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go index 4657e4915dd..9f5f4051128 100644 --- a/cmd/restic/exclude.go +++ b/cmd/restic/exclude.go @@ -443,7 +443,7 @@ func initExcludePatternOptions(f *pflag.FlagSet, opts *excludePatternOptions) { f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames") f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)") - f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns") + f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of filenames in patterns") } func (opts *excludePatternOptions) Empty() bool { diff --git a/cmd/restic/include.go b/cmd/restic/include.go index dcc4c7f3727..64659d98f35 100644 --- a/cmd/restic/include.go +++ b/cmd/restic/include.go @@ -23,7 +23,7 @@ func initIncludePatternOptions(f *pflag.FlagSet, opts *includePatternOptions) { f.StringArrayVarP(&opts.Includes, "include", "i", nil, "include a `pattern` (can be specified multiple times)") f.StringArrayVar(&opts.InsensitiveIncludes, "iinclude", nil, "same as --include `pattern` but ignores the casing of filenames") f.StringArrayVar(&opts.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)") - f.StringArrayVar(&opts.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of `file`names in patterns") + f.StringArrayVar(&opts.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of filenames in patterns") } func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error) { diff --git a/cmd/restic/integration_filter_pattern_test.go b/cmd/restic/integration_filter_pattern_test.go index 0f547bdc252..dccbcc0a0b5 100644 --- a/cmd/restic/integration_filter_pattern_test.go +++ b/cmd/restic/integration_filter_pattern_test.go @@ -105,49 +105,28 @@ func TestRestoreFailsWhenUsingInvalidPatternsFromFile(t *testing.T) { testRunInit(t, env.gopts) // Create an include file with some invalid patterns - includeFile := env.base + "/includefile" - fileErr := os.WriteFile(includeFile, []byte("*.go\n*[._]log[.-][0-9]\n!*[._]log[.-][0-9]"), 0644) + patternsFile := env.base + "/patternsFile" + fileErr := os.WriteFile(patternsFile, []byte("*.go\n*[._]log[.-][0-9]\n!*[._]log[.-][0-9]"), 0644) if fileErr != nil { t.Fatalf("Could not write include file: %v", fileErr) } - err := testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{IncludeFiles: []string{includeFile}}}, env.gopts) + err := testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{IncludeFiles: []string{patternsFile}}}, env.gopts) rtest.Equals(t, `Fatal: --include-file: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) - // Create an exclude file with some invalid patterns - excludeFile := env.base + "/excludefile" - fileErr = os.WriteFile(excludeFile, 
[]byte("*.go\n*[._]log[.-][0-9]\n!*[._]log[.-][0-9]"), 0644) - if fileErr != nil { - t.Fatalf("Could not write exclude file: %v", fileErr) - } - - err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{ExcludeFiles: []string{excludeFile}}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{ExcludeFiles: []string{patternsFile}}}, env.gopts) rtest.Equals(t, `Fatal: --exclude-file: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) - // Create an insentive include file with some invalid patterns - insensitiveIncludeFile := env.base + "/insensitiveincludefile" - fileErr = os.WriteFile(insensitiveIncludeFile, []byte("*.go\n*[._]log[.-][0-9]\n!*[._]log[.-][0-9]"), 0644) - if fileErr != nil { - t.Fatalf("Could not write insensitive include file: %v", fileErr) - } - - err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{InsensitiveIncludeFiles: []string{insensitiveIncludeFile}}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{InsensitiveIncludeFiles: []string{patternsFile}}}, env.gopts) rtest.Equals(t, `Fatal: --iinclude-file: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) - // Create an insensitive exclude file with some invalid patterns - insensitiveExcludeFile := env.base + "/insensitiveexcludefile" - fileErr = os.WriteFile(insensitiveExcludeFile, []byte("*.go\n*[._]log[.-][0-9]\n!*[._]log[.-][0-9]"), 0644) - if fileErr != nil { - t.Fatalf("Could not write insensitive exclude file: %v", fileErr) - } - - err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludeFiles: []string{insensitiveExcludeFile}}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludeFiles: []string{patternsFile}}}, env.gopts) rtest.Equals(t, `Fatal: --iexclude-file: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) From fe412e255394a1926c5cdb22e4bd881a7c9371cf Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Mon, 10 Jun 2024 01:55:39 +0530 Subject: [PATCH 352/893] fix: restore inclusion logic and restore tests doc: update exclude and include docs --- cmd/restic/cmd_restore.go | 4 +- cmd/restic/cmd_restore_integration_test.go | 84 +++++++++------------- cmd/restic/exclude.go | 2 +- cmd/restic/include.go | 2 +- 4 files changed, 36 insertions(+), 56 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 03a990cc6a8..12b698950f9 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -178,12 +178,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, matched, childMayMatch := includeFn(item) selectedForRestore = selectedForRestore || matched childMayBeSelected = childMayBeSelected || childMayMatch - childMayBeSelected = childMayBeSelected && node.Type == "dir" - if selectedForRestore || childMayBeSelected { + if selectedForRestore && childMayBeSelected { break } } + childMayBeSelected = childMayBeSelected && node.Type == "dir" return selectedForRestore, childMayBeSelected } diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 0e1620f7e99..b0543850bbb 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ 
b/cmd/restic/cmd_restore_integration_test.go @@ -13,7 +13,6 @@ import ( "time" "github.com/restic/restic/internal/feature" - "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/termstatus" @@ -78,32 +77,11 @@ func testRunRestoreExcludesFromFile(t testing.TB, gopts GlobalOptions, dir strin } func TestRestoreMustFailWhenUsingBothIncludesAndExcludes(t *testing.T) { - testfiles := []struct { - path string - size uint - }{ - {"dir1/include_me.txt", 100}, - } - env, cleanup := withTestEnvironment(t) defer cleanup() testRunInit(t, env.gopts) - // Create test files and directories - for _, testFile := range testfiles { - fullPath := filepath.Join(env.testdata, testFile.path) - rtest.OK(t, os.MkdirAll(filepath.Dir(fullPath), 0755)) - rtest.OK(t, appendRandomData(fullPath, testFile.size)) - } - - opts := BackupOptions{} - // Perform backup - testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) - testRunCheck(t, env.gopts) - - snapshotID := testListSnapshots(t, env.gopts, 1)[0] - // Add both include and exclude patterns includePatterns := []string{"dir1/*include_me.txt", "dir2/**", "dir4/**/*_me.txt"} excludePatterns := []string{"dir1/*include_me.txt", "dir2/**", "dir4/**/*_me.txt"} @@ -116,8 +94,9 @@ func TestRestoreMustFailWhenUsingBothIncludesAndExcludes(t *testing.T) { restoreOpts.Includes = includePatterns restoreOpts.Excludes = excludePatterns - err := testRunRestoreAssumeFailure(snapshotID.String(), restoreOpts, env.gopts) - rtest.Assert(t, err != nil, "restore must fail if include and exclude patterns are provided") + err := testRunRestoreAssumeFailure("latest", restoreOpts, env.gopts) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "exclude and include patterns are mutually exclusive"), + "expected: %s error, got %v", "exclude and include patterns are mutually exclusive", err) } func TestRestoreIncludes(t *testing.T) { @@ -159,7 +138,7 @@ func TestRestoreIncludes(t *testing.T) { restoredir := filepath.Join(env.base, "restore") testRunRestoreIncludes(t, env.gopts, restoredir, snapshotID, includePatterns) - testRestoreFileInclusions := func(t *testing.T, env *testEnvironment, includePatterns []string) { + testRestoreFileInclusions := func(t *testing.T) { // Check that only the included files are restored for _, testFile := range testfiles { restoredFilePath := filepath.Join(restoredir, "testdata", testFile.path) @@ -172,7 +151,7 @@ func TestRestoreIncludes(t *testing.T) { } } - testRestoreFileInclusions(t, env, includePatterns) + testRestoreFileInclusions(t) // Create an include file with some patterns patternsFile := env.base + "/patternsFile" @@ -185,18 +164,19 @@ func TestRestoreIncludes(t *testing.T) { testRunRestoreIncludesFromFile(t, env.gopts, restoredir, snapshotID, patternsFile) - testRestoreFileInclusions(t, env, includePatterns) + testRestoreFileInclusions(t) } func TestRestoreFilter(t *testing.T) { testfiles := []struct { - name string - size uint + name string + size uint + exclude bool }{ - {"testfile1.c", 100}, - {"testfile2.exe", 101}, - {"subdir1/subdir2/testfile3.docx", 102}, - {"subdir1/subdir2/testfile4.c", 102}, + {"testfile1.c", 100, true}, + {"testfile2.exe", 101, true}, + {"subdir1/subdir2/testfile3.docx", 102, true}, + {"subdir1/subdir2/testfile4.c", 102, false}, } env, cleanup := withTestEnvironment(t) @@ -223,38 +203,38 @@ func TestRestoreFilter(t *testing.T) { rtest.OK(t, 
testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size))) } - excludePatterns := []string{"*.c", "*.exe", "*", "*file3*"} - - testRestoreFileExclusions := func(t *testing.T, env *testEnvironment, excludePatterns []string) { - for i, pat := range excludePatterns { - base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1)) - testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat}) - for _, testFile := range testfiles { - err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size)) - if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok { - rtest.OK(t, err) - } else { - rtest.Assert(t, os.IsNotExist(err), - "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err) - } + excludePatterns := []string{"testfile1.c", "*.exe", "*file3*"} + + // checks if the files are excluded correctly + testRestoredFileExclusions := func(t *testing.T, restoredir string) { + for _, testFile := range testfiles { + restoredFilePath := filepath.Join(restoredir, "testdata", testFile.name) + _, err := os.Stat(restoredFilePath) + if testFile.exclude { + rtest.Assert(t, os.IsNotExist(err), "File %s should not have been restored", testFile.name) + } else { + rtest.OK(t, testFileSize(restoredFilePath, int64(testFile.size))) } } } - testRestoreFileExclusions(t, env, excludePatterns) + // restore with excludes + restoredir := filepath.Join(env.base, "restore-with-excludes") + testRunRestoreExcludes(t, env.gopts, restoredir, snapshotID, excludePatterns) + testRestoredFileExclusions(t, restoredir) - // Create an include file with some patterns + // Create an exclude file with some patterns patternsFile := env.base + "/patternsFile" fileErr := os.WriteFile(patternsFile, []byte(strings.Join(excludePatterns, "\n")), 0644) if fileErr != nil { t.Fatalf("Could not write include file: %v", fileErr) } - restoredir := filepath.Join(env.base, "restore-exclude-from-file") - + // restore with excludes from file + restoredir = filepath.Join(env.base, "restore-with-exclude-from-file") testRunRestoreExcludesFromFile(t, env.gopts, restoredir, snapshotID, patternsFile) - testRestoreFileExclusions(t, env, excludePatterns) + testRestoredFileExclusions(t, restoredir) } func TestRestore(t *testing.T) { diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go index 9f5f4051128..4657e4915dd 100644 --- a/cmd/restic/exclude.go +++ b/cmd/restic/exclude.go @@ -443,7 +443,7 @@ func initExcludePatternOptions(f *pflag.FlagSet, opts *excludePatternOptions) { f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames") f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)") - f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of filenames in patterns") + f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns") } func (opts *excludePatternOptions) Empty() bool { diff --git a/cmd/restic/include.go b/cmd/restic/include.go index 64659d98f35..dcc4c7f3727 100644 --- a/cmd/restic/include.go +++ b/cmd/restic/include.go @@ -23,7 +23,7 @@ func initIncludePatternOptions(f *pflag.FlagSet, opts *includePatternOptions) { 
f.StringArrayVarP(&opts.Includes, "include", "i", nil, "include a `pattern` (can be specified multiple times)") f.StringArrayVar(&opts.InsensitiveIncludes, "iinclude", nil, "same as --include `pattern` but ignores the casing of filenames") f.StringArrayVar(&opts.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)") - f.StringArrayVar(&opts.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of filenames in patterns") + f.StringArrayVar(&opts.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of `file`names in patterns") } func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error) { From 058292700c7a54ab001ba9897311ac37690b1e0b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 26 May 2024 14:24:15 +0200 Subject: [PATCH 353/893] mount: support fuse-t on macOS --- changelog/unreleased/issue-4048 | 6 ++++ doc/050_restore.rst | 6 ++-- go.mod | 3 +- go.sum | 51 ++++++++++++++++++++++++++------- 4 files changed, 50 insertions(+), 16 deletions(-) create mode 100644 changelog/unreleased/issue-4048 diff --git a/changelog/unreleased/issue-4048 b/changelog/unreleased/issue-4048 new file mode 100644 index 00000000000..0754171f332 --- /dev/null +++ b/changelog/unreleased/issue-4048 @@ -0,0 +1,6 @@ +Enhancement: Add support for fuse-t for `mount` on macOS + +Restic now supports creating fuse mounts using fuse-t on macOS. + +https://github.com/restic/restic/issues/4048 +https://github.com/restic/restic/pull/4825 diff --git a/doc/050_restore.rst b/doc/050_restore.rst index ce17a1cf736..44bbb393e42 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -103,9 +103,9 @@ command to serve the repository with FUSE: Mounting repositories via FUSE is only possible on Linux, macOS and FreeBSD. On Linux, the ``fuse`` kernel module needs to be loaded and the ``fusermount`` -command needs to be in the ``PATH``. On macOS, you need `FUSE for macOS -`__. On FreeBSD, you may need to install FUSE -and load the kernel module (``kldload fuse``). +command needs to be in the ``PATH``. On macOS, you need `FUSE-T +`__ or `FUSE for macOS `__. +On FreeBSD, you may need to install FUSE and load the kernel module (``kldload fuse``). Restic supports storage and preservation of hard links. 
However, since hard links exist in the scope of a filesystem by definition, restoring diff --git a/go.mod b/go.mod index a19d84f0e7f..89b93d99dfe 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/Backblaze/blazer v0.6.1 - github.com/anacrolix/fuse v0.2.0 + github.com/anacrolix/fuse v0.3.1-0.20240611061849-e4651fbc304d github.com/cenkalti/backoff/v4 v4.3.0 github.com/cespare/xxhash/v2 v2.3.0 github.com/elithrar/simple-scrypt v1.3.0 @@ -63,7 +63,6 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/kr/fs v0.1.0 // indirect - github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect diff --git a/go.sum b/go.sum index 3ec38b3dad2..afe3ceb9df1 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,15 @@ github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1 github.com/Backblaze/blazer v0.6.1/go.mod h1:7/jrGx4O6OKOto6av+hLwelPR8rwZ+PLxQ5ZOiYAjwY= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74= -github.com/anacrolix/fuse v0.2.0 h1:pc+To78kI2d/WUjIyrsdqeJQAesuwpGxlI3h1nAv3Do= -github.com/anacrolix/fuse v0.2.0/go.mod h1:Kfu02xBwnySDpH3N23BmrP3MDfwAQGRLUCj6XyeOvBQ= +github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk= +github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0= +github.com/anacrolix/fuse v0.3.1-0.20240611061849-e4651fbc304d h1:wCFrzeUrl7FASv1w6VRCaEAlG5u8dkSNGcj2OeJ78Vc= +github.com/anacrolix/fuse v0.3.1-0.20240611061849-e4651fbc304d/go.mod h1:vN3X/6E+uHNjg5F8Oy9FD9I+pYxeDWeB8mNjIoxL5ds= +github.com/anacrolix/generics v0.0.0-20230113004304-d6428d516633 h1:TO3pytMIJ98CO1nYtqbFx/iuTHi4OgIUoE2wNfDdKxw= +github.com/anacrolix/generics v0.0.0-20230113004304-d6428d516633/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= +github.com/anacrolix/log v0.13.1/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/log v0.14.1 h1:j2FcIpYZ5FbANetUcm5JNu+zUBGADSp/VbjhUPrAY0k= +github.com/anacrolix/log v0.14.1/go.mod h1:1OmJESOtxQGNMlUO5rcv96Vpp9mfMqXXbe2RdinFLdY= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -47,7 +54,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/dvyukov/go-fuzz v0.0.0-20220726122315-1d375ef9f9f6/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elithrar/simple-scrypt v1.3.0 
h1:KIlOlxdoQf9JWKl5lMAJ28SY2URB0XTRDn2TckyzAZg= github.com/elithrar/simple-scrypt v1.3.0/go.mod h1:U2XQRI95XHY0St410VE3UjT7vuKb1qPwrl/EJwEqnZo= @@ -59,6 +66,7 @@ github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -90,6 +98,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -120,7 +129,12 @@ github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/4 github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -146,6 +160,7 @@ github.com/peterbourgon/unixtransport v0.0.4 h1:UTF0FxXCAglvoZz9jaGPYjEg52DjBLDY github.com/peterbourgon/unixtransport v0.0.4/go.mod h1:o8aUkOCa8W/BIXpi15uKvbSabjtBh0JhSOJGSfoOhAU= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= @@ -161,6 +176,10 @@ github.com/prometheus/client_model 
v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/restic/chunker v0.4.0 h1:YUPYCUn70MYP7VO4yllypp2SjmsRhRJaad3xKu1QFRw= github.com/restic/chunker v0.4.0/go.mod h1:z0cH2BejpW636LXw0R/BGyv+Ey8+m9QGiOanDHItzyw= github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -183,7 +202,7 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= @@ -208,10 +227,13 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= +golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -220,9 +242,9 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= @@ -234,7 +256,7 @@ golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= @@ -242,11 +264,12 @@ golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -254,6 +277,7 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -264,6 +288,7 @@ golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= @@ -276,12 +301,13 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= google.golang.org/api v0.181.0 h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4= google.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k= @@ -315,7 +341,10 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 
v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= From 1e7bc6dab8f1aa7d34dfbf5d1803430b3a6d12a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jun 2024 20:09:17 +0000 Subject: [PATCH 354/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.5.2 to 1.6.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/internal/v1.5.2...sdk/azcore/v1.6.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- go.mod | 14 +++++++------- go.sum | 34 ++++++++++++++++------------------ 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index 9ec46b38f17..2047d4657bb 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.41.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.3.1-0.20240611061849-e4651fbc304d @@ -26,13 +26,13 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.23.0 - golang.org/x/net v0.25.0 + golang.org/x/crypto v0.24.0 + golang.org/x/net v0.26.0 golang.org/x/oauth2 v0.20.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.20.0 - golang.org/x/term v0.20.0 - golang.org/x/text v0.15.0 + golang.org/x/sys v0.21.0 + golang.org/x/term v0.21.0 + golang.org/x/text v0.16.0 golang.org/x/time v0.5.0 google.golang.org/api v0.182.0 ) @@ -43,7 +43,7 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/iam v1.1.8 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect diff --git a/go.sum b/go.sum index 1b7a414b23e..e229a0bdc1e 100644 --- a/go.sum +++ b/go.sum @@ -13,10 +13,10 @@ cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RB cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= 
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E= @@ -51,7 +51,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvyukov/go-fuzz v0.0.0-20220726122315-1d375ef9f9f6/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= @@ -178,8 +177,8 @@ github.com/restic/chunker v0.4.0/go.mod h1:z0cH2BejpW636LXw0R/BGyv+Ey8+m9QGiOanD github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -224,8 +223,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= @@ -248,8 +247,8 @@ golang.org/x/net 
v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -278,21 +277,21 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -342,12 +341,11 @@ google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFW google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From c166ad7daf85e5a64b4d2534cf21ce67f71c896b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 30 May 2024 22:58:44 +0200 Subject: [PATCH 355/893] restore: factor out file creation helper --- internal/restorer/fileswriter.go | 79 ++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 34 deletions(-) diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go index cbe89c30cc4..5e4931c63e9 100644 --- a/internal/restorer/fileswriter.go +++ b/internal/restorer/fileswriter.go @@ -39,6 +39,48 @@ func newFilesWriter(count int) *filesWriter { } } +func createFile(path string, createSize int64, sparse bool) (*os.File, error) { + var f *os.File + var err error + if f, err = os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600); err != nil { + if !fs.IsAccessDenied(err) { + return nil, err + } + + // If file is readonly, clear the readonly flag by resetting the + // permissions of the file and try again + // as the metadata will be set again in the second pass and the + // readonly flag will be applied again if needed. + if err = fs.ResetPermissions(path); err != nil { + return nil, err + } + if f, err = os.OpenFile(path, os.O_TRUNC|os.O_WRONLY, 0600); err != nil { + return nil, err + } + } + + if createSize > 0 { + if sparse { + err = truncateSparse(f, createSize) + if err != nil { + _ = f.Close() + return nil, err + } + } else { + err := fs.PreallocateFile(f, createSize) + if err != nil { + // Just log the preallocate error but don't let it cause the restore process to fail. + // Preallocate might return an error if the filesystem (implementation) does not + // support preallocation or our parameters combination to the preallocate call + // This should yield a syscall.ENOTSUP error, but some other errors might also + // show up. 
+ debug.Log("Failed to preallocate %v with size %v: %v", path, createSize, err) + } + } + } + return f, err +} + func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, createSize int64, sparse bool) error { bucket := &w.buckets[uint(xxhash.Sum64String(path))%uint(len(w.buckets))] @@ -53,21 +95,9 @@ func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, create var f *os.File var err error if createSize >= 0 { - if f, err = os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600); err != nil { - if fs.IsAccessDenied(err) { - // If file is readonly, clear the readonly flag by resetting the - // permissions of the file and try again - // as the metadata will be set again in the second pass and the - // readonly flag will be applied again if needed. - if err = fs.ResetPermissions(path); err != nil { - return nil, err - } - if f, err = os.OpenFile(path, os.O_TRUNC|os.O_WRONLY, 0600); err != nil { - return nil, err - } - } else { - return nil, err - } + f, err = createFile(path, createSize, sparse) + if err != nil { + return nil, err } } else if f, err = os.OpenFile(path, os.O_WRONLY, 0600); err != nil { return nil, err @@ -76,25 +106,6 @@ func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, create wr := &partialFile{File: f, users: 1, sparse: sparse} bucket.files[path] = wr - if createSize >= 0 { - if sparse { - err = truncateSparse(f, createSize) - if err != nil { - return nil, err - } - } else { - err := fs.PreallocateFile(wr.File, createSize) - if err != nil { - // Just log the preallocate error but don't let it cause the restore process to fail. - // Preallocate might return an error if the filesystem (implementation) does not - // support preallocation or our parameters combination to the preallocate call - // This should yield a syscall.ENOTSUP error, but some other errors might also - // show up. - debug.Log("Failed to preallocate %v with size %v: %v", path, createSize, err) - } - } - } - return wr, nil } From 30320a249a9379ad1e17d4a4dc5b3ef9aad0343f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 30 May 2024 23:06:15 +0200 Subject: [PATCH 356/893] restore: let filerestorer also handle empty files This get's rid of the corresponding special cases. 
--- internal/restorer/filerestorer.go | 22 ++++++++++++++ internal/restorer/filerestorer_test.go | 4 +++ internal/restorer/restorer.go | 40 -------------------------- 3 files changed, 26 insertions(+), 40 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 3551857dd91..f3a68c58a36 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -120,6 +120,13 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { // create packInfo from fileInfo for _, file := range r.files { fileBlobs := file.blobs.(restic.IDs) + if len(fileBlobs) == 0 { + err := r.restoreEmptyFileAt(file.location) + if errFile := r.sanitizeError(file, err); errFile != nil { + return errFile + } + } + largeFile := len(fileBlobs) > largeFileBlobCount var packsMap map[restic.ID][]fileBlobInfo if largeFile { @@ -195,6 +202,21 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { return wg.Wait() } +func (r *fileRestorer) restoreEmptyFileAt(location string) error { + f, err := createFile(r.targetPath(location), 0, false) + if err != nil { + return err + } + if err = f.Close(); err != nil { + return err + } + + if r.progress != nil { + r.progress.AddProgress(location, 0, 0) + } + return nil +} + type blobToFileOffsetsMapping map[restic.ID]struct { files map[*fileInfo][]int64 // file -> offsets (plural!) of the blob in the file blob restic.Blob diff --git a/internal/restorer/filerestorer_test.go b/internal/restorer/filerestorer_test.go index 03797e0c83b..d29c0dcead0 100644 --- a/internal/restorer/filerestorer_test.go +++ b/internal/restorer/filerestorer_test.go @@ -206,6 +206,10 @@ func TestFileRestorerBasic(t *testing.T) { {"data3-1", "pack3-1"}, }, }, + { + name: "empty", + blobs: []TestBlob{}, + }, }, nil, sparse) } } diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index c471800df63..f691c4cae45 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -203,31 +203,6 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location return res.restoreNodeMetadataTo(node, path, location) } -func (res *Restorer) restoreEmptyFileAt(node *restic.Node, target, location string) error { - wr, err := os.OpenFile(target, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) - if fs.IsAccessDenied(err) { - // If file is readonly, clear the readonly flag by resetting the - // permissions of the file and try again - // as the metadata will be set again in the second pass and the - // readonly flag will be applied again if needed. - if err = fs.ResetPermissions(target); err != nil { - return err - } - if wr, err = os.OpenFile(target, os.O_TRUNC|os.O_WRONLY, 0600); err != nil { - return err - } - } - if err = wr.Close(); err != nil { - return err - } - - if res.progress != nil { - res.progress.AddProgress(location, 0, 0) - } - - return res.restoreNodeMetadataTo(node, target, location) -} - // RestoreTo creates the directories and files in the snapshot below dst. // Before an item is created, res.Filter is called. 
func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { @@ -274,13 +249,6 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { return nil } - if node.Size == 0 { - if res.progress != nil { - res.progress.AddFile(node.Size) - } - return nil // deal with empty files later - } - if node.Links > 1 { if idx.Has(node.Inode, node.DeviceID) { if res.progress != nil { @@ -320,14 +288,6 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { return res.restoreNodeTo(ctx, node, target, location) } - // create empty files, but not hardlinks to empty files - if node.Size == 0 && (node.Links < 2 || !idx.Has(node.Inode, node.DeviceID)) { - if node.Links > 1 { - idx.Add(node.Inode, node.DeviceID, location) - } - return res.restoreEmptyFileAt(node, target, location) - } - if idx.Has(node.Inode, node.DeviceID) && idx.Value(node.Inode, node.DeviceID) != location { return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location) } From 607daeed4ff670a3ab61b4fc8638e88f383d1ecc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 11:07:53 +0200 Subject: [PATCH 357/893] restore: move nil pointer check into restoreui --- internal/restorer/filerestorer.go | 10 ++-------- internal/restorer/restorer.go | 30 ++++++++---------------------- internal/ui/restore/progress.go | 8 ++++++++ 3 files changed, 18 insertions(+), 30 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index f3a68c58a36..49f5f7af855 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -211,9 +211,7 @@ func (r *fileRestorer) restoreEmptyFileAt(location string) error { return err } - if r.progress != nil { - r.progress.AddProgress(location, 0, 0) - } + r.progress.AddProgress(location, 0, 0) return nil } @@ -361,11 +359,7 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID, createSize = file.size } writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse) - - if r.progress != nil { - r.progress.AddProgress(file.location, uint64(len(blobData)), uint64(file.size)) - } - + r.progress.AddProgress(file.location, uint64(len(blobData)), uint64(file.size)) return writeErr } err := r.sanitizeError(file, writeToFile()) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index f691c4cae45..12ce84d5cb4 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -170,10 +170,7 @@ func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, targe return err } - if res.progress != nil { - res.progress.AddProgress(location, 0, 0) - } - + res.progress.AddProgress(location, 0, 0) return res.restoreNodeMetadataTo(node, target, location) } @@ -195,9 +192,7 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location return errors.WithStack(err) } - if res.progress != nil { - res.progress.AddProgress(location, 0, 0) - } + res.progress.AddProgress(location, 0, 0) // TODO investigate if hardlinks have separate metadata on any supported system return res.restoreNodeMetadataTo(node, path, location) @@ -225,9 +220,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { _, err = res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ enterDir: func(_ *restic.Node, target, location string) error { debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", 
location) - if res.progress != nil { - res.progress.AddFile(0) - } + res.progress.AddFile(0) // create dir with default permissions // #leaveDir restores dir metadata after visiting all children return fs.MkdirAll(target, 0700) @@ -243,27 +236,20 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { } if node.Type != "file" { - if res.progress != nil { - res.progress.AddFile(0) - } + res.progress.AddFile(0) return nil } if node.Links > 1 { if idx.Has(node.Inode, node.DeviceID) { - if res.progress != nil { - // a hardlinked file does not increase the restore size - res.progress.AddFile(0) - } + // a hardlinked file does not increase the restore size + res.progress.AddFile(0) return nil } idx.Add(node.Inode, node.DeviceID, location) } - if res.progress != nil { - res.progress.AddFile(node.Size) - } - + res.progress.AddFile(node.Size) filerestorer.addFile(location, node.Content, int64(node.Size)) return nil @@ -296,7 +282,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { }, leaveDir: func(node *restic.Node, target, location string) error { err := res.restoreNodeMetadataTo(node, target, location) - if err == nil && res.progress != nil { + if err == nil { res.progress.AddProgress(location, 0, 0) } return err diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index f2bd5d38b91..0e120b6a668 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -59,6 +59,10 @@ func (p *Progress) update(runtime time.Duration, final bool) { // AddFile starts tracking a new file with the given size func (p *Progress) AddFile(size uint64) { + if p == nil { + return + } + p.m.Lock() defer p.m.Unlock() @@ -68,6 +72,10 @@ func (p *Progress) AddFile(size uint64) { // AddProgress accumulates the number of bytes written for a file func (p *Progress) AddProgress(name string, bytesWrittenPortion uint64, bytesTotal uint64) { + if p == nil { + return + } + p.m.Lock() defer p.m.Unlock() From fd2ff464a241f4239acd1d0c61d411b4769eb6dc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 11:29:16 +0200 Subject: [PATCH 358/893] restorer: remove stale comment --- internal/restorer/filerestorer.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 49f5f7af855..8fe01c635d4 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -14,11 +14,6 @@ import ( "github.com/restic/restic/internal/ui/restore" ) -// TODO if a blob is corrupt, there may be good blob copies in other packs -// TODO evaluate if it makes sense to split download and processing workers -// pro: can (slowly) read network and decrypt/write files concurrently -// con: each worker needs to keep one pack in memory - const ( largeFileBlobCount = 25 ) From 0fcd89f89251d8c220cba1a99a53c737ba214ea3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 11:29:39 +0200 Subject: [PATCH 359/893] restorer: remove special case for blobs with many occurrences Loading blobs by now is no longer prone to timeouts when processing takes a long time. 
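For context: the removed branch was a workaround for slow backends. Its comment notes
that with a maximum blob size of 8MB the normal streaming path writes at most about
8MB x 100 = 800MB while a single blob is handled, so blobs referenced more than 100
times were split off and processed first to avoid connection timeouts. With that
concern gone, the per-pack download reduces to a single pass, roughly (reconstructed
from the remaining context lines of the diff below, not the literal code):

    // track already processed blobs for precise error reporting
    processedBlobs := restic.NewBlobSet()
    err := r.downloadBlobs(ctx, pack.id, blobs, processedBlobs)
    return r.reportError(blobs, processedBlobs, err)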
--- internal/restorer/filerestorer.go | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 8fe01c635d4..b71e86712a1 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -255,32 +255,6 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error { // track already processed blobs for precise error reporting processedBlobs := restic.NewBlobSet() - for _, entry := range blobs { - occurrences := 0 - for _, offsets := range entry.files { - occurrences += len(offsets) - } - // With a maximum blob size of 8MB, the normal blob streaming has to write - // at most 800MB for a single blob. This should be short enough to avoid - // network connection timeouts. Based on a quick test, a limit of 100 only - // selects a very small number of blobs (the number of references per blob - // - aka. `count` - seem to follow a expontential distribution) - if occurrences > 100 { - // process frequently referenced blobs first as these can take a long time to write - // which can cause backend connections to time out - delete(blobs, entry.blob.ID) - partialBlobs := blobToFileOffsetsMapping{entry.blob.ID: entry} - err := r.downloadBlobs(ctx, pack.id, partialBlobs, processedBlobs) - if err := r.reportError(blobs, processedBlobs, err); err != nil { - return err - } - } - } - - if len(blobs) == 0 { - return nil - } - err := r.downloadBlobs(ctx, pack.id, blobs, processedBlobs) return r.reportError(blobs, processedBlobs, err) } From 2b50c2606c8962538dd8c5b3639f895b22f14581 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 11:42:25 +0200 Subject: [PATCH 360/893] restorer: use options struct --- cmd/restic/cmd_restore.go | 5 ++++- internal/restorer/restorer.go | 19 +++++++++++-------- internal/restorer/restorer_test.go | 12 ++++++------ internal/restorer/restorer_unix_test.go | 4 ++-- internal/restorer/restorer_windows_test.go | 2 +- 5 files changed, 24 insertions(+), 18 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 5161be50d68..f86391b2002 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -162,7 +162,10 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, } progress := restoreui.NewProgress(printer, calculateProgressInterval(!gopts.Quiet, gopts.JSON)) - res := restorer.NewRestorer(repo, sn, opts.Sparse, progress) + res := restorer.NewRestorer(repo, sn, restorer.Options{ + Sparse: opts.Sparse, + Progress: progress, + }) totalErrors := 0 res.Error = func(location string, err error) error { diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 12ce84d5cb4..8b39f138f35 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -17,10 +17,9 @@ import ( // Restorer is used to restore a snapshot to a directory. type Restorer struct { - repo restic.Repository - sn *restic.Snapshot - sparse bool - + repo restic.Repository + sn *restic.Snapshot + sparse bool progress *restoreui.Progress Error func(location string, err error) error @@ -30,15 +29,19 @@ type Restorer struct { var restorerAbortOnAllErrors = func(_ string, err error) error { return err } +type Options struct { + Sparse bool + Progress *restoreui.Progress +} + // NewRestorer creates a restorer preloaded with the content from the snapshot id. 
-func NewRestorer(repo restic.Repository, sn *restic.Snapshot, sparse bool, - progress *restoreui.Progress) *Restorer { +func NewRestorer(repo restic.Repository, sn *restic.Snapshot, opts Options) *Restorer { r := &Restorer{ repo: repo, - sparse: sparse, + sparse: opts.Sparse, + progress: opts.Progress, Error: restorerAbortOnAllErrors, SelectFilter: func(string, string, *restic.Node) (bool, bool) { return true, true }, - progress: progress, sn: sn, } diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 757a317b278..64e10a6e8ab 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -343,7 +343,7 @@ func TestRestorer(t *testing.T) { sn, id := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes) t.Logf("snapshot saved as %v", id.Str()) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) tempdir := rtest.TempDir(t) // make sure we're creating a new subdir of the tempdir @@ -460,7 +460,7 @@ func TestRestorerRelative(t *testing.T) { sn, id := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes) t.Logf("snapshot saved as %v", id.Str()) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) tempdir := rtest.TempDir(t) cleanup := rtest.Chdir(t, tempdir) @@ -689,7 +689,7 @@ func TestRestorerTraverseTree(t *testing.T) { repo := repository.TestRepository(t) sn, _ := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) res.SelectFilter = test.Select @@ -765,7 +765,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) { }, }, noopGetGenericAttributes) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { switch filepath.ToSlash(item) { @@ -820,7 +820,7 @@ func TestVerifyCancel(t *testing.T) { repo := repository.TestRepository(t) sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) tempdir := rtest.TempDir(t) ctx, cancel := context.WithCancel(context.Background()) @@ -862,7 +862,7 @@ func TestRestorerSparseFiles(t *testing.T) { archiver.SnapshotOptions{}) rtest.OK(t, err) - res := NewRestorer(repo, sn, true, nil) + res := NewRestorer(repo, sn, Options{Sparse: true}) tempdir := rtest.TempDir(t) ctx, cancel := context.WithCancel(context.Background()) diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index 0cbfefa921f..95a83cdf775 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -31,7 +31,7 @@ func TestRestorerRestoreEmptyHardlinkedFileds(t *testing.T) { }, }, noopGetGenericAttributes) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { return true, true @@ -99,7 +99,7 @@ func TestRestorerProgressBar(t *testing.T) { mock := &printerMock{} progress := restoreui.NewProgress(mock, 0) - res := NewRestorer(repo, sn, false, progress) + res := NewRestorer(repo, sn, Options{Progress: progress}) res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { return true, true } diff --git 
a/internal/restorer/restorer_windows_test.go b/internal/restorer/restorer_windows_test.go index 684d51ace21..90ece474d30 100644 --- a/internal/restorer/restorer_windows_test.go +++ b/internal/restorer/restorer_windows_test.go @@ -269,7 +269,7 @@ func setup(t *testing.T, nodesMap map[string]Node) *Restorer { sn, _ := saveSnapshot(t, repo, Snapshot{ Nodes: nodesMap, }, getFileAttributes) - res := NewRestorer(repo, sn, false, nil) + res := NewRestorer(repo, sn, Options{}) return res } From a23cb3a42801cba15cb40b115158938991c489bc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 15:50:48 +0200 Subject: [PATCH 361/893] restore: reduce memory usage --- internal/restorer/filerestorer.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index b71e86712a1..600a702b2b1 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -161,6 +161,8 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { file.blobs = packsMap } } + // drop no longer necessary file list + r.files = nil wg, ctx := errgroup.WithContext(ctx) downloadCh := make(chan *packInfo) From 6a4ae9d6b11deeb3595d640c447aedf33c144441 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 11:43:42 +0200 Subject: [PATCH 362/893] restore: configurable overwrite behavior --- cmd/restic/cmd_restore.go | 7 +- internal/restorer/restorer.go | 139 ++++++++++++++++++++++++++++++---- 2 files changed, 129 insertions(+), 17 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index f86391b2002..a9de998be1a 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -51,8 +51,9 @@ type RestoreOptions struct { InsensitiveInclude []string Target string restic.SnapshotFilter - Sparse bool - Verify bool + Sparse bool + Verify bool + Overwrite restorer.OverwriteBehavior } var restoreOptions RestoreOptions @@ -70,6 +71,7 @@ func init() { initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter) flags.BoolVar(&restoreOptions.Sparse, "sparse", false, "restore files as sparse") flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content") + flags.Var(&restoreOptions.Overwrite, "overwrite", "overwrite behavior, one of (always|if-newer|never) (default: always)") } func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, @@ -165,6 +167,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, res := restorer.NewRestorer(repo, sn, restorer.Options{ Sparse: opts.Sparse, Progress: progress, + Overwrite: opts.Overwrite, }) totalErrors := 0 diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 8b39f138f35..267b2898c74 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -2,6 +2,7 @@ package restorer import ( "context" + "fmt" "os" "path/filepath" "sync/atomic" @@ -17,10 +18,13 @@ import ( // Restorer is used to restore a snapshot to a directory. 
type Restorer struct { - repo restic.Repository - sn *restic.Snapshot - sparse bool - progress *restoreui.Progress + repo restic.Repository + sn *restic.Snapshot + sparse bool + progress *restoreui.Progress + overwrite OverwriteBehavior + + fileList map[string]struct{} Error func(location string, err error) error Warn func(message string) @@ -30,8 +34,53 @@ type Restorer struct { var restorerAbortOnAllErrors = func(_ string, err error) error { return err } type Options struct { - Sparse bool - Progress *restoreui.Progress + Sparse bool + Progress *restoreui.Progress + Overwrite OverwriteBehavior +} + +type OverwriteBehavior int + +// Constants for different overwrite behavior +const ( + OverwriteAlways OverwriteBehavior = 0 + OverwriteIfNewer OverwriteBehavior = 1 + OverwriteNever OverwriteBehavior = 2 + OverwriteInvalid OverwriteBehavior = 3 +) + +// Set implements the method needed for pflag command flag parsing. +func (c *OverwriteBehavior) Set(s string) error { + switch s { + case "always": + *c = OverwriteAlways + case "if-newer": + *c = OverwriteIfNewer + case "never": + *c = OverwriteNever + default: + *c = OverwriteInvalid + return fmt.Errorf("invalid overwrite behavior %q, must be one of (always|if-newer|never)", s) + } + + return nil +} + +func (c *OverwriteBehavior) String() string { + switch *c { + case OverwriteAlways: + return "always" + case OverwriteIfNewer: + return "if-newer" + case OverwriteNever: + return "never" + default: + return "invalid" + } + +} +func (c *OverwriteBehavior) Type() string { + return "behavior" } // NewRestorer creates a restorer preloaded with the content from the snapshot id. @@ -40,6 +89,8 @@ func NewRestorer(repo restic.Repository, sn *restic.Snapshot, opts Options) *Res repo: repo, sparse: opts.Sparse, progress: opts.Progress, + overwrite: opts.Overwrite, + fileList: make(map[string]struct{}), Error: restorerAbortOnAllErrors, SelectFilter: func(string, string, *restic.Node) (bool, bool) { return true, true }, sn: sn, @@ -252,10 +303,12 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { idx.Add(node.Inode, node.DeviceID, location) } - res.progress.AddFile(node.Size) - filerestorer.addFile(location, node.Content, int64(node.Size)) - - return nil + return res.withOverwriteCheck(node, target, location, false, func() error { + res.progress.AddFile(node.Size) + filerestorer.addFile(location, node.Content, int64(node.Size)) + res.trackFile(location) + return nil + }) }, }) if err != nil { @@ -274,14 +327,22 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { visitNode: func(node *restic.Node, target, location string) error { debug.Log("second pass, visitNode: restore node %q", location) if node.Type != "file" { - return res.restoreNodeTo(ctx, node, target, location) + return res.withOverwriteCheck(node, target, location, false, func() error { + return res.restoreNodeTo(ctx, node, target, location) + }) } if idx.Has(node.Inode, node.DeviceID) && idx.Value(node.Inode, node.DeviceID) != location { - return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location) + return res.withOverwriteCheck(node, target, location, true, func() error { + return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location) + }) } - return res.restoreNodeMetadataTo(node, target, location) + if res.hasRestoredFile(location) { + return res.restoreNodeMetadataTo(node, target, location) + } + // don't touch skipped files + return 
nil }, leaveDir: func(node *restic.Node, target, location string) error { err := res.restoreNodeMetadataTo(node, target, location) @@ -294,6 +355,54 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { return err } +func (res *Restorer) trackFile(location string) { + res.fileList[location] = struct{}{} +} + +func (res *Restorer) hasRestoredFile(location string) bool { + _, ok := res.fileList[location] + return ok +} + +func (res *Restorer) withOverwriteCheck(node *restic.Node, target, location string, isHardlink bool, cb func() error) error { + overwrite, err := shouldOverwrite(res.overwrite, node, target) + if err != nil { + return err + } else if !overwrite { + size := node.Size + if isHardlink { + size = 0 + } + res.progress.AddFile(size) + res.progress.AddProgress(location, size, size) + return nil + } + return cb() +} + +func shouldOverwrite(overwrite OverwriteBehavior, node *restic.Node, destination string) (bool, error) { + if overwrite == OverwriteAlways { + return true, nil + } + + fi, err := fs.Lstat(destination) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return true, nil + } + return false, err + } + + if overwrite == OverwriteIfNewer { + // return if node is newer + return node.ModTime.After(fi.ModTime()), nil + } else if overwrite == OverwriteNever { + // file exists + return false, nil + } + panic("unknown overwrite behavior") +} + // Snapshot returns the snapshot this restorer is configured to use. func (res *Restorer) Snapshot() *restic.Snapshot { return res.sn @@ -324,8 +433,8 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { defer close(work) _, err := res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ - visitNode: func(node *restic.Node, target, _ string) error { - if node.Type != "file" { + visitNode: func(node *restic.Node, target, location string) error { + if node.Type != "file" || !res.hasRestoredFile(location) { return nil } select { From 64b7b6b9759c42b1484e15d5c88f7b50d9ce8e9f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 13:43:57 +0200 Subject: [PATCH 363/893] restore/ui: refactor for extensibility --- internal/ui/restore/json.go | 24 ++++++++++++------------ internal/ui/restore/json_test.go | 6 +++--- internal/ui/restore/progress.go | 28 ++++++++++++++++------------ internal/ui/restore/progress_test.go | 24 ++++++++++++------------ internal/ui/restore/text.go | 22 +++++++++++----------- internal/ui/restore/text_test.go | 6 +++--- 6 files changed, 57 insertions(+), 53 deletions(-) diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go index c1b95b00bf9..50d4fe0f76f 100644 --- a/internal/ui/restore/json.go +++ b/internal/ui/restore/json.go @@ -20,31 +20,31 @@ func (t *jsonPrinter) print(status interface{}) { t.terminal.Print(ui.ToJSONString(status)) } -func (t *jsonPrinter) Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) { +func (t *jsonPrinter) Update(p State, duration time.Duration) { status := statusUpdate{ MessageType: "status", SecondsElapsed: uint64(duration / time.Second), - TotalFiles: filesTotal, - FilesRestored: filesFinished, - TotalBytes: allBytesTotal, - BytesRestored: allBytesWritten, + TotalFiles: p.FilesTotal, + FilesRestored: p.FilesFinished, + TotalBytes: p.AllBytesTotal, + BytesRestored: p.AllBytesWritten, } - if allBytesTotal > 0 { - status.PercentDone = float64(allBytesWritten) / float64(allBytesTotal) + if p.AllBytesTotal > 0 { + status.PercentDone = 
float64(p.AllBytesWritten) / float64(p.AllBytesTotal) } t.print(status) } -func (t *jsonPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) { +func (t *jsonPrinter) Finish(p State, duration time.Duration) { status := summaryOutput{ MessageType: "summary", SecondsElapsed: uint64(duration / time.Second), - TotalFiles: filesTotal, - FilesRestored: filesFinished, - TotalBytes: allBytesTotal, - BytesRestored: allBytesWritten, + TotalFiles: p.FilesTotal, + FilesRestored: p.FilesFinished, + TotalBytes: p.AllBytesTotal, + BytesRestored: p.AllBytesWritten, } t.print(status) } diff --git a/internal/ui/restore/json_test.go b/internal/ui/restore/json_test.go index 7bcabb4d78b..7ce7b58f3c8 100644 --- a/internal/ui/restore/json_test.go +++ b/internal/ui/restore/json_test.go @@ -10,20 +10,20 @@ import ( func TestJSONPrintUpdate(t *testing.T) { term := &mockTerm{} printer := NewJSONProgress(term) - printer.Update(3, 11, 29, 47, 5*time.Second) + printer.Update(State{3, 11, 29, 47}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output) } func TestJSONPrintSummaryOnSuccess(t *testing.T) { term := &mockTerm{} printer := NewJSONProgress(term) - printer.Finish(11, 11, 47, 47, 5*time.Second) + printer.Finish(State{11, 11, 47, 47}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.output) } func TestJSONPrintSummaryOnErrors(t *testing.T) { term := &mockTerm{} printer := NewJSONProgress(term) - printer.Finish(3, 11, 29, 47, 5*time.Second) + printer.Finish(State{3, 11, 29, 47}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output) } diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index 0e120b6a668..5e501c4b38b 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -7,15 +7,19 @@ import ( "github.com/restic/restic/internal/ui/progress" ) +type State struct { + FilesFinished uint64 + FilesTotal uint64 + AllBytesWritten uint64 + AllBytesTotal uint64 +} + type Progress struct { updater progress.Updater m sync.Mutex progressInfoMap map[string]progressInfoEntry - filesFinished uint64 - filesTotal uint64 - allBytesWritten uint64 - allBytesTotal uint64 + s State started time.Time printer ProgressPrinter @@ -32,8 +36,8 @@ type term interface { } type ProgressPrinter interface { - Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) - Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) + Update(progress State, duration time.Duration) + Finish(progress State, duration time.Duration) } func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress { @@ -51,9 +55,9 @@ func (p *Progress) update(runtime time.Duration, final bool) { defer p.m.Unlock() if !final { - p.printer.Update(p.filesFinished, p.filesTotal, p.allBytesWritten, p.allBytesTotal, runtime) + p.printer.Update(p.s, runtime) } else { - p.printer.Finish(p.filesFinished, p.filesTotal, p.allBytesWritten, p.allBytesTotal, runtime) + p.printer.Finish(p.s, runtime) } } @@ -66,8 +70,8 @@ func (p *Progress) AddFile(size 
uint64) { p.m.Lock() defer p.m.Unlock() - p.filesTotal++ - p.allBytesTotal += size + p.s.FilesTotal++ + p.s.AllBytesTotal += size } // AddProgress accumulates the number of bytes written for a file @@ -86,10 +90,10 @@ func (p *Progress) AddProgress(name string, bytesWrittenPortion uint64, bytesTot entry.bytesWritten += bytesWrittenPortion p.progressInfoMap[name] = entry - p.allBytesWritten += bytesWrittenPortion + p.s.AllBytesWritten += bytesWrittenPortion if entry.bytesWritten == entry.bytesTotal { delete(p.progressInfoMap, name) - p.filesFinished++ + p.s.FilesFinished++ } } diff --git a/internal/ui/restore/progress_test.go b/internal/ui/restore/progress_test.go index 9e625aa20b7..728b743503f 100644 --- a/internal/ui/restore/progress_test.go +++ b/internal/ui/restore/progress_test.go @@ -8,7 +8,7 @@ import ( ) type printerTraceEntry struct { - filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64 + progress State duration time.Duration isFinished bool @@ -22,11 +22,11 @@ type mockPrinter struct { const mockFinishDuration = 42 * time.Second -func (p *mockPrinter) Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) { - p.trace = append(p.trace, printerTraceEntry{filesFinished, filesTotal, allBytesWritten, allBytesTotal, duration, false}) +func (p *mockPrinter) Update(progress State, duration time.Duration) { + p.trace = append(p.trace, printerTraceEntry{progress, duration, false}) } -func (p *mockPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, _ time.Duration) { - p.trace = append(p.trace, printerTraceEntry{filesFinished, filesTotal, allBytesWritten, allBytesTotal, mockFinishDuration, true}) +func (p *mockPrinter) Finish(progress State, _ time.Duration) { + p.trace = append(p.trace, printerTraceEntry{progress, mockFinishDuration, true}) } func testProgress(fn func(progress *Progress) bool) printerTrace { @@ -45,7 +45,7 @@ func TestNew(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{0, 0, 0, 0, 0, false}, + printerTraceEntry{State{0, 0, 0, 0}, 0, false}, }, result) } @@ -57,7 +57,7 @@ func TestAddFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{0, 1, 0, fileSize, 0, false}, + printerTraceEntry{State{0, 1, 0, fileSize}, 0, false}, }, result) } @@ -71,7 +71,7 @@ func TestFirstProgressOnAFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{0, 1, expectedBytesWritten, expectedBytesTotal, 0, false}, + printerTraceEntry{State{0, 1, expectedBytesWritten, expectedBytesTotal}, 0, false}, }, result) } @@ -86,7 +86,7 @@ func TestLastProgressOnAFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{1, 1, fileSize, fileSize, 0, false}, + printerTraceEntry{State{1, 1, fileSize, fileSize}, 0, false}, }, result) } @@ -102,7 +102,7 @@ func TestLastProgressOnLastFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{2, 2, 50 + fileSize, 50 + fileSize, 0, false}, + printerTraceEntry{State{2, 2, 50 + fileSize, 50 + fileSize}, 0, false}, }, result) } @@ -117,7 +117,7 @@ func TestSummaryOnSuccess(t *testing.T) { return true }) test.Equals(t, printerTrace{ - printerTraceEntry{2, 2, 50 + fileSize, 50 + fileSize, mockFinishDuration, true}, + printerTraceEntry{State{2, 2, 50 + fileSize, 50 + fileSize}, mockFinishDuration, true}, }, result) } @@ -132,6 +132,6 @@ func TestSummaryOnErrors(t *testing.T) { return true }) test.Equals(t, printerTrace{ - printerTraceEntry{1, 
2, 50 + fileSize/2, 50 + fileSize, mockFinishDuration, true}, + printerTraceEntry{State{1, 2, 50 + fileSize/2, 50 + fileSize}, mockFinishDuration, true}, }, result) } diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go index 2647bb28b1b..9da388e5172 100644 --- a/internal/ui/restore/text.go +++ b/internal/ui/restore/text.go @@ -17,30 +17,30 @@ func NewTextProgress(terminal term) ProgressPrinter { } } -func (t *textPrinter) Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) { +func (t *textPrinter) Update(p State, duration time.Duration) { timeLeft := ui.FormatDuration(duration) - formattedAllBytesWritten := ui.FormatBytes(allBytesWritten) - formattedAllBytesTotal := ui.FormatBytes(allBytesTotal) - allPercent := ui.FormatPercent(allBytesWritten, allBytesTotal) + formattedAllBytesWritten := ui.FormatBytes(p.AllBytesWritten) + formattedAllBytesTotal := ui.FormatBytes(p.AllBytesTotal) + allPercent := ui.FormatPercent(p.AllBytesWritten, p.AllBytesTotal) progress := fmt.Sprintf("[%s] %s %v files/dirs %s, total %v files/dirs %v", - timeLeft, allPercent, filesFinished, formattedAllBytesWritten, filesTotal, formattedAllBytesTotal) + timeLeft, allPercent, p.FilesFinished, formattedAllBytesWritten, p.FilesTotal, formattedAllBytesTotal) t.terminal.SetStatus([]string{progress}) } -func (t *textPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) { +func (t *textPrinter) Finish(p State, duration time.Duration) { t.terminal.SetStatus([]string{}) timeLeft := ui.FormatDuration(duration) - formattedAllBytesTotal := ui.FormatBytes(allBytesTotal) + formattedAllBytesTotal := ui.FormatBytes(p.AllBytesTotal) var summary string - if filesFinished == filesTotal && allBytesWritten == allBytesTotal { - summary = fmt.Sprintf("Summary: Restored %d files/dirs (%s) in %s", filesTotal, formattedAllBytesTotal, timeLeft) + if p.FilesFinished == p.FilesTotal && p.AllBytesWritten == p.AllBytesTotal { + summary = fmt.Sprintf("Summary: Restored %d files/dirs (%s) in %s", p.FilesTotal, formattedAllBytesTotal, timeLeft) } else { - formattedAllBytesWritten := ui.FormatBytes(allBytesWritten) + formattedAllBytesWritten := ui.FormatBytes(p.AllBytesWritten) summary = fmt.Sprintf("Summary: Restored %d / %d files/dirs (%s / %s) in %s", - filesFinished, filesTotal, formattedAllBytesWritten, formattedAllBytesTotal, timeLeft) + p.FilesFinished, p.FilesTotal, formattedAllBytesWritten, formattedAllBytesTotal, timeLeft) } t.terminal.Print(summary) diff --git a/internal/ui/restore/text_test.go b/internal/ui/restore/text_test.go index fc03904ff08..2a17239438b 100644 --- a/internal/ui/restore/text_test.go +++ b/internal/ui/restore/text_test.go @@ -22,20 +22,20 @@ func (m *mockTerm) SetStatus(lines []string) { func TestPrintUpdate(t *testing.T) { term := &mockTerm{} printer := NewTextProgress(term) - printer.Update(3, 11, 29, 47, 5*time.Second) + printer.Update(State{3, 11, 29, 47}, 5*time.Second) test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B"}, term.output) } func TestPrintSummaryOnSuccess(t *testing.T) { term := &mockTerm{} printer := NewTextProgress(term) - printer.Finish(11, 11, 47, 47, 5*time.Second) + printer.Finish(State{11, 11, 47, 47}, 5*time.Second) test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05"}, term.output) } func TestPrintSummaryOnErrors(t *testing.T) { term := &mockTerm{} printer := NewTextProgress(term) - printer.Finish(3, 11, 29, 47, 5*time.Second) + 
printer.Finish(State{3, 11, 29, 47}, 5*time.Second) test.Equals(t, []string{"Summary: Restored 3 / 11 files/dirs (29 B / 47 B) in 0:05"}, term.output) } From e47e08a68803baf8b240a9abbeb540b492695b2d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 14:12:06 +0200 Subject: [PATCH 364/893] restorer: separately track skipped files --- internal/restorer/restorer.go | 3 +-- internal/restorer/restorer_unix_test.go | 27 +++++++++++-------------- internal/ui/restore/json.go | 8 ++++++++ internal/ui/restore/json_test.go | 20 +++++++++++++++--- internal/ui/restore/progress.go | 14 +++++++++++++ internal/ui/restore/progress_test.go | 26 +++++++++++++++++------- internal/ui/restore/text.go | 6 ++++++ internal/ui/restore/text_test.go | 20 +++++++++++++++--- 8 files changed, 94 insertions(+), 30 deletions(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 267b2898c74..ae622874b3d 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -373,8 +373,7 @@ func (res *Restorer) withOverwriteCheck(node *restic.Node, target, location stri if isHardlink { size = 0 } - res.progress.AddFile(size) - res.progress.AddProgress(location, size, size) + res.progress.AddSkippedFile(size) return nil } return cb() diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index 95a83cdf775..97d2dd07d56 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -70,16 +70,13 @@ func getBlockCount(t *testing.T, filename string) int64 { } type printerMock struct { - filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64 + s restoreui.State } -func (p *printerMock) Update(_, _, _, _ uint64, _ time.Duration) { +func (p *printerMock) Update(_ restoreui.State, _ time.Duration) { } -func (p *printerMock) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, _ time.Duration) { - p.filesFinished = filesFinished - p.filesTotal = filesTotal - p.allBytesWritten = allBytesWritten - p.allBytesTotal = allBytesTotal +func (p *printerMock) Finish(s restoreui.State, _ time.Duration) { + p.s = s } func TestRestorerProgressBar(t *testing.T) { @@ -112,12 +109,12 @@ func TestRestorerProgressBar(t *testing.T) { rtest.OK(t, err) progress.Finish() - const filesFinished = 4 - const filesTotal = filesFinished - const allBytesWritten = 10 - const allBytesTotal = allBytesWritten - rtest.Assert(t, mock.filesFinished == filesFinished, "filesFinished: expected %v, got %v", filesFinished, mock.filesFinished) - rtest.Assert(t, mock.filesTotal == filesTotal, "filesTotal: expected %v, got %v", filesTotal, mock.filesTotal) - rtest.Assert(t, mock.allBytesWritten == allBytesWritten, "allBytesWritten: expected %v, got %v", allBytesWritten, mock.allBytesWritten) - rtest.Assert(t, mock.allBytesTotal == allBytesTotal, "allBytesTotal: expected %v, got %v", allBytesTotal, mock.allBytesTotal) + rtest.Equals(t, restoreui.State{ + FilesFinished: 4, + FilesTotal: 4, + FilesSkipped: 0, + AllBytesWritten: 10, + AllBytesTotal: 10, + AllBytesSkipped: 0, + }, mock.s) } diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go index 50d4fe0f76f..512640a7a95 100644 --- a/internal/ui/restore/json.go +++ b/internal/ui/restore/json.go @@ -26,8 +26,10 @@ func (t *jsonPrinter) Update(p State, duration time.Duration) { SecondsElapsed: uint64(duration / time.Second), TotalFiles: p.FilesTotal, FilesRestored: p.FilesFinished, + FilesSkipped: p.FilesSkipped, TotalBytes: p.AllBytesTotal, 
BytesRestored: p.AllBytesWritten, + BytesSkipped: p.AllBytesSkipped, } if p.AllBytesTotal > 0 { @@ -43,8 +45,10 @@ func (t *jsonPrinter) Finish(p State, duration time.Duration) { SecondsElapsed: uint64(duration / time.Second), TotalFiles: p.FilesTotal, FilesRestored: p.FilesFinished, + FilesSkipped: p.FilesSkipped, TotalBytes: p.AllBytesTotal, BytesRestored: p.AllBytesWritten, + BytesSkipped: p.AllBytesSkipped, } t.print(status) } @@ -55,8 +59,10 @@ type statusUpdate struct { PercentDone float64 `json:"percent_done"` TotalFiles uint64 `json:"total_files,omitempty"` FilesRestored uint64 `json:"files_restored,omitempty"` + FilesSkipped uint64 `json:"files_skipped,omitempty"` TotalBytes uint64 `json:"total_bytes,omitempty"` BytesRestored uint64 `json:"bytes_restored,omitempty"` + BytesSkipped uint64 `json:"bytes_skipped,omitempty"` } type summaryOutput struct { @@ -64,6 +70,8 @@ type summaryOutput struct { SecondsElapsed uint64 `json:"seconds_elapsed,omitempty"` TotalFiles uint64 `json:"total_files,omitempty"` FilesRestored uint64 `json:"files_restored,omitempty"` + FilesSkipped uint64 `json:"files_skipped,omitempty"` TotalBytes uint64 `json:"total_bytes,omitempty"` BytesRestored uint64 `json:"bytes_restored,omitempty"` + BytesSkipped uint64 `json:"bytes_skipped,omitempty"` } diff --git a/internal/ui/restore/json_test.go b/internal/ui/restore/json_test.go index 7ce7b58f3c8..37983f7d7eb 100644 --- a/internal/ui/restore/json_test.go +++ b/internal/ui/restore/json_test.go @@ -10,20 +10,34 @@ import ( func TestJSONPrintUpdate(t *testing.T) { term := &mockTerm{} printer := NewJSONProgress(term) - printer.Update(State{3, 11, 29, 47}, 5*time.Second) + printer.Update(State{3, 11, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output) } +func TestJSONPrintUpdateWithSkipped(t *testing.T) { + term := &mockTerm{} + printer := NewJSONProgress(term) + printer.Update(State{3, 11, 2, 29, 47, 59}, 5*time.Second) + test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":29,\"bytes_skipped\":59}\n"}, term.output) +} + func TestJSONPrintSummaryOnSuccess(t *testing.T) { term := &mockTerm{} printer := NewJSONProgress(term) - printer.Finish(State{11, 11, 47, 47}, 5*time.Second) + printer.Finish(State{11, 11, 0, 47, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.output) } func TestJSONPrintSummaryOnErrors(t *testing.T) { term := &mockTerm{} printer := NewJSONProgress(term) - printer.Finish(State{3, 11, 29, 47}, 5*time.Second) + printer.Finish(State{3, 11, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output) } + +func TestJSONPrintSummaryOnSuccessWithSkipped(t *testing.T) { + term := &mockTerm{} + printer := NewJSONProgress(term) + printer.Finish(State{11, 11, 2, 47, 47, 59}, 5*time.Second) + test.Equals(t, 
[]string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":47,\"bytes_skipped\":59}\n"}, term.output) +} diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index 5e501c4b38b..7e8bcfd256e 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -10,8 +10,10 @@ import ( type State struct { FilesFinished uint64 FilesTotal uint64 + FilesSkipped uint64 AllBytesWritten uint64 AllBytesTotal uint64 + AllBytesSkipped uint64 } type Progress struct { @@ -97,6 +99,18 @@ func (p *Progress) AddProgress(name string, bytesWrittenPortion uint64, bytesTot } } +func (p *Progress) AddSkippedFile(size uint64) { + if p == nil { + return + } + + p.m.Lock() + defer p.m.Unlock() + + p.s.FilesSkipped++ + p.s.AllBytesSkipped += size +} + func (p *Progress) Finish() { p.updater.Done() } diff --git a/internal/ui/restore/progress_test.go b/internal/ui/restore/progress_test.go index 728b743503f..56f5f62ce64 100644 --- a/internal/ui/restore/progress_test.go +++ b/internal/ui/restore/progress_test.go @@ -45,7 +45,7 @@ func TestNew(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{0, 0, 0, 0}, 0, false}, + printerTraceEntry{State{0, 0, 0, 0, 0, 0}, 0, false}, }, result) } @@ -57,7 +57,7 @@ func TestAddFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{0, 1, 0, fileSize}, 0, false}, + printerTraceEntry{State{0, 1, 0, 0, fileSize, 0}, 0, false}, }, result) } @@ -71,7 +71,7 @@ func TestFirstProgressOnAFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{0, 1, expectedBytesWritten, expectedBytesTotal}, 0, false}, + printerTraceEntry{State{0, 1, 0, expectedBytesWritten, expectedBytesTotal, 0}, 0, false}, }, result) } @@ -86,7 +86,7 @@ func TestLastProgressOnAFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{1, 1, fileSize, fileSize}, 0, false}, + printerTraceEntry{State{1, 1, 0, fileSize, fileSize, 0}, 0, false}, }, result) } @@ -102,7 +102,7 @@ func TestLastProgressOnLastFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{2, 2, 50 + fileSize, 50 + fileSize}, 0, false}, + printerTraceEntry{State{2, 2, 0, 50 + fileSize, 50 + fileSize, 0}, 0, false}, }, result) } @@ -117,7 +117,7 @@ func TestSummaryOnSuccess(t *testing.T) { return true }) test.Equals(t, printerTrace{ - printerTraceEntry{State{2, 2, 50 + fileSize, 50 + fileSize}, mockFinishDuration, true}, + printerTraceEntry{State{2, 2, 0, 50 + fileSize, 50 + fileSize, 0}, mockFinishDuration, true}, }, result) } @@ -132,6 +132,18 @@ func TestSummaryOnErrors(t *testing.T) { return true }) test.Equals(t, printerTrace{ - printerTraceEntry{State{1, 2, 50 + fileSize/2, 50 + fileSize}, mockFinishDuration, true}, + printerTraceEntry{State{1, 2, 0, 50 + fileSize/2, 50 + fileSize, 0}, mockFinishDuration, true}, + }, result) +} + +func TestSkipFile(t *testing.T) { + fileSize := uint64(100) + + result := testProgress(func(progress *Progress) bool { + progress.AddSkippedFile(fileSize) + return true + }) + test.Equals(t, printerTrace{ + printerTraceEntry{State{0, 0, 1, 0, 0, fileSize}, mockFinishDuration, true}, }, result) } diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go index 9da388e5172..28a6eb96560 100644 --- a/internal/ui/restore/text.go +++ b/internal/ui/restore/text.go @@ -24,6 +24,9 @@ func (t *textPrinter) 
Update(p State, duration time.Duration) { allPercent := ui.FormatPercent(p.AllBytesWritten, p.AllBytesTotal) progress := fmt.Sprintf("[%s] %s %v files/dirs %s, total %v files/dirs %v", timeLeft, allPercent, p.FilesFinished, formattedAllBytesWritten, p.FilesTotal, formattedAllBytesTotal) + if p.FilesSkipped > 0 { + progress += fmt.Sprintf(", skipped %v files/dirs %v", p.FilesSkipped, ui.FormatBytes(p.AllBytesSkipped)) + } t.terminal.SetStatus([]string{progress}) } @@ -42,6 +45,9 @@ func (t *textPrinter) Finish(p State, duration time.Duration) { summary = fmt.Sprintf("Summary: Restored %d / %d files/dirs (%s / %s) in %s", p.FilesFinished, p.FilesTotal, formattedAllBytesWritten, formattedAllBytesTotal, timeLeft) } + if p.FilesSkipped > 0 { + summary += fmt.Sprintf(", skipped %v files/dirs %v", p.FilesSkipped, ui.FormatBytes(p.AllBytesSkipped)) + } t.terminal.Print(summary) } diff --git a/internal/ui/restore/text_test.go b/internal/ui/restore/text_test.go index 2a17239438b..3b776a7dff4 100644 --- a/internal/ui/restore/text_test.go +++ b/internal/ui/restore/text_test.go @@ -22,20 +22,34 @@ func (m *mockTerm) SetStatus(lines []string) { func TestPrintUpdate(t *testing.T) { term := &mockTerm{} printer := NewTextProgress(term) - printer.Update(State{3, 11, 29, 47}, 5*time.Second) + printer.Update(State{3, 11, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B"}, term.output) } +func TestPrintUpdateWithSkipped(t *testing.T) { + term := &mockTerm{} + printer := NewTextProgress(term) + printer.Update(State{3, 11, 2, 29, 47, 59}, 5*time.Second) + test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B, skipped 2 files/dirs 59 B"}, term.output) +} + func TestPrintSummaryOnSuccess(t *testing.T) { term := &mockTerm{} printer := NewTextProgress(term) - printer.Finish(State{11, 11, 47, 47}, 5*time.Second) + printer.Finish(State{11, 11, 0, 47, 47, 0}, 5*time.Second) test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05"}, term.output) } func TestPrintSummaryOnErrors(t *testing.T) { term := &mockTerm{} printer := NewTextProgress(term) - printer.Finish(State{3, 11, 29, 47}, 5*time.Second) + printer.Finish(State{3, 11, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"Summary: Restored 3 / 11 files/dirs (29 B / 47 B) in 0:05"}, term.output) } + +func TestPrintSummaryOnSuccessWithSkipped(t *testing.T) { + term := &mockTerm{} + printer := NewTextProgress(term) + printer.Finish(State{11, 11, 2, 47, 47, 59}, 5*time.Second) + test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05, skipped 2 files/dirs 59 B"}, term.output) +} From ba53a2abb5220a96b53598d77e9c45e960e7ae7e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 15:30:30 +0200 Subject: [PATCH 365/893] test overwrite behavior --- internal/restorer/restorer_test.go | 89 ++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 64e10a6e8ab..635c30ee664 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -893,3 +893,92 @@ func TestRestorerSparseFiles(t *testing.T) { t.Logf("wrote %d zeros as %d blocks, %.1f%% sparse", len(zeros), blocks, 100*sparsity) } + +func TestRestorerOverwriteBehavior(t *testing.T) { + baseTime := time.Now() + baseSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n", ModTime: baseTime}, + "dirtest": Dir{ + Nodes: 
map[string]Node{ + "file": File{Data: "content: file\n", ModTime: baseTime}, + }, + ModTime: baseTime, + }, + }, + } + overwriteSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: new\n", ModTime: baseTime.Add(time.Second)}, + "dirtest": Dir{ + Nodes: map[string]Node{ + "file": File{Data: "content: file2\n", ModTime: baseTime.Add(-time.Second)}, + }, + }, + }, + } + + var tests = []struct { + Overwrite OverwriteBehavior + Files map[string]string + }{ + { + Overwrite: OverwriteAlways, + Files: map[string]string{ + "foo": "content: new\n", + "dirtest/file": "content: file2\n", + }, + }, + { + Overwrite: OverwriteIfNewer, + Files: map[string]string{ + "foo": "content: new\n", + "dirtest/file": "content: file\n", + }, + }, + { + Overwrite: OverwriteNever, + Files: map[string]string{ + "foo": "content: foo\n", + "dirtest/file": "content: file\n", + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // base snapshot + sn, id := saveSnapshot(t, repo, baseSnapshot, noopGetGenericAttributes) + t.Logf("base snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, Options{}) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + + // overwrite snapshot + sn, id = saveSnapshot(t, repo, overwriteSnapshot, noopGetGenericAttributes) + t.Logf("overwrite snapshot saved as %v", id.Str()) + res = NewRestorer(repo, sn, Options{Overwrite: test.Overwrite}) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + + _, err := res.VerifyFiles(ctx, tempdir) + rtest.OK(t, err) + + for filename, content := range test.Files { + data, err := os.ReadFile(filepath.Join(tempdir, filepath.FromSlash(filename))) + if err != nil { + t.Errorf("unable to read file %v: %v", filename, err) + continue + } + + if !bytes.Equal(data, []byte(content)) { + t.Errorf("file %v has wrong content: want %q, got %q", filename, content, data) + } + } + }) + } +} From e1ec60c2ee4eb66bcd69f4f745ebc1fa1b98ee38 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 16:11:18 +0200 Subject: [PATCH 366/893] document restore --overwrite --- doc/050_restore.rst | 9 +++++++++ doc/075_scripting.rst | 9 ++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/doc/050_restore.rst b/doc/050_restore.rst index ce17a1cf736..b5eb4134927 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -85,6 +85,15 @@ disk space. Note that the exact location of the holes can differ from those in the original file, as their location is determined while restoring and is not stored explicitly. +Restoring in-place +------------------ + +By default, the ``restore`` command overwrites already existing files in the target +directory. This behavior can be configured via the ``--overwrite`` option. The +default is ``--overwrite always``. To only overwrite existing files if the file in +the snapshot is newer, use ``--overwrite if-newer``. To never overwrite existing files, +use ``--overwrite never``. 
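+
+For example, assuming a repository at ``/srv/restic-repo`` and a restore
+target of ``/tmp/restore-work`` (both paths are only placeholders), restoring
+the latest snapshot while keeping files that are newer on disk could look
+like this::
+
+    $ restic -r /srv/restic-repo restore latest --target /tmp/restore-work --overwrite if-newer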
+ Restore using mount =================== diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index e413e349f90..d40f7c97608 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -502,11 +502,14 @@ Status +----------------------+------------------------------------------------------------+ |``files_restored`` | Files restored | +----------------------+------------------------------------------------------------+ +|``files_skipped`` | Files skipped due to overwrite setting | ++----------------------+------------------------------------------------------------+ |``total_bytes`` | Total number of bytes in restore set | +----------------------+------------------------------------------------------------+ |``bytes_restored`` | Number of bytes restored | +----------------------+------------------------------------------------------------+ - +|``bytes_skipped`` | Total size of skipped files | ++----------------------+------------------------------------------------------------+ Summary ^^^^^^^ @@ -520,10 +523,14 @@ Summary +----------------------+------------------------------------------------------------+ |``files_restored`` | Files restored | +----------------------+------------------------------------------------------------+ +|``files_skipped`` | Files skipped due to overwrite setting | ++----------------------+------------------------------------------------------------+ |``total_bytes`` | Total number of bytes in restore set | +----------------------+------------------------------------------------------------+ |``bytes_restored`` | Number of bytes restored | +----------------------+------------------------------------------------------------+ +|``bytes_skipped`` | Total size of skipped files | ++----------------------+------------------------------------------------------------+ snapshots From 105261e12e58b95e66f3bce5d5b52cad7f03c63c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 16:25:42 +0200 Subject: [PATCH 367/893] add changelog for restore --overwrite --- changelog/unreleased/issue-4817 | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 changelog/unreleased/issue-4817 diff --git a/changelog/unreleased/issue-4817 b/changelog/unreleased/issue-4817 new file mode 100644 index 00000000000..b8ffc35ca00 --- /dev/null +++ b/changelog/unreleased/issue-4817 @@ -0,0 +1,11 @@ +Enhancement: Make overwrite behavior of `restore` customizable + +The `restore` command now supports an `--overwrite` option to configure whether +already existing files are overwritten. The default is `--overwrite always`, +which overwrites existing files. `--overwrite if-newer` only restores files +from the snapshot that are newer than the local state. And `--overwrite never` +does not modify existing files. 
+ +https://github.com/restic/restic/issues/4817 +https://github.com/restic/restic/issues/200 +https://github.com/restic/restic/pull/4837 From 7f7c995977b9dfd420cee3ce843ae40a171e661a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 18:30:51 +0200 Subject: [PATCH 368/893] fix linter warnings --- internal/restorer/restorer.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index ae622874b3d..84b7869979a 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -303,7 +303,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { idx.Add(node.Inode, node.DeviceID, location) } - return res.withOverwriteCheck(node, target, location, false, func() error { + return res.withOverwriteCheck(node, target, false, func() error { res.progress.AddFile(node.Size) filerestorer.addFile(location, node.Content, int64(node.Size)) res.trackFile(location) @@ -327,13 +327,13 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { visitNode: func(node *restic.Node, target, location string) error { debug.Log("second pass, visitNode: restore node %q", location) if node.Type != "file" { - return res.withOverwriteCheck(node, target, location, false, func() error { + return res.withOverwriteCheck(node, target, false, func() error { return res.restoreNodeTo(ctx, node, target, location) }) } if idx.Has(node.Inode, node.DeviceID) && idx.Value(node.Inode, node.DeviceID) != location { - return res.withOverwriteCheck(node, target, location, true, func() error { + return res.withOverwriteCheck(node, target, true, func() error { return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location) }) } @@ -364,7 +364,7 @@ func (res *Restorer) hasRestoredFile(location string) bool { return ok } -func (res *Restorer) withOverwriteCheck(node *restic.Node, target, location string, isHardlink bool, cb func() error) error { +func (res *Restorer) withOverwriteCheck(node *restic.Node, target string, isHardlink bool, cb func() error) error { overwrite, err := shouldOverwrite(res.overwrite, node, target) if err != nil { return err From 8aa3ab6ef8df4ef4c212672e0c1e3afed3875756 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 22 May 2024 17:36:52 +0200 Subject: [PATCH 369/893] restore: check file type in verify --- internal/restorer/restorer.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 84b7869979a..b7d40f712b9 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -473,7 +473,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { // Reusing buffers prevents the verifier goroutines allocating all of RAM and // flushing the filesystem cache (at least on Linux). 
func (res *Restorer) verifyFile(target string, node *restic.Node, buf []byte) ([]byte, error) { - f, err := os.Open(target) + f, err := os.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) if err != nil { return buf, err } @@ -485,6 +485,8 @@ func (res *Restorer) verifyFile(target string, node *restic.Node, buf []byte) ([ switch { case err != nil: return buf, err + case !fi.Mode().IsRegular(): + return buf, errors.Errorf("Expected %s to be a regular file", target) case int64(node.Size) != fi.Size(): return buf, errors.Errorf("Invalid file size for %s: expected %d, got %d", target, node.Size, fi.Size()) From 8733cd21b9444f23623634f23ad621bfebfd0e31 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 21:47:50 +0200 Subject: [PATCH 370/893] restore: fix file encryption on windows The assumption that a file is not readonly is no longer valid. --- internal/restic/node_windows.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 9117c6a1071..0d96bdb98df 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -275,17 +275,20 @@ func fixEncryptionAttribute(path string, attrs *uint32, pathPointer *uint16) (er // File should be encrypted. err = encryptFile(pathPointer) if err != nil { - if fs.IsAccessDenied(err) { + if fs.IsAccessDenied(err) || errors.Is(err, windows.ERROR_FILE_READ_ONLY) { // If existing file already has readonly or system flag, encrypt file call fails. - // We have already cleared readonly flag, clearing system flag if needed. // The readonly and system flags will be set again at the end of this func if they are needed. + err = fs.ResetPermissions(path) + if err != nil { + return fmt.Errorf("failed to encrypt file: failed to reset permissions: %s : %v", path, err) + } err = fs.ClearSystem(path) if err != nil { return fmt.Errorf("failed to encrypt file: failed to clear system flag: %s : %v", path, err) } err = encryptFile(pathPointer) if err != nil { - return fmt.Errorf("failed to encrypt file: %s : %v", path, err) + return fmt.Errorf("failed retry to encrypt file: %s : %v", path, err) } } else { return fmt.Errorf("failed to encrypt file: %s : %v", path, err) @@ -300,17 +303,20 @@ func fixEncryptionAttribute(path string, attrs *uint32, pathPointer *uint16) (er // File should not be encrypted, but its already encrypted. Decrypt it. err = decryptFile(pathPointer) if err != nil { - if fs.IsAccessDenied(err) { + if fs.IsAccessDenied(err) || errors.Is(err, windows.ERROR_FILE_READ_ONLY) { // If existing file already has readonly or system flag, decrypt file call fails. - // We have already cleared readonly flag, clearing system flag if needed. // The readonly and system flags will be set again after this func if they are needed. 
+ err = fs.ResetPermissions(path) + if err != nil { + return fmt.Errorf("failed to encrypt file: failed to reset permissions: %s : %v", path, err) + } err = fs.ClearSystem(path) if err != nil { return fmt.Errorf("failed to decrypt file: failed to clear system flag: %s : %v", path, err) } err = decryptFile(pathPointer) if err != nil { - return fmt.Errorf("failed to decrypt file: %s : %v", path, err) + return fmt.Errorf("failed retry to decrypt file: %s : %v", path, err) } } else { return fmt.Errorf("failed to decrypt file: %s : %v", path, err) From 62e0e5e8d8af2c04fef6b3107931de2a2d0ee4be Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 17:46:59 +0200 Subject: [PATCH 371/893] restore: cleanup options handling --- internal/restorer/restorer.go | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index b7d40f712b9..2c64ee75ca7 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -18,11 +18,9 @@ import ( // Restorer is used to restore a snapshot to a directory. type Restorer struct { - repo restic.Repository - sn *restic.Snapshot - sparse bool - progress *restoreui.Progress - overwrite OverwriteBehavior + repo restic.Repository + sn *restic.Snapshot + opts Options fileList map[string]struct{} @@ -87,9 +85,7 @@ func (c *OverwriteBehavior) Type() string { func NewRestorer(repo restic.Repository, sn *restic.Snapshot, opts Options) *Restorer { r := &Restorer{ repo: repo, - sparse: opts.Sparse, - progress: opts.Progress, - overwrite: opts.Overwrite, + opts: opts, fileList: make(map[string]struct{}), Error: restorerAbortOnAllErrors, SelectFilter: func(string, string, *restic.Node) (bool, bool) { return true, true }, @@ -224,7 +220,7 @@ func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, targe return err } - res.progress.AddProgress(location, 0, 0) + res.opts.Progress.AddProgress(location, 0, 0) return res.restoreNodeMetadataTo(node, target, location) } @@ -246,7 +242,7 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location return errors.WithStack(err) } - res.progress.AddProgress(location, 0, 0) + res.opts.Progress.AddProgress(location, 0, 0) // TODO investigate if hardlinks have separate metadata on any supported system return res.restoreNodeMetadataTo(node, path, location) @@ -265,7 +261,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { idx := NewHardlinkIndex[string]() filerestorer := newFileRestorer(dst, res.repo.LoadBlobsFromPack, res.repo.LookupBlob, - res.repo.Connections(), res.sparse, res.progress) + res.repo.Connections(), res.opts.Sparse, res.opts.Progress) filerestorer.Error = res.Error debug.Log("first pass for %q", dst) @@ -274,7 +270,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { _, err = res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ enterDir: func(_ *restic.Node, target, location string) error { debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", location) - res.progress.AddFile(0) + res.opts.Progress.AddFile(0) // create dir with default permissions // #leaveDir restores dir metadata after visiting all children return fs.MkdirAll(target, 0700) @@ -290,21 +286,21 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { } if node.Type != "file" { - res.progress.AddFile(0) + res.opts.Progress.AddFile(0) return nil } if node.Links > 1 { if 
idx.Has(node.Inode, node.DeviceID) { // a hardlinked file does not increase the restore size - res.progress.AddFile(0) + res.opts.Progress.AddFile(0) return nil } idx.Add(node.Inode, node.DeviceID, location) } return res.withOverwriteCheck(node, target, false, func() error { - res.progress.AddFile(node.Size) + res.opts.Progress.AddFile(node.Size) filerestorer.addFile(location, node.Content, int64(node.Size)) res.trackFile(location) return nil @@ -347,7 +343,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { leaveDir: func(node *restic.Node, target, location string) error { err := res.restoreNodeMetadataTo(node, target, location) if err == nil { - res.progress.AddProgress(location, 0, 0) + res.opts.Progress.AddProgress(location, 0, 0) } return err }, @@ -365,7 +361,7 @@ func (res *Restorer) hasRestoredFile(location string) bool { } func (res *Restorer) withOverwriteCheck(node *restic.Node, target string, isHardlink bool, cb func() error) error { - overwrite, err := shouldOverwrite(res.overwrite, node, target) + overwrite, err := shouldOverwrite(res.opts.Overwrite, node, target) if err != nil { return err } else if !overwrite { @@ -373,7 +369,7 @@ func (res *Restorer) withOverwriteCheck(node *restic.Node, target string, isHard if isHardlink { size = 0 } - res.progress.AddSkippedFile(size) + res.opts.Progress.AddSkippedFile(size) return nil } return cb() From a66658b4c9ac871ef5360e8937144bd4e4ee7b76 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 17:06:08 +0200 Subject: [PATCH 372/893] restore: only restore changed file parts For files that should be overwritten, first check whether their content matches the snapshot and only restore diverging parts. --- internal/restorer/filerestorer.go | 19 ++--- internal/restorer/fileswriter.go | 29 ++++--- internal/restorer/restorer.go | 124 ++++++++++++++++++++++-------- 3 files changed, 122 insertions(+), 50 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 600a702b2b1..1e67debeb98 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -26,6 +26,7 @@ type fileInfo struct { size int64 location string // file on local filesystem relative to restorer basedir blobs interface{} // blobs of the file + state *fileState } type fileBlobInfo struct { @@ -80,25 +81,25 @@ func newFileRestorer(dst string, } } -func (r *fileRestorer) addFile(location string, content restic.IDs, size int64) { - r.files = append(r.files, &fileInfo{location: location, blobs: content, size: size}) +func (r *fileRestorer) addFile(location string, content restic.IDs, size int64, state *fileState) { + r.files = append(r.files, &fileInfo{location: location, blobs: content, size: size, state: state}) } func (r *fileRestorer) targetPath(location string) string { return filepath.Join(r.dst, location) } -func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID, packBlob restic.Blob)) error { +func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID, packBlob restic.Blob, idx int)) error { if len(blobIDs) == 0 { return nil } - for _, blobID := range blobIDs { + for i, blobID := range blobIDs { packs := r.idx(restic.DataBlob, blobID) if len(packs) == 0 { return errors.Errorf("Unknown blob %s", blobID.String()) } - fn(packs[0].PackID, packs[0].Blob) + fn(packs[0].PackID, packs[0].Blob, i) } return nil @@ -128,8 +129,8 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { packsMap = 
make(map[restic.ID][]fileBlobInfo) } fileOffset := int64(0) - err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob) { - if largeFile { + err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) { + if largeFile && !file.state.HasMatchingBlob(idx) { packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset}) fileOffset += int64(blob.DataLength()) } @@ -232,8 +233,8 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error { } if fileBlobs, ok := file.blobs.(restic.IDs); ok { fileOffset := int64(0) - err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob) { - if packID.Equal(pack.id) { + err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) { + if packID.Equal(pack.id) && !file.state.HasMatchingBlob(idx) { addBlob(blob, fileOffset) } fileOffset += int64(blob.DataLength()) diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go index 5e4931c63e9..50f06c83db0 100644 --- a/internal/restorer/fileswriter.go +++ b/internal/restorer/fileswriter.go @@ -40,9 +40,8 @@ func newFilesWriter(count int) *filesWriter { } func createFile(path string, createSize int64, sparse bool) (*os.File, error) { - var f *os.File - var err error - if f, err = os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600); err != nil { + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { if !fs.IsAccessDenied(err) { return nil, err } @@ -54,19 +53,31 @@ func createFile(path string, createSize int64, sparse bool) (*os.File, error) { if err = fs.ResetPermissions(path); err != nil { return nil, err } - if f, err = os.OpenFile(path, os.O_TRUNC|os.O_WRONLY, 0600); err != nil { + if f, err = os.OpenFile(path, os.O_WRONLY, 0600); err != nil { return nil, err } } - if createSize > 0 { - if sparse { - err = truncateSparse(f, createSize) + if sparse { + err = truncateSparse(f, createSize) + if err != nil { + _ = f.Close() + return nil, err + } + } else { + info, err := f.Stat() + if err != nil { + _ = f.Close() + return nil, err + } + if info.Size() > createSize { + // file is too long must shorten it + err = f.Truncate(createSize) if err != nil { _ = f.Close() return nil, err } - } else { + } else if createSize > 0 { err := fs.PreallocateFile(f, createSize) if err != nil { // Just log the preallocate error but don't let it cause the restore process to fail. 
@@ -78,7 +89,7 @@ func createFile(path string, createSize int64, sparse bool) (*os.File, error) { } } } - return f, err + return f, nil } func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, createSize int64, sparse bool) error { diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 2c64ee75ca7..88c3c597208 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -3,6 +3,7 @@ package restorer import ( "context" "fmt" + "io" "os" "path/filepath" "sync/atomic" @@ -22,7 +23,7 @@ type Restorer struct { sn *restic.Snapshot opts Options - fileList map[string]struct{} + fileList map[string]bool Error func(location string, err error) error Warn func(message string) @@ -86,7 +87,7 @@ func NewRestorer(repo restic.Repository, sn *restic.Snapshot, opts Options) *Res r := &Restorer{ repo: repo, opts: opts, - fileList: make(map[string]struct{}), + fileList: make(map[string]bool), Error: restorerAbortOnAllErrors, SelectFilter: func(string, string, *restic.Node) (bool, bool) { return true, true }, sn: sn, @@ -266,6 +267,8 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { debug.Log("first pass for %q", dst) + var buf []byte + // first tree pass: create directories and collect all files to restore _, err = res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ enterDir: func(_ *restic.Node, target, location string) error { @@ -299,12 +302,17 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { idx.Add(node.Inode, node.DeviceID, location) } - return res.withOverwriteCheck(node, target, false, func() error { - res.opts.Progress.AddFile(node.Size) - filerestorer.addFile(location, node.Content, int64(node.Size)) - res.trackFile(location) + buf, err = res.withOverwriteCheck(node, target, false, buf, func(updateMetadataOnly bool, matches *fileState) error { + if updateMetadataOnly { + res.opts.Progress.AddSkippedFile(node.Size) + } else { + res.opts.Progress.AddFile(node.Size) + filerestorer.addFile(location, node.Content, int64(node.Size), matches) + } + res.trackFile(location, updateMetadataOnly) return nil }) + return err }, }) if err != nil { @@ -323,18 +331,20 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { visitNode: func(node *restic.Node, target, location string) error { debug.Log("second pass, visitNode: restore node %q", location) if node.Type != "file" { - return res.withOverwriteCheck(node, target, false, func() error { + _, err := res.withOverwriteCheck(node, target, false, nil, func(_ bool, _ *fileState) error { return res.restoreNodeTo(ctx, node, target, location) }) + return err } if idx.Has(node.Inode, node.DeviceID) && idx.Value(node.Inode, node.DeviceID) != location { - return res.withOverwriteCheck(node, target, true, func() error { + _, err := res.withOverwriteCheck(node, target, true, nil, func(_ bool, _ *fileState) error { return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location) }) + return err } - if res.hasRestoredFile(location) { + if _, ok := res.hasRestoredFile(location); ok { return res.restoreNodeMetadataTo(node, target, location) } // don't touch skipped files @@ -351,28 +361,38 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { return err } -func (res *Restorer) trackFile(location string) { - res.fileList[location] = struct{}{} +func (res *Restorer) trackFile(location string, metadataOnly bool) { + res.fileList[location] = 
metadataOnly } -func (res *Restorer) hasRestoredFile(location string) bool { - _, ok := res.fileList[location] - return ok +func (res *Restorer) hasRestoredFile(location string) (metadataOnly bool, ok bool) { + metadataOnly, ok = res.fileList[location] + return metadataOnly, ok } -func (res *Restorer) withOverwriteCheck(node *restic.Node, target string, isHardlink bool, cb func() error) error { +func (res *Restorer) withOverwriteCheck(node *restic.Node, target string, isHardlink bool, buf []byte, cb func(updateMetadataOnly bool, matches *fileState) error) ([]byte, error) { overwrite, err := shouldOverwrite(res.opts.Overwrite, node, target) if err != nil { - return err + return buf, err } else if !overwrite { size := node.Size if isHardlink { size = 0 } res.opts.Progress.AddSkippedFile(size) - return nil + return buf, nil } - return cb() + + var matches *fileState + updateMetadataOnly := false + if node.Type == "file" && !isHardlink { + // if a file fails to verify, then matches is nil which results in restoring from scratch + matches, buf, _ = res.verifyFile(target, node, false, buf) + // skip files that are already correct completely + updateMetadataOnly = !matches.NeedsRestore() + } + + return buf, cb(updateMetadataOnly, matches) } func shouldOverwrite(overwrite OverwriteBehavior, node *restic.Node, destination string) (bool, error) { @@ -429,7 +449,10 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { _, err := res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ visitNode: func(node *restic.Node, target, location string) error { - if node.Type != "file" || !res.hasRestoredFile(location) { + if node.Type != "file" { + return nil + } + if metadataOnly, ok := res.hasRestoredFile(location); !ok || metadataOnly { return nil } select { @@ -447,7 +470,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { g.Go(func() (err error) { var buf []byte for job := range work { - buf, err = res.verifyFile(job.path, job.node, buf) + _, buf, err = res.verifyFile(job.path, job.node, true, buf) if err != nil { err = res.Error(job.path, err) } @@ -463,36 +486,68 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { return int(nchecked), g.Wait() } +type fileState struct { + blobMatches []bool + sizeMatches bool +} + +func (s *fileState) NeedsRestore() bool { + if s == nil { + return true + } + if !s.sizeMatches { + return true + } + for _, match := range s.blobMatches { + if !match { + return true + } + } + return false +} + +func (s *fileState) HasMatchingBlob(i int) bool { + if s == nil || s.blobMatches == nil { + return false + } + return i < len(s.blobMatches) && s.blobMatches[i] +} + // Verify that the file target has the contents of node. // // buf and the first return value are scratch space, passed around for reuse. // Reusing buffers prevents the verifier goroutines allocating all of RAM and // flushing the filesystem cache (at least on Linux). 
-func (res *Restorer) verifyFile(target string, node *restic.Node, buf []byte) ([]byte, error) { +func (res *Restorer) verifyFile(target string, node *restic.Node, failFast bool, buf []byte) (*fileState, []byte, error) { f, err := os.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) if err != nil { - return buf, err + return nil, buf, err } defer func() { _ = f.Close() }() fi, err := f.Stat() + sizeMatches := true switch { case err != nil: - return buf, err + return nil, buf, err case !fi.Mode().IsRegular(): - return buf, errors.Errorf("Expected %s to be a regular file", target) + return nil, buf, errors.Errorf("Expected %s to be a regular file", target) case int64(node.Size) != fi.Size(): - return buf, errors.Errorf("Invalid file size for %s: expected %d, got %d", - target, node.Size, fi.Size()) + if failFast { + return nil, buf, errors.Errorf("Invalid file size for %s: expected %d, got %d", + target, node.Size, fi.Size()) + } + sizeMatches = false } + matches := make([]bool, len(node.Content)) var offset int64 - for _, blobID := range node.Content { + for i, blobID := range node.Content { length, found := res.repo.LookupBlobSize(restic.DataBlob, blobID) if !found { - return buf, errors.Errorf("Unable to fetch blob %s", blobID) + return nil, buf, errors.Errorf("Unable to fetch blob %s", blobID) } if length > uint(cap(buf)) { @@ -501,16 +556,21 @@ func (res *Restorer) verifyFile(target string, node *restic.Node, buf []byte) ([ buf = buf[:length] _, err = f.ReadAt(buf, offset) + if err == io.EOF && !failFast { + sizeMatches = false + break + } if err != nil { - return buf, err + return nil, buf, err } - if !blobID.Equal(restic.Hash(buf)) { - return buf, errors.Errorf( + matches[i] = blobID.Equal(restic.Hash(buf)) + if failFast && !matches[i] { + return nil, buf, errors.Errorf( "Unexpected content in %s, starting at offset %d", target, offset) } offset += int64(length) } - return buf, nil + return &fileState{matches, sizeMatches}, buf, nil } From 5c3709e17a692b69351c1dc2ae598eaca7cb22c3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 17:34:48 +0200 Subject: [PATCH 373/893] restore: add --overwrite=if-changed to skip files if their mtime&size matches --overwrite=always still checks the file content --- cmd/restic/cmd_restore.go | 2 +- internal/restorer/restorer.go | 27 +++++++++++++++++++-------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 361eb59a8db..f39db6a7619 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -66,7 +66,7 @@ func init() { initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter) flags.BoolVar(&restoreOptions.Sparse, "sparse", false, "restore files as sparse") flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content") - flags.Var(&restoreOptions.Overwrite, "overwrite", "overwrite behavior, one of (always|if-newer|never) (default: always)") + flags.Var(&restoreOptions.Overwrite, "overwrite", "overwrite behavior, one of (always|if-changed|if-newer|never) (default: always)") } func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 88c3c597208..6002d6f0e71 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -42,10 +42,13 @@ type OverwriteBehavior int // Constants for different overwrite behavior const ( - OverwriteAlways OverwriteBehavior = 0 - OverwriteIfNewer OverwriteBehavior = 1 - 
OverwriteNever OverwriteBehavior = 2 - OverwriteInvalid OverwriteBehavior = 3 + OverwriteAlways OverwriteBehavior = iota + // OverwriteIfChanged is like OverwriteAlways except that it skips restoring the content + // of files with matching size&mtime. Metadata is always restored. + OverwriteIfChanged + OverwriteIfNewer + OverwriteNever + OverwriteInvalid ) // Set implements the method needed for pflag command flag parsing. @@ -53,6 +56,8 @@ func (c *OverwriteBehavior) Set(s string) error { switch s { case "always": *c = OverwriteAlways + case "if-changed": + *c = OverwriteIfChanged case "if-newer": *c = OverwriteIfNewer case "never": @@ -69,6 +74,8 @@ func (c *OverwriteBehavior) String() string { switch *c { case OverwriteAlways: return "always" + case OverwriteIfChanged: + return "if-changed" case OverwriteIfNewer: return "if-newer" case OverwriteNever: @@ -387,7 +394,7 @@ func (res *Restorer) withOverwriteCheck(node *restic.Node, target string, isHard updateMetadataOnly := false if node.Type == "file" && !isHardlink { // if a file fails to verify, then matches is nil which results in restoring from scratch - matches, buf, _ = res.verifyFile(target, node, false, buf) + matches, buf, _ = res.verifyFile(target, node, false, res.opts.Overwrite == OverwriteIfChanged, buf) // skip files that are already correct completely updateMetadataOnly = !matches.NeedsRestore() } @@ -396,7 +403,7 @@ func (res *Restorer) withOverwriteCheck(node *restic.Node, target string, isHard } func shouldOverwrite(overwrite OverwriteBehavior, node *restic.Node, destination string) (bool, error) { - if overwrite == OverwriteAlways { + if overwrite == OverwriteAlways || overwrite == OverwriteIfChanged { return true, nil } @@ -470,7 +477,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { g.Go(func() (err error) { var buf []byte for job := range work { - _, buf, err = res.verifyFile(job.path, job.node, true, buf) + _, buf, err = res.verifyFile(job.path, job.node, true, false, buf) if err != nil { err = res.Error(job.path, err) } @@ -518,7 +525,7 @@ func (s *fileState) HasMatchingBlob(i int) bool { // buf and the first return value are scratch space, passed around for reuse. // Reusing buffers prevents the verifier goroutines allocating all of RAM and // flushing the filesystem cache (at least on Linux). 
-func (res *Restorer) verifyFile(target string, node *restic.Node, failFast bool, buf []byte) (*fileState, []byte, error) { +func (res *Restorer) verifyFile(target string, node *restic.Node, failFast bool, trustMtime bool, buf []byte) (*fileState, []byte, error) { f, err := os.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) if err != nil { return nil, buf, err @@ -542,6 +549,10 @@ func (res *Restorer) verifyFile(target string, node *restic.Node, failFast bool, sizeMatches = false } + if trustMtime && fi.ModTime().Equal(node.ModTime) && sizeMatches { + return &fileState{nil, sizeMatches}, buf, nil + } + matches := make([]bool, len(node.Content)) var offset int64 for i, blobID := range node.Content { From 3ec28ff853a7f701a44f2b845826483a3a7fc3ed Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 17:59:06 +0200 Subject: [PATCH 374/893] update restore --overwrite documentation --- changelog/unreleased/issue-4817 | 19 +++++++++++++++---- doc/050_restore.rst | 19 ++++++++++++++----- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/changelog/unreleased/issue-4817 b/changelog/unreleased/issue-4817 index b8ffc35ca00..ddbd5672b04 100644 --- a/changelog/unreleased/issue-4817 +++ b/changelog/unreleased/issue-4817 @@ -1,11 +1,22 @@ Enhancement: Make overwrite behavior of `restore` customizable The `restore` command now supports an `--overwrite` option to configure whether -already existing files are overwritten. The default is `--overwrite always`, -which overwrites existing files. `--overwrite if-newer` only restores files -from the snapshot that are newer than the local state. And `--overwrite never` -does not modify existing files. +already existing files are overwritten. This behavior can now be configured via +the `--overwrite` option. The following values are supported: + +* `--overwrite always` (default): always overwrites already existing files. `restore` + will verify the existing file content and only restore mismatching parts to minimize + downloads. Updates the metadata of all files. +* `--overwrite if-changed`: like the previous case, but speeds up the file content check + by assuming that files with matching size and modification time (mtime) are already up to date. + In case of a mismatch, the full file content is verified. Updates the metadata of all files. +* `--overwrite if-newer`: only overwrite existing files if the file in the snapshot has a + newer modification time (mtime). +* `--overwrite never`: never overwrite existing files. https://github.com/restic/restic/issues/4817 https://github.com/restic/restic/issues/200 +https://github.com/restic/restic/issues/407 +https://github.com/restic/restic/issues/2662 https://github.com/restic/restic/pull/4837 +https://github.com/restic/restic/pull/4838 diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 637ffc3370d..483ba496fdc 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -91,11 +91,20 @@ stored explicitly. Restoring in-place ------------------ -By default, the ``restore`` command overwrites already existing files in the target -directory. This behavior can be configured via the ``--overwrite`` option. The -default is ``--overwrite always``. To only overwrite existing files if the file in -the snapshot is newer, use ``--overwrite if-newer``. To never overwrite existing files, -use ``--overwrite never``. +By default, the ``restore`` command overwrites already existing files at the target +directory. This behavior can be configured via the ``--overwrite`` option. 
The following +values are supported: + +* ``--overwrite always`` (default): always overwrites already existing files. ``restore`` + will verify the existing file content and only restore mismatching parts to minimize + downloads. Updates the metadata of all files. +* ``--overwrite if-changed``: like the previous case, but speeds up the file content check + by assuming that files with matching size and modification time (mtime) are already up to date. + In case of a mismatch, the full file content is verified. Updates the metadata of all files. +* ``--overwrite if-newer``: only overwrite existing files if the file in the snapshot has a + newer modification time (mtime). +* ``--overwrite never``: never overwrite existing files. + Restore using mount =================== From 1eccd6504bc19d431aa4f18b65929d0d911e93d8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 18:15:50 +0200 Subject: [PATCH 375/893] restore: test restore only changed parts functionality --- internal/restorer/restorer_test.go | 93 +++++++++++++++++++++++++ internal/restorer/restorer_unix_test.go | 32 +++++++++ 2 files changed, 125 insertions(+) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 635c30ee664..0d43ffa9d61 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -10,6 +10,7 @@ import ( "path/filepath" "runtime" "strings" + "syscall" "testing" "time" @@ -929,6 +930,13 @@ func TestRestorerOverwriteBehavior(t *testing.T) { "dirtest/file": "content: file2\n", }, }, + { + Overwrite: OverwriteIfChanged, + Files: map[string]string{ + "foo": "content: new\n", + "dirtest/file": "content: file2\n", + }, + }, { Overwrite: OverwriteIfNewer, Files: map[string]string{ @@ -982,3 +990,88 @@ func TestRestorerOverwriteBehavior(t *testing.T) { }) } } + +func TestRestoreModified(t *testing.T) { + // overwrite files between snapshots and also change their filesize + snapshots := []Snapshot{ + { + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n", ModTime: time.Now()}, + "bar": File{Data: "content: a\n", ModTime: time.Now()}, + }, + }, + { + Nodes: map[string]Node{ + "foo": File{Data: "content: a\n", ModTime: time.Now()}, + "bar": File{Data: "content: bar\n", ModTime: time.Now()}, + }, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for _, snapshot := range snapshots { + sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + t.Logf("snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, Options{Overwrite: OverwriteIfChanged}) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + n, err := res.VerifyFiles(ctx, tempdir) + rtest.OK(t, err) + rtest.Equals(t, 2, n, "unexpected number of verified files") + } +} + +func TestRestoreIfChanged(t *testing.T) { + origData := "content: foo\n" + modData := "content: bar\n" + rtest.Equals(t, len(modData), len(origData), "broken testcase") + snapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: origData, ModTime: time.Now()}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + t.Logf("snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, Options{}) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + + // modify file but 
maintain size and timestamp + path := filepath.Join(tempdir, "foo") + f, err := os.OpenFile(path, os.O_RDWR, 0) + rtest.OK(t, err) + fi, err := f.Stat() + rtest.OK(t, err) + _, err = f.Write([]byte(modData)) + rtest.OK(t, err) + rtest.OK(t, f.Close()) + var utimes = [...]syscall.Timespec{ + syscall.NsecToTimespec(fi.ModTime().UnixNano()), + syscall.NsecToTimespec(fi.ModTime().UnixNano()), + } + rtest.OK(t, syscall.UtimesNano(path, utimes[:])) + + for _, overwrite := range []OverwriteBehavior{OverwriteIfChanged, OverwriteAlways} { + res = NewRestorer(repo, sn, Options{Overwrite: overwrite}) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + data, err := os.ReadFile(path) + rtest.OK(t, err) + if overwrite == OverwriteAlways { + // restore should notice the changed file content + rtest.Equals(t, origData, string(data), "expected original file content") + } else { + // restore should not have noticed the changed file content + rtest.Equals(t, modData, string(data), "expected modified file content") + } + } +} diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index 97d2dd07d56..9523440cfda 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -5,6 +5,7 @@ package restorer import ( "context" + "io/fs" "os" "path/filepath" "syscall" @@ -118,3 +119,34 @@ func TestRestorerProgressBar(t *testing.T) { AllBytesSkipped: 0, }, mock.s) } + +func TestRestorePermissions(t *testing.T) { + snapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n", Mode: 0o600, ModTime: time.Now()}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + t.Logf("snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, Options{}) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + + for _, overwrite := range []OverwriteBehavior{OverwriteIfChanged, OverwriteAlways} { + // tamper with permissions + path := filepath.Join(tempdir, "foo") + rtest.OK(t, os.Chmod(path, 0o700)) + + res = NewRestorer(repo, sn, Options{Overwrite: overwrite}) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + fi, err := os.Stat(path) + rtest.OK(t, err) + rtest.Equals(t, fs.FileMode(0o600), fi.Mode().Perm(), "unexpected permissions") + } +} From f1c76a82861f59e4fd03c7696a669553ae9d2f96 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 5 Jun 2024 22:01:55 +0200 Subject: [PATCH 376/893] restore: fix corrupted sparse files --- internal/restorer/filerestorer.go | 6 +++++ internal/restorer/restorer_test.go | 38 ++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 1e67debeb98..f57d5859818 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -153,6 +153,12 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { // in addition, a short chunk will never match r.zeroChunk which would prevent sparseness for short files file.sparse = r.sparse } + if file.state != nil { + // The restorer currently cannot punch new holes into an existing file. + // Thus sections that contained data but should be sparse after restoring + // the snapshot would still contain the old data resulting in a corrupt restore. 
+ file.sparse = false + } if err != nil { // repository index is messed up, can't do anything diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 0d43ffa9d61..25ce668dbd8 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -895,6 +895,44 @@ func TestRestorerSparseFiles(t *testing.T) { len(zeros), blocks, 100*sparsity) } +func TestRestorerSparseOverwrite(t *testing.T) { + baseSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: new\n"}, + }, + } + var zero [14]byte + sparseSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: string(zero[:])}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // base snapshot + sn, id := saveSnapshot(t, repo, baseSnapshot, noopGetGenericAttributes) + t.Logf("base snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, Options{Sparse: true}) + err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + // sparse snapshot + sn, id = saveSnapshot(t, repo, sparseSnapshot, noopGetGenericAttributes) + t.Logf("base snapshot saved as %v", id.Str()) + + res = NewRestorer(repo, sn, Options{Sparse: true, Overwrite: OverwriteAlways}) + err = res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + files, err := res.VerifyFiles(ctx, tempdir) + rtest.OK(t, err) + rtest.Equals(t, 1, files, "unexpected number of verified files") +} + func TestRestorerOverwriteBehavior(t *testing.T) { baseTime := time.Now() baseSnapshot := Snapshot{ From d890bfff7ca95585bcf90ecd139be29ff1fb1ee1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 7 Jun 2024 21:54:58 +0200 Subject: [PATCH 377/893] doc: suggest backup before in-place restore --- doc/050_restore.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 483ba496fdc..9c24f09de1f 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -91,6 +91,12 @@ stored explicitly. Restoring in-place ------------------ +.. note:: + + Restoring data in-place can leave files in a partially restored state if the ``restore`` + operation is interrupted. To ensure you can revert back to the previous state, create + a current ``backup`` before restoring a different snapshot. + By default, the ``restore`` command overwrites already existing files at the target directory. This behavior can be configured via the ``--overwrite`` option. The following values are supported: From f19b69af252200640bae4e36678e13069f8cc7ea Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 1 Jun 2024 21:56:56 +0200 Subject: [PATCH 378/893] restore: allow overwrite to replace empty directories and symlinks With an already existing file tree an old directory or symlink may exist in a place where restore wants to create a new file. Thus, check for unexpected file types and clean up if necessary. 
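The core of the cleanup can be sketched as follows. This is a simplified, Unix-only illustration of the approach using plain os and syscall calls; it is not restic's actual implementation, and the helper name prepareTarget is made up for the example:

    package main

    import (
        "errors"
        "fmt"
        "os"
        "syscall"
    )

    // prepareTarget opens path for writing without following symlinks. If a
    // symlink or a directory sits where a regular file should be restored,
    // it is removed and a fresh file is created with O_EXCL so that we know
    // a new regular file was really created.
    func prepareTarget(path string) (*os.File, error) {
        f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|syscall.O_NOFOLLOW, 0600)
        if errors.Is(err, syscall.ELOOP) || errors.Is(err, syscall.EISDIR) {
            // a symlink or directory is in the way; remove it
            // (removal intentionally fails for non-empty directories)
            if rmErr := os.Remove(path); rmErr != nil {
                return nil, rmErr
            }
            return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_EXCL|syscall.O_NOFOLLOW, 0600)
        }
        return f, err
    }

    func main() {
        f, err := prepareTarget("/tmp/restore-target")
        if err != nil {
            fmt.Println("prepare failed:", err)
            return
        }
        fmt.Println("ready to write restored content")
        _ = f.Close()
    }

The actual patch additionally stats the opened handle to catch other non-regular file types and clears the read-only flag when needed, but the error-based fallback above is the central idea.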
--- internal/restorer/fileswriter.go | 106 +++++++++++++++++++------- internal/restorer/fileswriter_test.go | 77 +++++++++++++++++++ 2 files changed, 156 insertions(+), 27 deletions(-) diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go index 50f06c83db0..39ad65da884 100644 --- a/internal/restorer/fileswriter.go +++ b/internal/restorer/fileswriter.go @@ -1,11 +1,15 @@ package restorer import ( + "fmt" + stdfs "io/fs" "os" "sync" + "syscall" "github.com/cespare/xxhash/v2" "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" ) @@ -39,13 +43,26 @@ func newFilesWriter(count int) *filesWriter { } } -func createFile(path string, createSize int64, sparse bool) (*os.File, error) { - f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600) +func openFile(path string) (*os.File, error) { + f, err := os.OpenFile(path, os.O_WRONLY|fs.O_NOFOLLOW, 0600) if err != nil { - if !fs.IsAccessDenied(err) { - return nil, err - } + return nil, err + } + fi, err := f.Stat() + if err != nil { + _ = f.Close() + return nil, err + } + if !fi.Mode().IsRegular() { + _ = f.Close() + return nil, fmt.Errorf("unexpected file type %v at %q", fi.Mode().Type(), path) + } + return f, nil +} +func createFile(path string, createSize int64, sparse bool) (*os.File, error) { + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|fs.O_NOFOLLOW, 0600) + if err != nil && fs.IsAccessDenied(err) { // If file is readonly, clear the readonly flag by resetting the // permissions of the file and try again // as the metadata will be set again in the second pass and the @@ -53,40 +70,75 @@ func createFile(path string, createSize int64, sparse bool) (*os.File, error) { if err = fs.ResetPermissions(path); err != nil { return nil, err } - if f, err = os.OpenFile(path, os.O_WRONLY, 0600); err != nil { + if f, err = os.OpenFile(path, os.O_WRONLY|fs.O_NOFOLLOW, 0600); err != nil { + return nil, err + } + } else if err != nil && (errors.Is(err, syscall.ELOOP) || errors.Is(err, syscall.EISDIR)) { + // symlink or directory, try to remove it later on + f = nil + } else if err != nil { + return nil, err + } + + var fi stdfs.FileInfo + if f != nil { + // stat to check that we've opened a regular file + fi, err = f.Stat() + if err != nil { + _ = f.Close() + return nil, err + } + } + if f == nil || !fi.Mode().IsRegular() { + // close handle if we still have it + if f != nil { + if err := f.Close(); err != nil { + return nil, err + } + } + + // not what we expected, try to get rid of it + if err := os.Remove(path); err != nil { + return nil, err + } + // create a new file, pass O_EXCL to make sure there are no surprises + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_EXCL|fs.O_NOFOLLOW, 0600) + if err != nil { + return nil, err + } + fi, err = f.Stat() + if err != nil { + _ = f.Close() return nil, err } } + return ensureSize(f, fi, createSize, sparse) +} + +func ensureSize(f *os.File, fi stdfs.FileInfo, createSize int64, sparse bool) (*os.File, error) { if sparse { - err = truncateSparse(f, createSize) + err := truncateSparse(f, createSize) if err != nil { _ = f.Close() return nil, err } - } else { - info, err := f.Stat() + } else if fi.Size() > createSize { + // file is too long must shorten it + err := f.Truncate(createSize) if err != nil { _ = f.Close() return nil, err } - if info.Size() > createSize { - // file is too long must shorten it - err = f.Truncate(createSize) - if err != nil { - _ = f.Close() - return nil, err - } - } else if 
createSize > 0 { - err := fs.PreallocateFile(f, createSize) - if err != nil { - // Just log the preallocate error but don't let it cause the restore process to fail. - // Preallocate might return an error if the filesystem (implementation) does not - // support preallocation or our parameters combination to the preallocate call - // This should yield a syscall.ENOTSUP error, but some other errors might also - // show up. - debug.Log("Failed to preallocate %v with size %v: %v", path, createSize, err) - } + } else if createSize > 0 { + err := fs.PreallocateFile(f, createSize) + if err != nil { + // Just log the preallocate error but don't let it cause the restore process to fail. + // Preallocate might return an error if the filesystem (implementation) does not + // support preallocation or our parameters combination to the preallocate call + // This should yield a syscall.ENOTSUP error, but some other errors might also + // show up. + debug.Log("Failed to preallocate %v with size %v: %v", f.Name(), createSize, err) } } return f, nil @@ -110,7 +162,7 @@ func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, create if err != nil { return nil, err } - } else if f, err = os.OpenFile(path, os.O_WRONLY, 0600); err != nil { + } else if f, err = openFile(path); err != nil { return nil, err } diff --git a/internal/restorer/fileswriter_test.go b/internal/restorer/fileswriter_test.go index 7beb9a2dc1b..74bf479bb04 100644 --- a/internal/restorer/fileswriter_test.go +++ b/internal/restorer/fileswriter_test.go @@ -1,9 +1,13 @@ package restorer import ( + "fmt" "os" + "path/filepath" + "syscall" "testing" + "github.com/restic/restic/internal/errors" rtest "github.com/restic/restic/internal/test" ) @@ -34,3 +38,76 @@ func TestFilesWriterBasic(t *testing.T) { rtest.OK(t, err) rtest.Equals(t, []byte{2, 2}, buf) } + +func TestCreateFile(t *testing.T) { + basepath := filepath.Join(t.TempDir(), "test") + + scenarios := []struct { + name string + create func(t testing.TB, path string) + err error + }{ + { + "file", + func(t testing.TB, path string) { + rtest.OK(t, os.WriteFile(path, []byte("test-test-test-data"), 0o400)) + }, + nil, + }, + { + "empty dir", + func(t testing.TB, path string) { + rtest.OK(t, os.Mkdir(path, 0o400)) + }, + nil, + }, + { + "symlink", + func(t testing.TB, path string) { + rtest.OK(t, os.Symlink("./something", path)) + }, + nil, + }, + { + "filled dir", + func(t testing.TB, path string) { + rtest.OK(t, os.Mkdir(path, 0o700)) + rtest.OK(t, os.WriteFile(filepath.Join(path, "file"), []byte("data"), 0o400)) + }, + syscall.ENOTEMPTY, + }, + } + + tests := []struct { + size int64 + isSparse bool + }{ + {5, false}, + {21, false}, + {100, false}, + {5, true}, + {21, true}, + {100, true}, + } + + for i, sc := range scenarios { + t.Run(sc.name, func(t *testing.T) { + for _, test := range tests { + path := basepath + fmt.Sprintf("%v", i) + sc.create(t, path) + f, err := createFile(path, test.size, test.isSparse) + if sc.err == nil { + rtest.OK(t, err) + fi, err := f.Stat() + rtest.OK(t, err) + rtest.Assert(t, fi.Mode().IsRegular(), "wrong filetype %v", fi.Mode()) + rtest.Assert(t, fi.Size() <= test.size, "unexpected file size expected %v, got %v", test.size, fi.Size()) + rtest.OK(t, f.Close()) + } else { + rtest.Assert(t, errors.Is(err, sc.err), "unexpected error got %v expected %v", err, sc.err) + } + rtest.OK(t, os.RemoveAll(path)) + } + }) + } +} From d265ec64f2411bfb259f83f33b4f39fe4e592725 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 1 Jun 2024 22:21:16 +0200 
Subject: [PATCH 379/893] restore: correctly handle existing hardlinks With hardlinks there's no efficient way to detect which files are linked with each other. Thus, just start from scratch when restore has to modify a hardlinked file. --- internal/restorer/fileswriter.go | 13 ++++++++- internal/restorer/fileswriter_test.go | 41 ++++++++++++++++++--------- 2 files changed, 39 insertions(+), 15 deletions(-) diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go index 39ad65da884..9e9b6b01cfe 100644 --- a/internal/restorer/fileswriter.go +++ b/internal/restorer/fileswriter.go @@ -89,7 +89,18 @@ func createFile(path string, createSize int64, sparse bool) (*os.File, error) { return nil, err } } - if f == nil || !fi.Mode().IsRegular() { + + mustReplace := f == nil || !fi.Mode().IsRegular() + if !mustReplace { + ex := fs.ExtendedStat(fi) + if ex.Links > 1 { + // there is no efficient way to find out which other files might be linked to this file + // thus nuke the existing file and start with a fresh one + mustReplace = true + } + } + + if mustReplace { // close handle if we still have it if f != nil { if err := f.Close(); err != nil { diff --git a/internal/restorer/fileswriter_test.go b/internal/restorer/fileswriter_test.go index 74bf479bb04..b4252a96e40 100644 --- a/internal/restorer/fileswriter_test.go +++ b/internal/restorer/fileswriter_test.go @@ -45,36 +45,46 @@ func TestCreateFile(t *testing.T) { scenarios := []struct { name string create func(t testing.TB, path string) + check func(t testing.TB, path string) err error }{ { - "file", - func(t testing.TB, path string) { + name: "file", + create: func(t testing.TB, path string) { rtest.OK(t, os.WriteFile(path, []byte("test-test-test-data"), 0o400)) }, - nil, }, { - "empty dir", - func(t testing.TB, path string) { + name: "empty dir", + create: func(t testing.TB, path string) { rtest.OK(t, os.Mkdir(path, 0o400)) }, - nil, }, { - "symlink", - func(t testing.TB, path string) { + name: "symlink", + create: func(t testing.TB, path string) { rtest.OK(t, os.Symlink("./something", path)) }, - nil, }, { - "filled dir", - func(t testing.TB, path string) { + name: "filled dir", + create: func(t testing.TB, path string) { rtest.OK(t, os.Mkdir(path, 0o700)) rtest.OK(t, os.WriteFile(filepath.Join(path, "file"), []byte("data"), 0o400)) }, - syscall.ENOTEMPTY, + err: syscall.ENOTEMPTY, + }, + { + name: "hardlinks", + create: func(t testing.TB, path string) { + rtest.OK(t, os.WriteFile(path, []byte("test-test-test-data"), 0o400)) + rtest.OK(t, os.Link(path, path+"h")) + }, + check: func(t testing.TB, path string) { + data, err := os.ReadFile(path + "h") + rtest.OK(t, err) + rtest.Equals(t, "test-test-test-data", string(data), "unexpected content change") + }, }, } @@ -92,8 +102,8 @@ func TestCreateFile(t *testing.T) { for i, sc := range scenarios { t.Run(sc.name, func(t *testing.T) { - for _, test := range tests { - path := basepath + fmt.Sprintf("%v", i) + for j, test := range tests { + path := basepath + fmt.Sprintf("%v%v", i, j) sc.create(t, path) f, err := createFile(path, test.size, test.isSparse) if sc.err == nil { @@ -103,6 +113,9 @@ func TestCreateFile(t *testing.T) { rtest.Assert(t, fi.Mode().IsRegular(), "wrong filetype %v", fi.Mode()) rtest.Assert(t, fi.Size() <= test.size, "unexpected file size expected %v, got %v", test.size, fi.Size()) rtest.OK(t, f.Close()) + if sc.check != nil { + sc.check(t, path) + } } else { rtest.Assert(t, errors.Is(err, sc.err), "unexpected error got %v expected %v", err, sc.err) } From 
c598a751c25fb9d2f4050b3717639e0c6d723cc4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 2 Jun 2024 16:54:19 +0200 Subject: [PATCH 380/893] restore: fine-grained sparse support for windows --- internal/restorer/sparsewrite.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/internal/restorer/sparsewrite.go b/internal/restorer/sparsewrite.go index 2c1f234defb..ae354f64f17 100644 --- a/internal/restorer/sparsewrite.go +++ b/internal/restorer/sparsewrite.go @@ -1,6 +1,3 @@ -//go:build !windows -// +build !windows - package restorer import ( From c7902b77248a13729bcf6f8c93f0b9a47cfd27b6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 7 Jun 2024 22:43:31 +0200 Subject: [PATCH 381/893] restorer: cleanup overwrite tests --- internal/restorer/restorer_test.go | 70 ++++++++++++------------------ 1 file changed, 27 insertions(+), 43 deletions(-) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 25ce668dbd8..b1fb0ff92d0 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -895,6 +895,31 @@ func TestRestorerSparseFiles(t *testing.T) { len(zeros), blocks, 100*sparsity) } +func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSnapshot Snapshot, options Options) string { + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // base snapshot + sn, id := saveSnapshot(t, repo, baseSnapshot, noopGetGenericAttributes) + t.Logf("base snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, options) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + + // overwrite snapshot + sn, id = saveSnapshot(t, repo, overwriteSnapshot, noopGetGenericAttributes) + t.Logf("overwrite snapshot saved as %v", id.Str()) + res = NewRestorer(repo, sn, options) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + + _, err := res.VerifyFiles(ctx, tempdir) + rtest.OK(t, err) + + return tempdir +} + func TestRestorerSparseOverwrite(t *testing.T) { baseSnapshot := Snapshot{ Nodes: map[string]Node{ @@ -908,29 +933,7 @@ func TestRestorerSparseOverwrite(t *testing.T) { }, } - repo := repository.TestRepository(t) - tempdir := filepath.Join(rtest.TempDir(t), "target") - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // base snapshot - sn, id := saveSnapshot(t, repo, baseSnapshot, noopGetGenericAttributes) - t.Logf("base snapshot saved as %v", id.Str()) - - res := NewRestorer(repo, sn, Options{Sparse: true}) - err := res.RestoreTo(ctx, tempdir) - rtest.OK(t, err) - - // sparse snapshot - sn, id = saveSnapshot(t, repo, sparseSnapshot, noopGetGenericAttributes) - t.Logf("base snapshot saved as %v", id.Str()) - - res = NewRestorer(repo, sn, Options{Sparse: true, Overwrite: OverwriteAlways}) - err = res.RestoreTo(ctx, tempdir) - rtest.OK(t, err) - files, err := res.VerifyFiles(ctx, tempdir) - rtest.OK(t, err) - rtest.Equals(t, 1, files, "unexpected number of verified files") + saveSnapshotsAndOverwrite(t, baseSnapshot, sparseSnapshot, Options{Sparse: true, Overwrite: OverwriteAlways}) } func TestRestorerOverwriteBehavior(t *testing.T) { @@ -993,26 +996,7 @@ func TestRestorerOverwriteBehavior(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - repo := repository.TestRepository(t) - tempdir := filepath.Join(rtest.TempDir(t), "target") - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // base snapshot - sn, id := saveSnapshot(t, 
repo, baseSnapshot, noopGetGenericAttributes) - t.Logf("base snapshot saved as %v", id.Str()) - - res := NewRestorer(repo, sn, Options{}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) - - // overwrite snapshot - sn, id = saveSnapshot(t, repo, overwriteSnapshot, noopGetGenericAttributes) - t.Logf("overwrite snapshot saved as %v", id.Str()) - res = NewRestorer(repo, sn, Options{Overwrite: test.Overwrite}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) - - _, err := res.VerifyFiles(ctx, tempdir) - rtest.OK(t, err) + tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{Overwrite: test.Overwrite}) for filename, content := range test.Files { data, err := os.ReadFile(filepath.Join(tempdir, filepath.FromSlash(filename))) From ac729db3ce8e2073d472272ec7b2a8845d5ac120 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 7 Jun 2024 22:44:47 +0200 Subject: [PATCH 382/893] restorer: fix overwriting of special file types An attempt to replace an existing file with a hardlink previously ended with a missing file. Remove an existing file before trying to restore a special node. This generalizes the existing behavior for symlinks to all special node types. --- internal/restic/node.go | 5 --- internal/restorer/restorer.go | 5 ++- internal/restorer/restorer_test.go | 67 ++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 6 deletions(-) diff --git a/internal/restic/node.go b/internal/restic/node.go index 5bdc5ba27c2..51c6071b7a7 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -348,11 +348,6 @@ func (node Node) writeNodeContent(ctx context.Context, repo BlobLoader, f *os.Fi } func (node Node) createSymlinkAt(path string) error { - - if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) { - return errors.Wrap(err, "Symlink") - } - if err := fs.Symlink(node.LinkTarget, path); err != nil { return errors.WithStack(err) } diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 6002d6f0e71..313174fc3e8 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -221,6 +221,9 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, target, location string) error { debug.Log("restoreNode %v %v %v", node.Name, target, location) + if err := fs.Remove(target); err != nil && !errors.Is(err, os.ErrNotExist) { + return errors.Wrap(err, "RemoveNode") + } err := node.CreateAt(ctx, target, res.repo) if err != nil { @@ -242,7 +245,7 @@ func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location s } func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location string) error { - if err := fs.Remove(path); !os.IsNotExist(err) { + if err := fs.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) { return errors.Wrap(err, "RemoveCreateHardlink") } err := fs.Link(target, path) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index b1fb0ff92d0..5c23a88e420 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -37,6 +37,11 @@ type File struct { attributes *FileAttributes } +type Symlink struct { + Target string + ModTime time.Time +} + type Dir struct { Nodes map[string]Node Mode os.FileMode @@ -103,6 +108,20 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u GenericAttributes: getGenericAttributes(node.attributes, false), }) rtest.OK(t, err) + case Symlink: + 
symlink := n.(Symlink) + err := tree.Insert(&restic.Node{ + Type: "symlink", + Mode: os.ModeSymlink | 0o777, + ModTime: symlink.ModTime, + Name: name, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + LinkTarget: symlink.Target, + Inode: inode, + Links: 1, + }) + rtest.OK(t, err) case Dir: id := saveDir(t, repo, node.Nodes, inode, getGenericAttributes) @@ -1013,6 +1032,54 @@ func TestRestorerOverwriteBehavior(t *testing.T) { } } +func TestRestorerOverwriteSpecial(t *testing.T) { + baseTime := time.Now() + baseSnapshot := Snapshot{ + Nodes: map[string]Node{ + "dirtest": Dir{ModTime: baseTime}, + "link": Symlink{Target: "foo", ModTime: baseTime}, + "file": File{Data: "content: file\n", Inode: 42, Links: 2, ModTime: baseTime}, + "hardlink": File{Data: "content: file\n", Inode: 42, Links: 2, ModTime: baseTime}, + }, + } + overwriteSnapshot := Snapshot{ + Nodes: map[string]Node{ + "dirtest": Symlink{Target: "foo", ModTime: baseTime}, + "link": File{Data: "content: link\n", Inode: 42, Links: 2, ModTime: baseTime.Add(time.Second)}, + "file": Symlink{Target: "foo2", ModTime: baseTime}, + "hardlink": File{Data: "content: link\n", Inode: 42, Links: 2, ModTime: baseTime.Add(time.Second)}, + }, + } + + files := map[string]string{ + "link": "content: link\n", + "hardlink": "content: link\n", + } + links := map[string]string{ + "dirtest": "foo", + "file": "foo2", + } + + tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{Overwrite: OverwriteAlways}) + + for filename, content := range files { + data, err := os.ReadFile(filepath.Join(tempdir, filepath.FromSlash(filename))) + if err != nil { + t.Errorf("unable to read file %v: %v", filename, err) + continue + } + + if !bytes.Equal(data, []byte(content)) { + t.Errorf("file %v has wrong content: want %q, got %q", filename, content, data) + } + } + for filename, target := range links { + link, err := fs.Readlink(filepath.Join(tempdir, filepath.FromSlash(filename))) + rtest.OK(t, err) + rtest.Equals(t, link, target, "wrong symlink target") + } +} + func TestRestoreModified(t *testing.T) { // overwrite files between snapshots and also change their filesize snapshots := []Snapshot{ From ebbd4e26d7027c92498184083d816a28684178d7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 7 Jun 2024 23:02:46 +0200 Subject: [PATCH 383/893] restorer: allow directory to replace existing file --- internal/restorer/restorer.go | 26 +++++++++++++++++++------- internal/restorer/restorer_test.go | 2 ++ 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 313174fc3e8..19555afb8bc 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -259,6 +259,23 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location return res.restoreNodeMetadataTo(node, path, location) } +func (res *Restorer) ensureDir(target string) error { + fi, err := fs.Lstat(target) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to check for directory: %w", err) + } + if err == nil && !fi.IsDir() { + // try to cleanup unexpected file + if err := fs.Remove(target); err != nil { + return fmt.Errorf("failed to remove stale item: %w", err) + } + } + + // create parent dir with default permissions + // second pass #leaveDir restores dir metadata after visiting/restoring all children + return fs.MkdirAll(target, 0700) +} + // RestoreTo creates the directories and files in the snapshot below dst. 
// Before an item is created, res.Filter is called. func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { @@ -284,17 +301,12 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { enterDir: func(_ *restic.Node, target, location string) error { debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", location) res.opts.Progress.AddFile(0) - // create dir with default permissions - // #leaveDir restores dir metadata after visiting all children - return fs.MkdirAll(target, 0700) + return res.ensureDir(target) }, visitNode: func(node *restic.Node, target, location string) error { debug.Log("first pass, visitNode: mkdir %q, leaveDir on second pass should restore metadata", location) - // create parent dir with default permissions - // second pass #leaveDir restores dir metadata after visiting/restoring all children - err := fs.MkdirAll(filepath.Dir(target), 0700) - if err != nil { + if err := res.ensureDir(filepath.Dir(target)); err != nil { return err } diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 5c23a88e420..3becf7c7a0b 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -1040,6 +1040,7 @@ func TestRestorerOverwriteSpecial(t *testing.T) { "link": Symlink{Target: "foo", ModTime: baseTime}, "file": File{Data: "content: file\n", Inode: 42, Links: 2, ModTime: baseTime}, "hardlink": File{Data: "content: file\n", Inode: 42, Links: 2, ModTime: baseTime}, + "newdir": File{Data: "content: dir\n", ModTime: baseTime}, }, } overwriteSnapshot := Snapshot{ @@ -1048,6 +1049,7 @@ func TestRestorerOverwriteSpecial(t *testing.T) { "link": File{Data: "content: link\n", Inode: 42, Links: 2, ModTime: baseTime.Add(time.Second)}, "file": Symlink{Target: "foo2", ModTime: baseTime}, "hardlink": File{Data: "content: link\n", Inode: 42, Links: 2, ModTime: baseTime.Add(time.Second)}, + "newdir": Dir{ModTime: baseTime}, }, } From 4d6042fe951ba2c72eda68a65dee7c67c56ec77d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 13 Jun 2024 22:21:00 +0200 Subject: [PATCH 384/893] restore: remove unexpected xattrs from files --- internal/restic/node_xattr.go | 22 +++++++++++++ internal/restic/node_xattr_all_test.go | 44 ++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) create mode 100644 internal/restic/node_xattr_all_test.go diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go index a55fcb2db4a..5a5a253d98b 100644 --- a/internal/restic/node_xattr.go +++ b/internal/restic/node_xattr.go @@ -40,6 +40,11 @@ func setxattr(path, name string, data []byte) error { return handleXattrErr(xattr.LSet(path, name, data)) } +// removexattr removes the attribute name from path. 
+func removexattr(path, name string) error { + return handleXattrErr(xattr.LRemove(path, name)) +} + func handleXattrErr(err error) error { switch e := err.(type) { case nil: @@ -70,12 +75,29 @@ func (node *Node) fillGenericAttributes(_ string, _ os.FileInfo, _ *statT) (allo } func (node Node) restoreExtendedAttributes(path string) error { + expectedAttrs := map[string]struct{}{} for _, attr := range node.ExtendedAttributes { err := setxattr(path, attr.Name, attr.Value) if err != nil { return err } + expectedAttrs[attr.Name] = struct{}{} + } + + // remove unexpected xattrs + xattrs, err := listxattr(path) + if err != nil { + return err } + for _, name := range xattrs { + if _, ok := expectedAttrs[name]; ok { + continue + } + if err := removexattr(path, name); err != nil { + return err + } + } + return nil } diff --git a/internal/restic/node_xattr_all_test.go b/internal/restic/node_xattr_all_test.go new file mode 100644 index 00000000000..4e93330bc03 --- /dev/null +++ b/internal/restic/node_xattr_all_test.go @@ -0,0 +1,44 @@ +//go:build darwin || freebsd || linux || solaris || windows +// +build darwin freebsd linux solaris windows + +package restic + +import ( + "os" + "path/filepath" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func setAndVerifyXattr(t *testing.T, file string, attrs []ExtendedAttribute) { + node := Node{ + ExtendedAttributes: attrs, + } + rtest.OK(t, node.restoreExtendedAttributes(file)) + + nodeActual := Node{} + rtest.OK(t, nodeActual.fillExtendedAttributes(file, false)) + + rtest.Assert(t, nodeActual.sameExtendedAttributes(node), "xattr mismatch got %v expected %v", nodeActual.ExtendedAttributes, node.ExtendedAttributes) +} + +func TestOverwriteXattr(t *testing.T) { + dir := t.TempDir() + file := filepath.Join(dir, "file") + rtest.OK(t, os.WriteFile(file, []byte("hello world"), 0o600)) + + setAndVerifyXattr(t, file, []ExtendedAttribute{ + { + Name: "user.foo", + Value: []byte("bar"), + }, + }) + + setAndVerifyXattr(t, file, []ExtendedAttribute{ + { + Name: "user.other", + Value: []byte("some"), + }, + }) +} From 3d73ae9988aeae1f7b23877ebdb46404932cc788 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 13 Jun 2024 22:32:53 +0200 Subject: [PATCH 385/893] update restore changelog --- changelog/unreleased/issue-4817 | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog/unreleased/issue-4817 b/changelog/unreleased/issue-4817 index ddbd5672b04..e9c2d01a51c 100644 --- a/changelog/unreleased/issue-4817 +++ b/changelog/unreleased/issue-4817 @@ -20,3 +20,4 @@ https://github.com/restic/restic/issues/407 https://github.com/restic/restic/issues/2662 https://github.com/restic/restic/pull/4837 https://github.com/restic/restic/pull/4838 +https://github.com/restic/restic/pull/4864 From ca41c8fd11e18cc82f3c280e3573433c40fa62fa Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 13 Jun 2024 22:40:35 +0200 Subject: [PATCH 386/893] restore: use fs function wrappers This ensures proper path handling on Windows. 
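For illustration only: a common way to make Windows path handling robust is to normalize absolute paths into extended-length form so that long paths are not rejected by the legacy MAX_PATH limit. The sketch below shows that general technique with a made-up helper; it is an assumption for illustration, not restic's actual fixpath implementation:

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // toExtendedLength converts an absolute Windows path into extended-length
    // form (the `\\?\` prefix). Such paths bypass the 260-character MAX_PATH
    // limit and are passed to the Windows API with less reinterpretation.
    // Intended to run on Windows; elsewhere it mostly returns its input.
    func toExtendedLength(p string) string {
        p = filepath.Clean(p)
        switch {
        case strings.HasPrefix(p, `\\?\`):
            return p // already extended-length
        case strings.HasPrefix(p, `\\`):
            // UNC share: \\server\share\... becomes \\?\UNC\server\share\...
            return `\\?\UNC\` + strings.TrimPrefix(p, `\\`)
        case filepath.IsAbs(p):
            return `\\?\` + p
        default:
            return p // relative paths cannot carry the prefix
        }
    }

    func main() {
        fmt.Println(toExtendedLength(`C:\Users\demo\some\very\long\path\file.txt`))
        fmt.Println(toExtendedLength(`\\server\share\dir\file.txt`))
    }

Centralizing this kind of normalization in wrapper functions means every file access in the restorer benefits from it, instead of each call site having to remember it.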
--- internal/restorer/fileswriter.go | 10 +++++----- internal/restorer/restorer.go | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go index 9e9b6b01cfe..034ed27256a 100644 --- a/internal/restorer/fileswriter.go +++ b/internal/restorer/fileswriter.go @@ -44,7 +44,7 @@ func newFilesWriter(count int) *filesWriter { } func openFile(path string) (*os.File, error) { - f, err := os.OpenFile(path, os.O_WRONLY|fs.O_NOFOLLOW, 0600) + f, err := fs.OpenFile(path, fs.O_WRONLY|fs.O_NOFOLLOW, 0600) if err != nil { return nil, err } @@ -61,7 +61,7 @@ func openFile(path string) (*os.File, error) { } func createFile(path string, createSize int64, sparse bool) (*os.File, error) { - f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|fs.O_NOFOLLOW, 0600) + f, err := fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_NOFOLLOW, 0600) if err != nil && fs.IsAccessDenied(err) { // If file is readonly, clear the readonly flag by resetting the // permissions of the file and try again @@ -70,7 +70,7 @@ func createFile(path string, createSize int64, sparse bool) (*os.File, error) { if err = fs.ResetPermissions(path); err != nil { return nil, err } - if f, err = os.OpenFile(path, os.O_WRONLY|fs.O_NOFOLLOW, 0600); err != nil { + if f, err = fs.OpenFile(path, fs.O_WRONLY|fs.O_NOFOLLOW, 0600); err != nil { return nil, err } } else if err != nil && (errors.Is(err, syscall.ELOOP) || errors.Is(err, syscall.EISDIR)) { @@ -109,11 +109,11 @@ func createFile(path string, createSize int64, sparse bool) (*os.File, error) { } // not what we expected, try to get rid of it - if err := os.Remove(path); err != nil { + if err := fs.Remove(path); err != nil { return nil, err } // create a new file, pass O_EXCL to make sure there are no surprises - f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_EXCL|fs.O_NOFOLLOW, 0600) + f, err = fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_EXCL|fs.O_NOFOLLOW, 0600) if err != nil { return nil, err } diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 19555afb8bc..85132c8b4f8 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -541,7 +541,7 @@ func (s *fileState) HasMatchingBlob(i int) bool { // Reusing buffers prevents the verifier goroutines allocating all of RAM and // flushing the filesystem cache (at least on Linux). 
func (res *Restorer) verifyFile(target string, node *restic.Node, failFast bool, trustMtime bool, buf []byte) (*fileState, []byte, error) { - f, err := os.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + f, err := fs.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) if err != nil { return nil, buf, err } From 9572b7224f6ac16c0b8a9a3bbf8bd8a7c29f65e9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 13 Jun 2024 22:52:31 +0200 Subject: [PATCH 387/893] restorer: windows test fixes --- internal/restic/node_xattr_all_test.go | 14 +++++++++++++- internal/restorer/fileswriter_other_test.go | 10 ++++++++++ internal/restorer/fileswriter_test.go | 9 +++++++-- internal/restorer/fileswriter_windows_test.go | 7 +++++++ 4 files changed, 37 insertions(+), 3 deletions(-) create mode 100644 internal/restorer/fileswriter_other_test.go create mode 100644 internal/restorer/fileswriter_windows_test.go diff --git a/internal/restic/node_xattr_all_test.go b/internal/restic/node_xattr_all_test.go index 4e93330bc03..56ce5e28603 100644 --- a/internal/restic/node_xattr_all_test.go +++ b/internal/restic/node_xattr_all_test.go @@ -6,18 +6,30 @@ package restic import ( "os" "path/filepath" + "runtime" + "strings" "testing" rtest "github.com/restic/restic/internal/test" ) func setAndVerifyXattr(t *testing.T, file string, attrs []ExtendedAttribute) { + if runtime.GOOS == "windows" { + // windows seems to convert the xattr name to upper case + for i := range attrs { + attrs[i].Name = strings.ToUpper(attrs[i].Name) + } + } + node := Node{ + Type: "file", ExtendedAttributes: attrs, } rtest.OK(t, node.restoreExtendedAttributes(file)) - nodeActual := Node{} + nodeActual := Node{ + Type: "file", + } rtest.OK(t, nodeActual.fillExtendedAttributes(file, false)) rtest.Assert(t, nodeActual.sameExtendedAttributes(node), "xattr mismatch got %v expected %v", nodeActual.ExtendedAttributes, node.ExtendedAttributes) diff --git a/internal/restorer/fileswriter_other_test.go b/internal/restorer/fileswriter_other_test.go new file mode 100644 index 00000000000..530a190e524 --- /dev/null +++ b/internal/restorer/fileswriter_other_test.go @@ -0,0 +1,10 @@ +//go:build !windows +// +build !windows + +package restorer + +import "syscall" + +func notEmptyDirError() error { + return syscall.ENOTEMPTY +} diff --git a/internal/restorer/fileswriter_test.go b/internal/restorer/fileswriter_test.go index b4252a96e40..383a9e0d743 100644 --- a/internal/restorer/fileswriter_test.go +++ b/internal/restorer/fileswriter_test.go @@ -4,7 +4,7 @@ import ( "fmt" "os" "path/filepath" - "syscall" + "runtime" "testing" "github.com/restic/restic/internal/errors" @@ -72,7 +72,7 @@ func TestCreateFile(t *testing.T) { rtest.OK(t, os.Mkdir(path, 0o700)) rtest.OK(t, os.WriteFile(filepath.Join(path, "file"), []byte("data"), 0o400)) }, - err: syscall.ENOTEMPTY, + err: notEmptyDirError(), }, { name: "hardlinks", @@ -81,6 +81,11 @@ func TestCreateFile(t *testing.T) { rtest.OK(t, os.Link(path, path+"h")) }, check: func(t testing.TB, path string) { + if runtime.GOOS == "windows" { + // hardlinks are not supported on windows + return + } + data, err := os.ReadFile(path + "h") rtest.OK(t, err) rtest.Equals(t, "test-test-test-data", string(data), "unexpected content change") diff --git a/internal/restorer/fileswriter_windows_test.go b/internal/restorer/fileswriter_windows_test.go new file mode 100644 index 00000000000..ec2b062f0b3 --- /dev/null +++ b/internal/restorer/fileswriter_windows_test.go @@ -0,0 +1,7 @@ +package restorer + +import "syscall" + +func notEmptyDirError() 
error { + return syscall.ERROR_DIR_NOT_EMPTY +} From deca7d08ac3981105ecf4a04c9261e95d579e99b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 14 Jun 2024 20:17:06 +0200 Subject: [PATCH 388/893] restorer: cleanup unexpected xattrs on windows --- internal/restic/node_windows.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 0d96bdb98df..48ce072955a 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -155,6 +155,26 @@ func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute } defer closeFileHandle(fileHandle, path) // Replaced inline defer with named function call + // clear old unexpected xattrs by setting them to an empty value + oldEAs, err := fs.GetFileEA(fileHandle) + if err != nil { + return err + } + + for _, oldEA := range oldEAs { + found := false + for _, ea := range eas { + if strings.EqualFold(ea.Name, oldEA.Name) { + found = true + break + } + } + + if !found { + eas = append(eas, fs.ExtendedAttribute{Name: oldEA.Name, Value: nil}) + } + } + if err = fs.SetFileEA(fileHandle, eas); err != nil { return errors.Errorf("set EA failed for path %v, with: %v", path, err) } From b80aa7b1cc0bdbf4911d3776b04f6fb2eb900a5b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 13 Jun 2024 20:00:20 +0200 Subject: [PATCH 389/893] repository: prevent initialization if a snapshot exists --- internal/repository/repository.go | 9 +++++++++ internal/repository/repository_test.go | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 775ff796417..838858c38fd 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -780,6 +780,15 @@ func (r *Repository) Init(ctx context.Context, version uint, password string, ch }); err != nil { return err } + // Also check for snapshots to detect repositories with a misconfigured retention + // policy that deletes files older than x days. For such repositories usually the + // config and key files are removed first and therefore the check would not detect + // the old repository. 
+ if err := r.List(ctx, restic.SnapshotFile, func(_ restic.ID, _ int64) error { + return errors.New("repository already contains snapshots") + }); err != nil { + return err + } cfg, err := restic.CreateConfig(version) if err != nil { diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 0c04319d66c..ea21ea3f3d6 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -473,4 +473,14 @@ func TestNoDoubleInit(t *testing.T) { rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: backend.ConfigFile})) err = repo.Init(context.TODO(), r.Config().Version, test.TestPassword, &pol) rtest.Assert(t, strings.Contains(err.Error(), "repository already contains keys"), "expected already contains keys error, got %q", err) + + // must also prevent init if a snapshot exists and keys were deleted + var data [32]byte + hash := restic.Hash(data[:]) + rtest.OK(t, be.Save(context.TODO(), backend.Handle{Type: backend.SnapshotFile, Name: hash.String()}, backend.NewByteReader(data[:], be.Hasher()))) + rtest.OK(t, be.List(context.TODO(), restic.KeyFile, func(fi backend.FileInfo) error { + return be.Remove(context.TODO(), backend.Handle{Type: restic.KeyFile, Name: fi.Name}) + })) + err = repo.Init(context.TODO(), r.Config().Version, test.TestPassword, &pol) + rtest.Assert(t, strings.Contains(err.Error(), "repository already contains snapshots"), "expected already contains snapshots error, got %q", err) } From 20a6c526e518b1d73a3b2b1797bccbff218c6fbc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 14 Jun 2024 21:15:46 +0200 Subject: [PATCH 390/893] restic: also fix file path when reading windows xattrs --- internal/fs/file_windows.go | 18 ++++++++++++++++++ internal/restic/node_windows.go | 21 ++------------------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go index 2f09698040c..cadd69284d1 100644 --- a/internal/fs/file_windows.go +++ b/internal/fs/file_windows.go @@ -103,3 +103,21 @@ func ClearAttribute(path string, attribute uint32) error { } return nil } + +// OpenHandleForEA return a file handle for file or dir for setting/getting EAs +func OpenHandleForEA(nodeType, path string) (handle windows.Handle, err error) { + path = fixpath(path) + switch nodeType { + case "file": + utf16Path := windows.StringToUTF16Ptr(path) + fileAccessRightReadWriteEA := (0x8 | 0x10) + handle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) + case "dir": + utf16Path := windows.StringToUTF16Ptr(path) + fileAccessRightReadWriteEA := (0x8 | 0x10) + handle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + default: + return 0, nil + } + return handle, err +} diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 0d96bdb98df..0825992b125 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -88,7 +88,7 @@ func (node Node) restoreExtendedAttributes(path string) (err error) { // fill extended attributes in the node. This also includes the Generic attributes for windows. 
func (node *Node) fillExtendedAttributes(path string, _ bool) (err error) { var fileHandle windows.Handle - if fileHandle, err = getFileHandleForEA(node.Type, path); fileHandle == 0 { + if fileHandle, err = fs.OpenHandleForEA(node.Type, path); fileHandle == 0 { return nil } if err != nil { @@ -118,23 +118,6 @@ func (node *Node) fillExtendedAttributes(path string, _ bool) (err error) { return nil } -// Get file handle for file or dir for setting/getting EAs -func getFileHandleForEA(nodeType, path string) (handle windows.Handle, err error) { - switch nodeType { - case "file": - utf16Path := windows.StringToUTF16Ptr(path) - fileAccessRightReadWriteEA := (0x8 | 0x10) - handle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) - case "dir": - utf16Path := windows.StringToUTF16Ptr(path) - fileAccessRightReadWriteEA := (0x8 | 0x10) - handle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - default: - return 0, nil - } - return handle, err -} - // closeFileHandle safely closes a file handle and logs any errors. func closeFileHandle(fileHandle windows.Handle, path string) { err := windows.CloseHandle(fileHandle) @@ -147,7 +130,7 @@ func closeFileHandle(fileHandle windows.Handle, path string) { // The Windows API requires setting of all the Extended Attributes in one call. func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute) (err error) { var fileHandle windows.Handle - if fileHandle, err = getFileHandleForEA(nodeType, path); fileHandle == 0 { + if fileHandle, err = fs.OpenHandleForEA(nodeType, path); fileHandle == 0 { return nil } if err != nil { From c01b65522917cff4ebd3d51528732d8f3815a853 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 14 Jun 2024 22:36:07 +0200 Subject: [PATCH 391/893] backup: fix reading xattrs on Windows for root disks as regular user Unprivileged users cannot open the root disks with write permissions for xattrs. Thus, only request read permissions for reading the xattrs. 
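The following is an editorial sketch, not part of the patch itself: it only illustrates why the access mask matters when opening a handle for extended attributes on Windows. The helper name openForEA is hypothetical (the patch's real helper is fs.OpenHandleForEA, shown in the diff below); the constants and calls come from golang.org/x/sys/windows, which the patch already uses. Requesting only FILE_READ_EA is enough for backup and is expected to work for unprivileged users even on a volume root such as C:\, while FILE_WRITE_EA is requested only on the restore path, which needs it.

// Illustrative, Windows-only sketch (assumes golang.org/x/sys/windows).
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// openForEA is a hypothetical stand-in for the patch's fs.OpenHandleForEA.
func openForEA(path string, writeAccess bool) (windows.Handle, error) {
	access := uint32(windows.FILE_READ_EA) // reading EAs only needs read access
	if writeAccess {
		access |= windows.FILE_WRITE_EA // the restore path additionally needs write access
	}
	// FILE_FLAG_BACKUP_SEMANTICS allows opening directories (including volume roots).
	return windows.CreateFile(windows.StringToUTF16Ptr(path), access, 0, nil,
		windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
}

func main() {
	h, err := openForEA(`C:\`, false) // read-only: expected to succeed without elevation
	fmt.Println(h, err)
	_, err = openForEA(`C:\`, true) // read+write: expected to fail for regular users
	fmt.Println(err)
}
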
--- internal/fs/file_windows.go | 13 ++++++++----- internal/restic/node_windows.go | 4 ++-- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go index cadd69284d1..b05068c426b 100644 --- a/internal/fs/file_windows.go +++ b/internal/fs/file_windows.go @@ -105,17 +105,20 @@ func ClearAttribute(path string, attribute uint32) error { } // OpenHandleForEA return a file handle for file or dir for setting/getting EAs -func OpenHandleForEA(nodeType, path string) (handle windows.Handle, err error) { +func OpenHandleForEA(nodeType, path string, writeAccess bool) (handle windows.Handle, err error) { path = fixpath(path) + fileAccess := windows.FILE_READ_EA + if writeAccess { + fileAccess = fileAccess | windows.FILE_WRITE_EA + } + switch nodeType { case "file": utf16Path := windows.StringToUTF16Ptr(path) - fileAccessRightReadWriteEA := (0x8 | 0x10) - handle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) + handle, err = windows.CreateFile(utf16Path, uint32(fileAccess), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) case "dir": utf16Path := windows.StringToUTF16Ptr(path) - fileAccessRightReadWriteEA := (0x8 | 0x10) - handle, err = windows.CreateFile(utf16Path, uint32(fileAccessRightReadWriteEA), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + handle, err = windows.CreateFile(utf16Path, uint32(fileAccess), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) default: return 0, nil } diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 0825992b125..8942db9013f 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -88,7 +88,7 @@ func (node Node) restoreExtendedAttributes(path string) (err error) { // fill extended attributes in the node. This also includes the Generic attributes for windows. func (node *Node) fillExtendedAttributes(path string, _ bool) (err error) { var fileHandle windows.Handle - if fileHandle, err = fs.OpenHandleForEA(node.Type, path); fileHandle == 0 { + if fileHandle, err = fs.OpenHandleForEA(node.Type, path, false); fileHandle == 0 { return nil } if err != nil { @@ -130,7 +130,7 @@ func closeFileHandle(fileHandle windows.Handle, path string) { // The Windows API requires setting of all the Extended Attributes in one call. func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute) (err error) { var fileHandle windows.Handle - if fileHandle, err = fs.OpenHandleForEA(nodeType, path); fileHandle == 0 { + if fileHandle, err = fs.OpenHandleForEA(nodeType, path, true); fileHandle == 0 { return nil } if err != nil { From e9d711422ac7e66c05fc3e4403e230fd72e6ccb2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 26 Jun 2024 19:52:26 +0200 Subject: [PATCH 392/893] bump azure, golang and gcs dependencies --- changelog/unreleased/issue-4873 | 6 ++ go.mod | 66 +++++++------- go.sum | 151 +++++++++++++++++--------------- 3 files changed, 121 insertions(+), 102 deletions(-) create mode 100644 changelog/unreleased/issue-4873 diff --git a/changelog/unreleased/issue-4873 b/changelog/unreleased/issue-4873 new file mode 100644 index 00000000000..22d25dff8dd --- /dev/null +++ b/changelog/unreleased/issue-4873 @@ -0,0 +1,6 @@ +Enhancement: Update dependencies + +A few potentially vulnerable dependencies were updated. 
+ +https://github.com/restic/restic/issues/4873 +https://github.com/restic/restic/pull/4878 diff --git a/go.mod b/go.mod index 970770a90c6..c8b67ee4a07 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,9 @@ module github.com/restic/restic require ( - cloud.google.com/go/storage v1.34.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 + cloud.google.com/go/storage v1.41.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.2.0 @@ -25,58 +25,64 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.17.0 - golang.org/x/net v0.19.0 - golang.org/x/oauth2 v0.15.0 - golang.org/x/sync v0.5.0 - golang.org/x/sys v0.15.0 - golang.org/x/term v0.15.0 - golang.org/x/text v0.14.0 + golang.org/x/crypto v0.24.0 + golang.org/x/net v0.26.0 + golang.org/x/oauth2 v0.20.0 + golang.org/x/sync v0.7.0 + golang.org/x/sys v0.21.0 + golang.org/x/term v0.21.0 + golang.org/x/text v0.16.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.149.0 + google.golang.org/api v0.182.0 ) replace github.com/klauspost/compress => github.com/klauspost/compress v1.17.2 require ( - cloud.google.com/go v0.110.9 // indirect - cloud.google.com/go/compute v1.23.1 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect + cloud.google.com/go v0.114.0 // indirect + cloud.google.com/go/auth v0.4.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/iam v1.1.8 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect github.com/google/s2a-go v0.1.7 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/kr/fs v0.1.0 // indirect - github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pkg/browser 
v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 129eafed209..dc6e931e8e0 100644 --- a/go.sum +++ b/go.sum @@ -1,25 +1,27 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.110.9 h1:e7ITSqGFFk4rbz/JFIqZh3G4VEHguhAL4BQcFlWtU68= -cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= -cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0= -cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.3 h1:18tKG7DzydKWUnLjonWcJO6wjSCAtzh4GcRKlH/Hrzc= -cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= -cloud.google.com/go/storage v1.34.0 h1:9KHBBTbaHPsNxO043SFmH3pMojjZiW+BFl9H41L7xjk= -cloud.google.com/go/storage v1.34.0/go.mod h1:Eji+S0CCQebjsiXxyIvPItC3BN3zWsdJjWfHfoLblgY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= +cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= +cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg= +cloud.google.com/go/auth v0.4.2/go.mod 
h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= +cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= github.com/Backblaze/blazer v0.6.1/go.mod h1:7/jrGx4O6OKOto6av+hLwelPR8rwZ+PLxQ5ZOiYAjwY= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -39,11 +41,9 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvyukov/go-fuzz 
v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= @@ -56,10 +56,17 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -74,33 +81,30 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -115,9 +119,8 @@ github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/4 github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= @@ -133,8 +136,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser 
v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= @@ -150,6 +153,7 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/restic/chunker v0.4.0 h1:YUPYCUn70MYP7VO4yllypp2SjmsRhRJaad3xKu1QFRw= github.com/restic/chunker v0.4.0/go.mod h1:z0cH2BejpW636LXw0R/BGyv+Ey8+m9QGiOanDHItzyw= github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -169,13 +173,24 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/automaxprocs v1.5.3 
h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -183,8 +198,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -202,18 +217,18 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -221,7 +236,6 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -229,20 +243,19 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -257,29 +270,26 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 
h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= -google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= +google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE= +google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= -google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 h1:W5Xj/70xIA4x60O/IFyXivR5MGqblAb8R3w26pnD6No= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -289,15 +299,12 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 84bc9432de4cbb91604475d49f36a7b0bdf7b1be Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 7 Jun 2024 21:00:10 +0200 Subject: [PATCH 393/893] update release verification script for latest docker --- helpers/verify-release-binaries.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/helpers/verify-release-binaries.sh b/helpers/verify-release-binaries.sh index 4e80528e2ba..5ac57027bbb 100755 --- a/helpers/verify-release-binaries.sh +++ b/helpers/verify-release-binaries.sh @@ -89,13 +89,14 @@ extract_docker() { restic_platform=$3 out=restic_${restic_version}_linux_${restic_platform}.bz2 + # requires at least docker 25.0 docker image pull --platform "linux/${docker_platform}" ${image}:${restic_version} > /dev/null docker image save ${image}:${restic_version} -o docker.tar mkdir img - tar xvf docker.tar -C img --wildcards \*/layer.tar > /dev/null + tar xvf docker.tar -C img --wildcards blobs/sha256/\* > /dev/null rm docker.tar - for i in img/*/layer.tar; do + for i in img/blobs/sha256/*; do tar -xvf "$i" -C img usr/bin/restic 2> /dev/null 1>&2 || true if [[ -f img/usr/bin/restic ]]; then if [[ -f restic-docker ]]; then From 834f08fe2db4074d2f509b82a11d9e1b3047cd8d Mon Sep 17 00:00:00 2001 From: Maik Riechert Date: Wed, 15 May 2024 16:54:28 +0000 Subject: [PATCH 394/893] Azure: add option to force use of CLI credential --- changelog/unreleased/pull-4799 | 5 +++++ doc/030_preparing_a_new_repo.rst | 16 +++++++++++++++- doc/040_backup.rst | 1 + internal/backend/azure/azure.go | 12 
++++++++++++ internal/backend/azure/config.go | 19 +++++++++++++------ 5 files changed, 46 insertions(+), 7 deletions(-) create mode 100644 changelog/unreleased/pull-4799 diff --git a/changelog/unreleased/pull-4799 b/changelog/unreleased/pull-4799 new file mode 100644 index 00000000000..0179bc51a91 --- /dev/null +++ b/changelog/unreleased/pull-4799 @@ -0,0 +1,5 @@ +Enhancement: Add option to force use of Azure CLI credential + +A new environment variable `AZURE_FORCE_CLI_CREDENTIAL=true` allows forcing the use of Azure CLI credential, ignoring other credentials like managed identity. + +https://github.com/restic/restic/pull/4799 diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 5ff26934a93..62499a1d6ff 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -548,9 +548,23 @@ For authentication export one of the following variables: # For SAS $ export AZURE_ACCOUNT_SAS= -Alternatively, if run on Azure, restic will automatically uses service accounts configured +For authentication using ``az login`` ensure the user has +the minimum permissions of the role assignment ``Storage Blob Data Contributor`` on Azure RBAC +for the storage account. + +.. code-block:: console + + $ az login + +Alternatively, if run on Azure, restic will automatically use service accounts configured via the standard environment variables or Workload / Managed Identities. +To enforce the use of the Azure CLI credential when other credentials are present, set the following environment variable: + +.. code-block:: console + + $ export AZURE_FORCE_CLI_CREDENTIAL=true + Restic will by default use Azure's global domain ``core.windows.net`` as endpoint suffix. You can specify other suffixes as follows: diff --git a/doc/040_backup.rst b/doc/040_backup.rst index c917c3c2980..621b07e2e20 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -597,6 +597,7 @@ environment variables. 
The following lists these environment variables: AZURE_ACCOUNT_KEY Account key for Azure AZURE_ACCOUNT_SAS Shared access signatures (SAS) for Azure AZURE_ENDPOINT_SUFFIX Endpoint suffix for Azure Storage (default: core.windows.net) + AZURE_FORCE_CLI_CREDENTIAL Force the use of Azure CLI credentials for authentication B2_ACCOUNT_ID Account ID or applicationKeyId for Backblaze B2 B2_ACCOUNT_KEY Account Key or applicationKey for Backblaze B2 diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 50be63d5a03..de58df37476 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -101,6 +101,18 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { if err != nil { return nil, errors.Wrap(err, "NewAccountSASClientFromEndpointToken") } + } else if cfg.ForceCliCredential { + debug.Log(" - using AzureCLICredential") + + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return nil, errors.Wrap(err, "NewAzureCLICredential") + } + + client, err = azContainer.NewClient(url, cred, opts) + if err != nil { + return nil, errors.Wrap(err, "NewClient") + } } else { debug.Log(" - using DefaultAzureCredential") cred, err := azidentity.NewDefaultAzureCredential(nil) diff --git a/internal/backend/azure/config.go b/internal/backend/azure/config.go index d819b35aa6b..61c413efa07 100644 --- a/internal/backend/azure/config.go +++ b/internal/backend/azure/config.go @@ -3,6 +3,7 @@ package azure import ( "os" "path" + "strconv" "strings" "github.com/restic/restic/internal/errors" @@ -13,12 +14,13 @@ import ( // Config contains all configuration necessary to connect to an azure compatible // server. type Config struct { - AccountName string - AccountSAS options.SecretString - AccountKey options.SecretString - EndpointSuffix string - Container string - Prefix string + AccountName string + AccountSAS options.SecretString + AccountKey options.SecretString + ForceCliCredential bool + EndpointSuffix string + Container string + Prefix string Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` } @@ -73,6 +75,11 @@ func (cfg *Config) ApplyEnvironment(prefix string) { cfg.AccountSAS = options.NewSecretString(os.Getenv(prefix + "AZURE_ACCOUNT_SAS")) } + var forceCliCred, err = strconv.ParseBool(os.Getenv(prefix + "AZURE_FORCE_CLI_CREDENTIAL")) + if err == nil { + cfg.ForceCliCredential = forceCliCred + } + if cfg.EndpointSuffix == "" { cfg.EndpointSuffix = os.Getenv(prefix + "AZURE_ENDPOINT_SUFFIX") } From 12e858b7af2149e680d92ceac4aece9c38079b12 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 18 May 2024 22:15:38 +0200 Subject: [PATCH 395/893] azure: deduplicate cli and default credentials case --- internal/backend/azure/azure.go | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index de58df37476..4ccfb966480 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -101,23 +101,21 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { if err != nil { return nil, errors.Wrap(err, "NewAccountSASClientFromEndpointToken") } - } else if cfg.ForceCliCredential { - debug.Log(" - using AzureCLICredential") - - cred, err := azidentity.NewAzureCLICredential(nil) - if err != nil { - return nil, errors.Wrap(err, "NewAzureCLICredential") - } - - client, err = azContainer.NewClient(url, cred, opts) - if err != nil { - return nil, 
errors.Wrap(err, "NewClient") - } } else { - debug.Log(" - using DefaultAzureCredential") - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - return nil, errors.Wrap(err, "NewDefaultAzureCredential") + var cred azcore.TokenCredential + + if cfg.ForceCliCredential { + debug.Log(" - using AzureCLICredential") + cred, err = azidentity.NewAzureCLICredential(nil) + if err != nil { + return nil, errors.Wrap(err, "NewAzureCLICredential") + } + } else { + debug.Log(" - using DefaultAzureCredential") + cred, err = azidentity.NewDefaultAzureCredential(nil) + if err != nil { + return nil, errors.Wrap(err, "NewDefaultAzureCredential") + } } client, err = azContainer.NewClient(url, cred, opts) From ce0cbc7a36dfcec9532c6e58537261b173d56747 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 12:03:09 +0200 Subject: [PATCH 396/893] check: fix cachedir creation when using default location --- changelog/unreleased/issue-4437 | 1 + cmd/restic/cmd_check.go | 14 ++++++++------ cmd/restic/cmd_check_test.go | 15 +++++++++++++++ 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/changelog/unreleased/issue-4437 b/changelog/unreleased/issue-4437 index 199c04cbf19..ee0981c50e8 100644 --- a/changelog/unreleased/issue-4437 +++ b/changelog/unreleased/issue-4437 @@ -7,3 +7,4 @@ The `check` command now attempts to create the cache directory before initializ https://github.com/restic/restic/issues/4437 https://github.com/restic/restic/pull/4805 +https://github.com/restic/restic/pull/4883 diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 642a46c0006..cccb1986e1e 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -176,12 +176,14 @@ func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions, printer progress cachedir = cache.EnvDir() } - // use a cache in a temporary directory - err := os.MkdirAll(cachedir, 0755) - if err != nil { - Warnf("unable to create cache directory %s, disabling cache: %v\n", cachedir, err) - gopts.NoCache = true - return cleanup + if cachedir != "" { + // use a cache in a temporary directory + err := os.MkdirAll(cachedir, 0755) + if err != nil { + Warnf("unable to create cache directory %s, disabling cache: %v\n", cachedir, err) + gopts.NoCache = true + return cleanup + } } tempdir, err := os.MkdirTemp(cachedir, "restic-check-cache-") if err != nil { diff --git a/cmd/restic/cmd_check_test.go b/cmd/restic/cmd_check_test.go index 9516ce4203f..18d607a14c3 100644 --- a/cmd/restic/cmd_check_test.go +++ b/cmd/restic/cmd_check_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/progress" @@ -229,3 +230,17 @@ func TestPrepareCheckCache(t *testing.T) { }) } } + +func TestPrepareDefaultCheckCache(t *testing.T) { + gopts := GlobalOptions{CacheDir: ""} + cleanup := prepareCheckCache(CheckOptions{}, &gopts, &progress.NoopPrinter{}) + _, err := os.ReadDir(gopts.CacheDir) + rtest.OK(t, err) + + // Call the cleanup function to remove the temporary cache directory + cleanup() + + // Verify that the cache directory has been removed + _, err = os.ReadDir(gopts.CacheDir) + rtest.Assert(t, errors.Is(err, os.ErrNotExist), "Expected cache directory to be removed, but it still exists") +} From ed904dfac0d361a2f87e62cd2b94fd78b6b65ecb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 20:32:36 +0200 Subject: [PATCH 397/893] clarify that all 
paths specified using --path option must match --- cmd/restic/find.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/restic/find.go b/cmd/restic/find.go index c7754d5d9bf..faf7024e108 100644 --- a/cmd/restic/find.go +++ b/cmd/restic/find.go @@ -17,7 +17,7 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter, } flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times) (default: $RESTIC_HOST)") flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)") - flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)") + flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times, snapshots must include all specified paths)") // set default based on env if set if host := os.Getenv("RESTIC_HOST"); host != "" { @@ -30,7 +30,7 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter, func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) { flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times) (default: $RESTIC_HOST)") flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)") - flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)") + flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times, snapshots must include all specified paths)") // set default based on env if set if host := os.Getenv("RESTIC_HOST"); host != "" { From aa33af3e758b29ee1918963ec7c51a827b88fc5f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 20:33:50 +0200 Subject: [PATCH 398/893] update command help docs --- doc/manual_rest.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index 9d9d6e14103..a7a0f96e0c3 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -54,7 +54,7 @@ Usage help is available: --cleanup-cache auto remove old cache directories --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) (default auto) -h, --help help for restic - --http-user-agent value set a custom user agent for outgoing http requests + --http-user-agent string set a http user agent for outgoing http requests --insecure-no-password use an empty password for the repository, must be passed to every restic command (insecure) --insecure-tls skip TLS certificate verification when connecting to the repository (insecure) --json set output mode to JSON for commands that support it @@ -122,6 +122,7 @@ command: -x, --one-file-system exclude other file systems, don't cross filesystem boundaries and subvolumes --parent snapshot use this parent snapshot (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time) --read-concurrency n read n files concurrently (default: 
$RESTIC_READ_CONCURRENCY or 2) + --skip-if-unchanged skip snapshot creation if identical to parent snapshot --stdin read backup from stdin --stdin-filename filename filename to use when reading from stdin (default "stdin") --stdin-from-command interpret arguments as command to execute and store its stdout @@ -135,7 +136,7 @@ command: --cache-dir directory set the cache directory. (default: use system default cache directory) --cleanup-cache auto remove old cache directories --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) (default auto) - --http-user-agent value set a custom user agent for outgoing http requests + --http-user-agent string set a http user agent for outgoing http requests --insecure-no-password use an empty password for the repository, must be passed to every restic command (insecure) --insecure-tls skip TLS certificate verification when connecting to the repository (insecure) --json set output mode to JSON for commands that support it From bda7aba97d2324be3ea65e0b8ca01568dccf9e2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 01:15:18 +0000 Subject: [PATCH 399/893] build(deps): bump docker/build-push-action from 5 to 6 Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v5...v6) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/docker.yml | 2 +- .github/workflows/tests.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 771bf9f9a2a..a943d1b15ed 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -55,7 +55,7 @@ jobs: if: github.ref != 'refs/heads/master' - name: Build and push Docker image - uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 + uses: docker/build-push-action@15560696de535e4014efeff63c48f16952e52dd1 with: push: true context: . diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 796a614b18e..3ca7a9edbce 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -325,7 +325,7 @@ jobs: - name: Build and push id: docker_build - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: push: false context: . From 61713898038fdc09bb745100b13d10ac5a58bd23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 01:40:44 +0000 Subject: [PATCH 400/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore Bumps [github.com/Azure/azure-sdk-for-go/sdk/azcore](https://github.com/Azure/azure-sdk-for-go) from 1.11.1 to 1.12.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.11.1...sdk/azcore/v1.12.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azcore dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 2047d4657bb..098fe0613a1 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.41.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/Backblaze/blazer v0.6.1 @@ -43,7 +43,7 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/iam v1.1.8 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect diff --git a/go.sum b/go.sum index e229a0bdc1e..bd18fa18e4a 100644 --- a/go.sum +++ b/go.sum @@ -11,12 +11,12 @@ cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 h1:1nGuui+4POelzDwI7RG56yfQJHCnKvwfMoU7VsEp+Zg= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 h1:H+U3Gk9zY56G3u872L82bk4thcsy2Gghb9ExT4Zvm1o= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax62XQTd0N4aAqSE5E0DulSpVYK7vc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E= From 3de98f11f07d784aeb8f8f9ca1ff1724641e8c75 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 01:40:50 +0000 Subject: [PATCH 401/893] build(deps): bump github.com/spf13/cobra from 1.8.0 to 1.8.1 Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.8.0 to 1.8.1. 
- [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.8.0...v1.8.1) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 2047d4657bb..cc6528983ff 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/pkg/sftp v1.13.6 github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013 github.com/restic/chunker v0.4.0 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.24.0 @@ -45,7 +45,7 @@ require ( cloud.google.com/go/iam v1.1.8 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect diff --git a/go.sum b/go.sum index e229a0bdc1e..619e701763b 100644 --- a/go.sum +++ b/go.sum @@ -45,8 +45,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -185,8 +185,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc= From 59b5eacd0f61a6c665029539337daf70cc6dca84 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 01:40:59 +0000 Subject: [PATCH 
402/893] build(deps): bump github.com/klauspost/compress from 1.17.8 to 1.17.9 Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.17.8 to 1.17.9. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.17.8...v1.17.9) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2047d4657bb..a06584192d4 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/go-ole/go-ole v1.3.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/klauspost/compress v1.17.8 + github.com/klauspost/compress v1.17.9 github.com/minio/minio-go/v7 v7.0.66 github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 diff --git a/go.sum b/go.sum index e229a0bdc1e..18310efd521 100644 --- a/go.sum +++ b/go.sum @@ -121,8 +121,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= From e939c9fd6cecdc400a4e390657fc2f27ebd1f179 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 01:41:05 +0000 Subject: [PATCH 403/893] build(deps): bump golang.org/x/oauth2 from 0.20.0 to 0.21.0 Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.20.0 to 0.21.0. - [Commits](https://github.com/golang/oauth2/compare/v0.20.0...v0.21.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2047d4657bb..02cebccfb74 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.24.0 golang.org/x/net v0.26.0 - golang.org/x/oauth2 v0.20.0 + golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.7.0 golang.org/x/sys v0.21.0 golang.org/x/term v0.21.0 diff --git a/go.sum b/go.sum index e229a0bdc1e..10ebc431c76 100644 --- a/go.sum +++ b/go.sum @@ -250,8 +250,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 2fa1b42706646c81687aa47f6585b1e6dd2419f5 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Jul 2024 21:25:33 +0200 Subject: [PATCH 404/893] Prepare changelog for 0.16.5 --- changelog/{unreleased => 0.16.5_2024-07-01}/issue-4873 | 0 changelog/{unreleased => 0.16.5_2024-07-01}/pull-4799 | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename changelog/{unreleased => 0.16.5_2024-07-01}/issue-4873 (100%) rename changelog/{unreleased => 0.16.5_2024-07-01}/pull-4799 (100%) diff --git a/changelog/unreleased/issue-4873 b/changelog/0.16.5_2024-07-01/issue-4873 similarity index 100% rename from changelog/unreleased/issue-4873 rename to changelog/0.16.5_2024-07-01/issue-4873 diff --git a/changelog/unreleased/pull-4799 b/changelog/0.16.5_2024-07-01/pull-4799 similarity index 100% rename from changelog/unreleased/pull-4799 rename to changelog/0.16.5_2024-07-01/pull-4799 From 6ae760751ab2685bbc33bde5bb72949899037fd3 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Jul 2024 21:25:33 +0200 Subject: [PATCH 405/893] Generate CHANGELOG.md for 0.16.5 --- CHANGELOG.md | 3468 ++++++++++++++++++++++++-------------------------- 1 file changed, 1633 insertions(+), 1835 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b8969a4438c..b7ab571588c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ # Table of Contents +* [Changelog for 0.16.5](#changelog-for-restic-0165-2024-07-01) * [Changelog for 0.16.4](#changelog-for-restic-0164-2024-02-04) * [Changelog for 0.16.3](#changelog-for-restic-0163-2024-01-14) * [Changelog for 0.16.2](#changelog-for-restic-0162-2023-10-29) @@ -33,6 +34,32 @@ * [Changelog for 0.6.0](#changelog-for-restic-060-2017-05-29) +# Changelog for restic 0.16.5 (2024-07-01) +The following sections list the changes in restic 0.16.5 relevant to +restic users. The changes are ordered by importance. 
+ +## Summary + + * Enh #4799: Add option to force use of Azure CLI credential + * Enh #4873: Update dependencies + +## Details + + * Enhancement #4799: Add option to force use of Azure CLI credential + + A new environment variable `AZURE_FORCE_CLI_CREDENTIAL=true` allows forcing the use of + Azure CLI credential, ignoring other credentials like managed identity. + + https://github.com/restic/restic/pull/4799 + + * Enhancement #4873: Update dependencies + + A few potentially vulnerable dependencies were updated. + + https://github.com/restic/restic/issues/4873 + https://github.com/restic/restic/pull/4878 + + # Changelog for restic 0.16.4 (2024-02-04) The following sections list the changes in restic 0.16.4 relevant to restic users. The changes are ordered by importance. @@ -46,39 +73,35 @@ restic users. The changes are ordered by importance. * Bugfix #4677: Downgrade zstd library to fix rare data corruption at max. compression - In restic 0.16.3, backups where the compression level was set to `max` (using - `--compression max`) could in rare and very specific circumstances result in - data corruption due to a bug in the library used for compressing data. Restic - 0.16.1 and 0.16.2 were not affected. + In restic 0.16.3, backups where the compression level was set to `max` (using `--compression + max`) could in rare and very specific circumstances result in data corruption due to a bug in the + library used for compressing data. Restic 0.16.1 and 0.16.2 were not affected. - Restic now uses the previous version of the library used to compress data, the - same version used by restic 0.16.2. Please note that the `auto` compression - level (which restic uses by default) was never affected, and even if you used - `max` compression, chances of being affected by this issue are small. + Restic now uses the previous version of the library used to compress data, the same version used + by restic 0.16.2. Please note that the `auto` compression level (which restic uses by default) + was never affected, and even if you used `max` compression, chances of being affected by this + issue are small. - To check a repository for any corruption, run `restic check --read-data`. This - will download and verify the whole repository and can be used at any time to - completely verify the integrity of a repository. If the `check` command detects - anomalies, follow the suggested steps. + To check a repository for any corruption, run `restic check --read-data`. This will download + and verify the whole repository and can be used at any time to completely verify the integrity of + a repository. If the `check` command detects anomalies, follow the suggested steps. https://github.com/restic/restic/issues/4677 https://github.com/restic/restic/pull/4679 * Enhancement #4529: Add extra verification of data integrity before upload - Hardware issues, or a bug in restic or its dependencies, could previously cause - corruption in the files restic created and stored in the repository. Detecting - such corruption previously required explicitly running the `check --read-data` - or `check --read-data-subset` commands. + Hardware issues, or a bug in restic or its dependencies, could previously cause corruption in + the files restic created and stored in the repository. Detecting such corruption previously + required explicitly running the `check --read-data` or `check --read-data-subset` + commands. 
- To further ensure data integrity, even in the case of hardware issues or - software bugs, restic now performs additional verification of the files about to - be uploaded to the repository. + To further ensure data integrity, even in the case of hardware issues or software bugs, restic + now performs additional verification of the files about to be uploaded to the repository. - These extra checks will increase CPU usage during backups. They can therefore, - if absolutely necessary, be disabled using the `--no-extra-verify` global - option. Please note that this should be combined with more active checking using - the previously mentioned check commands. + These extra checks will increase CPU usage during backups. They can therefore, if absolutely + necessary, be disabled using the `--no-extra-verify` global option. Please note that this + should be combined with more active checking using the previously mentioned check commands. https://github.com/restic/restic/issues/4529 https://github.com/restic/restic/pull/4681 @@ -100,14 +123,13 @@ restic users. The changes are ordered by importance. * Bugfix #4560: Improve errors for irregular files on Windows - Since Go 1.21, most filesystem reparse points on Windows are considered to be - irregular files. This caused restic to show an `error: invalid node type ""` - error message for those files. + Since Go 1.21, most filesystem reparse points on Windows are considered to be irregular files. + This caused restic to show an `error: invalid node type ""` error message for those files. - This error message has now been improved and includes the relevant file path: - `error: nodeFromFileInfo path/to/file: unsupported file type "irregular"`. As - irregular files are not required to behave like regular files, it is not - possible to provide a generic way to back up those files. + This error message has now been improved and includes the relevant file path: `error: + nodeFromFileInfo path/to/file: unsupported file type "irregular"`. As irregular files are + not required to behave like regular files, it is not possible to provide a generic way to back up + those files. https://github.com/restic/restic/issues/4560 https://github.com/restic/restic/pull/4620 @@ -115,10 +137,9 @@ restic users. The changes are ordered by importance. * Bugfix #4574: Support backup of deduplicated files on Windows again - With the official release builds of restic 0.16.1 and 0.16.2, it was not - possible to back up files that were deduplicated by the corresponding Windows - Server feature. This also applied to restic versions built using Go - 1.21.0-1.21.4. + With the official release builds of restic 0.16.1 and 0.16.2, it was not possible to back up + files that were deduplicated by the corresponding Windows Server feature. This also applied + to restic versions built using Go 1.21.0-1.21.4. The Go version used to build restic has now been updated to fix this. @@ -127,10 +148,10 @@ restic users. The changes are ordered by importance. * Bugfix #4612: Improve error handling for `rclone` backend - Since restic 0.16.0, if rclone encountered an error while listing files, this - could in rare circumstances cause restic to assume that there are no files. - Although unlikely, this situation could result in data loss if it were to happen - right when the `prune` command is listing existing snapshots. + Since restic 0.16.0, if rclone encountered an error while listing files, this could in rare + circumstances cause restic to assume that there are no files. 
Although unlikely, this + situation could result in data loss if it were to happen right when the `prune` command is + listing existing snapshots. Error handling has now been improved to detect and work around this case. @@ -139,10 +160,9 @@ restic users. The changes are ordered by importance. * Bugfix #4624: Correct `restore` progress information if an error occurs - If an error occurred while restoring a snapshot, this could cause the `restore` - progress bar to show incorrect information. In addition, if a data file could - not be loaded completely, then errors would also be reported for some already - restored files. + If an error occurred while restoring a snapshot, this could cause the `restore` progress bar to + show incorrect information. In addition, if a data file could not be loaded completely, then + errors would also be reported for some already restored files. Error reporting of the `restore` command has now been made more accurate. @@ -151,12 +171,11 @@ restic users. The changes are ordered by importance. * Bugfix #4626: Improve reliability of restoring large files - In some cases restic failed to restore large files that frequently contain the - same file chunk. In combination with certain backends, this could result in - network connection timeouts that caused incomplete restores. + In some cases restic failed to restore large files that frequently contain the same file chunk. + In combination with certain backends, this could result in network connection timeouts that + caused incomplete restores. - Restic now includes special handling for such file chunks to ensure reliable - restores. + Restic now includes special handling for such file chunks to ensure reliable restores. https://github.com/restic/restic/pull/4626 https://forum.restic.net/t/errors-restoring-with-restic-on-windows-server-s3/6943 @@ -175,18 +194,16 @@ restic users. The changes are ordered by importance. * Bugfix #4540: Restore ARMv5 support for ARM binaries - The official release binaries for restic 0.16.1 were accidentally built to - require ARMv7. The build process is now updated to restore support for ARMv5. + The official release binaries for restic 0.16.1 were accidentally built to require ARMv7. The + build process is now updated to restore support for ARMv5. - Please note that restic 0.17.0 will drop support for ARMv5 and require at least - ARMv6. + Please note that restic 0.17.0 will drop support for ARMv5 and require at least ARMv6. https://github.com/restic/restic/issues/4540 * Bugfix #4545: Repair documentation build on Read the Docs - For restic 0.16.1, no documentation was available at - https://restic.readthedocs.io/ . + For restic 0.16.1, no documentation was available at https://restic.readthedocs.io/ . The documentation build process is now updated to work again. @@ -213,67 +230,65 @@ restic users. The changes are ordered by importance. * Bugfix #4513: Make `key list` command honor `--no-lock` - The `key list` command now supports the `--no-lock` options. This allows - determining which keys a repo can be accessed by without the need for having - write access (e.g., read-only sftp access, filesystem snapshot). + The `key list` command now supports the `--no-lock` options. This allows determining which + keys a repo can be accessed by without the need for having write access (e.g., read-only sftp + access, filesystem snapshot). 
https://github.com/restic/restic/issues/4513 https://github.com/restic/restic/pull/4514 * Bugfix #4516: Do not try to load password on command line autocomplete - The command line autocompletion previously tried to load the repository - password. This could cause the autocompletion not to work. Now, this step gets - skipped. + The command line autocompletion previously tried to load the repository password. This could + cause the autocompletion not to work. Now, this step gets skipped. https://github.com/restic/restic/issues/4516 https://github.com/restic/restic/pull/4526 * Bugfix #4523: Update zstd library to fix possible data corruption at max. compression - In restic 0.16.0, backups where the compression level was set to `max` (using - `--compression max`) could in rare and very specific circumstances result in - data corruption due to a bug in the library used for compressing data. + In restic 0.16.0, backups where the compression level was set to `max` (using `--compression + max`) could in rare and very specific circumstances result in data corruption due to a bug in the + library used for compressing data. - Restic now uses the latest version of the library used to compress data, which - includes a fix for this issue. Please note that the `auto` compression level - (which restic uses by default) was never affected, and even if you used `max` - compression, chances of being affected by this issue were very small. + Restic now uses the latest version of the library used to compress data, which includes a fix for + this issue. Please note that the `auto` compression level (which restic uses by default) was + never affected, and even if you used `max` compression, chances of being affected by this issue + were very small. - To check a repository for any corruption, run `restic check --read-data`. This - will download and verify the whole repository and can be used at any time to - completely verify the integrity of a repository. If the `check` command detects - anomalies, follow the suggested steps. + To check a repository for any corruption, run `restic check --read-data`. This will download + and verify the whole repository and can be used at any time to completely verify the integrity of + a repository. If the `check` command detects anomalies, follow the suggested steps. - To simplify any needed repository repair and minimize data loss, there is also a - new and experimental `repair packs` command that salvages all valid data from - the affected pack files (see `restic help repair packs` for more information). + To simplify any needed repository repair and minimize data loss, there is also a new and + experimental `repair packs` command that salvages all valid data from the affected pack files + (see `restic help repair packs` for more information). https://github.com/restic/restic/issues/4523 https://github.com/restic/restic/pull/4530 * Change #4532: Update dependencies and require Go 1.19 or newer - We have updated all dependencies. Since some libraries require newer Go standard - library features, support for Go 1.18 has been dropped, which means that restic - now requires at least Go 1.19 to build. + We have updated all dependencies. Since some libraries require newer Go standard library + features, support for Go 1.18 has been dropped, which means that restic now requires at least Go + 1.19 to build. 
https://github.com/restic/restic/pull/4532 https://github.com/restic/restic/pull/4533 * Enhancement #229: Show progress bar while loading the index - Restic did not provide any feedback while loading index files. Now, there is a - progress bar that shows the index loading progress. + Restic did not provide any feedback while loading index files. Now, there is a progress bar that + shows the index loading progress. https://github.com/restic/restic/issues/229 https://github.com/restic/restic/pull/4419 * Enhancement #4128: Automatically set `GOMAXPROCS` in resource-constrained containers - When running restic in a Linux container with CPU-usage limits, restic now - automatically adjusts `GOMAXPROCS`. This helps to reduce the memory consumption - on hosts with many CPU cores. + When running restic in a Linux container with CPU-usage limits, restic now automatically + adjusts `GOMAXPROCS`. This helps to reduce the memory consumption on hosts with many CPU + cores. https://github.com/restic/restic/issues/4128 https://github.com/restic/restic/pull/4485 @@ -281,33 +296,32 @@ restic users. The changes are ordered by importance. * Enhancement #4480: Allow setting REST password and username via environment variables - Previously, it was only possible to specify the REST-server username and - password in the repository URL, or by using the `--repository-file` option. This - meant it was not possible to use authentication in contexts where the repository - URL is stored in publicly accessible way. + Previously, it was only possible to specify the REST-server username and password in the + repository URL, or by using the `--repository-file` option. This meant it was not possible to + use authentication in contexts where the repository URL is stored in publicly accessible way. - Restic now allows setting the username and password using the - `RESTIC_REST_USERNAME` and `RESTIC_REST_PASSWORD` variables. + Restic now allows setting the username and password using the `RESTIC_REST_USERNAME` and + `RESTIC_REST_PASSWORD` variables. https://github.com/restic/restic/pull/4480 * Enhancement #4511: Include inode numbers in JSON output for `find` and `ls` commands - Restic used to omit the inode numbers in the JSON messages emitted for nodes by - the `ls` command as well as for matches by the `find` command. It now includes - those values whenever they are available. + Restic used to omit the inode numbers in the JSON messages emitted for nodes by the `ls` command + as well as for matches by the `find` command. It now includes those values whenever they are + available. https://github.com/restic/restic/pull/4511 * Enhancement #4519: Add config option to set SFTP command arguments - When using the `sftp` backend, scenarios where a custom identity file was needed - for the SSH connection, required the full command to be specified: `-o - sftp.command='ssh user@host:port -i /ssh/my_private_key -s sftp'` + When using the `sftp` backend, scenarios where a custom identity file was needed for the SSH + connection, required the full command to be specified: `-o sftp.command='ssh + user@host:port -i /ssh/my_private_key -s sftp'` - Now, the `-o sftp.args=...` option can be passed to restic to specify custom - arguments for the SSH command executed by the SFTP backend. This simplifies the - above example to `-o sftp.args='-i /ssh/my_private_key'`. + Now, the `-o sftp.args=...` option can be passed to restic to specify custom arguments for the + SSH command executed by the SFTP backend. 
This simplifies the above example to `-o + sftp.args='-i /ssh/my_private_key'`. https://github.com/restic/restic/issues/4241 https://github.com/restic/restic/pull/4519 @@ -353,32 +367,31 @@ restic users. The changes are ordered by importance. * Bugfix #2565: Support "unlimited" in `forget --keep-*` options - Restic would previously forget snapshots that should have been kept when a - negative value was passed to the `--keep-*` options. Negative values are now - forbidden. To keep all snapshots, the special value `unlimited` is now - supported. For example, `--keep-monthly unlimited` will keep all monthly - snapshots. + Restic would previously forget snapshots that should have been kept when a negative value was + passed to the `--keep-*` options. Negative values are now forbidden. To keep all snapshots, + the special value `unlimited` is now supported. For example, `--keep-monthly unlimited` + will keep all monthly snapshots. https://github.com/restic/restic/issues/2565 https://github.com/restic/restic/pull/4234 * Bugfix #3311: Support non-UTF8 paths as symlink target - Earlier restic versions did not correctly `backup` and `restore` symlinks that - contain a non-UTF8 target. Note that this only affected systems that still use a - non-Unicode encoding for filesystem paths. + Earlier restic versions did not correctly `backup` and `restore` symlinks that contain a + non-UTF8 target. Note that this only affected systems that still use a non-Unicode encoding + for filesystem paths. - The repository format is now extended to add support for such symlinks. Please - note that snapshots must have been created with at least restic version 0.16.0 - for `restore` to correctly handle non-UTF8 symlink targets when restoring them. + The repository format is now extended to add support for such symlinks. Please note that + snapshots must have been created with at least restic version 0.16.0 for `restore` to + correctly handle non-UTF8 symlink targets when restoring them. https://github.com/restic/restic/issues/3311 https://github.com/restic/restic/pull/3802 * Bugfix #4199: Avoid lock refresh issues on slow network connections - On network connections with a low upload speed, backups and other operations - could fail with the error message `Fatal: failed to refresh lock in time`. + On network connections with a low upload speed, backups and other operations could fail with + the error message `Fatal: failed to refresh lock in time`. This has now been fixed by reworking the lock refresh handling. @@ -387,21 +400,21 @@ restic users. The changes are ordered by importance. * Bugfix #4274: Improve lock refresh handling after standby - If the restic process was stopped or the host running restic entered standby - during a long running operation such as a backup, this previously resulted in - the operation failing with `Fatal: failed to refresh lock in time`. + If the restic process was stopped or the host running restic entered standby during a long + running operation such as a backup, this previously resulted in the operation failing with + `Fatal: failed to refresh lock in time`. - This has now been fixed such that restic first checks whether it is safe to - continue the current operation and only throws an error if not. + This has now been fixed such that restic first checks whether it is safe to continue the current + operation and only throws an error if not. 
https://github.com/restic/restic/issues/4274 https://github.com/restic/restic/pull/4374 * Bugfix #4319: Correctly clean up status bar output of the `backup` command - Due to a regression in restic 0.15.2, the status bar of the `backup` command - could leave some output behind. This happened if filenames were printed that are - wider than the current terminal width. This has now been fixed. + Due to a regression in restic 0.15.2, the status bar of the `backup` command could leave some + output behind. This happened if filenames were printed that are wider than the current + terminal width. This has now been fixed. https://github.com/restic/restic/issues/4319 https://github.com/restic/restic/pull/4318 @@ -412,26 +425,25 @@ restic users. The changes are ordered by importance. * Bugfix #4400: Ignore missing folders in `rest` backend - If a repository accessed via the REST backend was missing folders, then restic - would fail with an error while trying to list the data in the repository. This - has been now fixed. + If a repository accessed via the REST backend was missing folders, then restic would fail with + an error while trying to list the data in the repository. This has been now fixed. https://github.com/restic/rest-server/issues/235 https://github.com/restic/restic/pull/4400 * Change #4176: Fix JSON message type of `scan_finished` for the `backup` command - Restic incorrectly set the `message_type` of the `scan_finished` message to - `status` instead of `verbose_status`. This has now been corrected so that the - messages report the correct type. + Restic incorrectly set the `message_type` of the `scan_finished` message to `status` + instead of `verbose_status`. This has now been corrected so that the messages report the + correct type. https://github.com/restic/restic/pull/4176 * Change #4201: Require Go 1.20 for Solaris builds - Building restic on Solaris now requires Go 1.20, as the library used to access - Azure uses the mmap syscall, which is only available on Solaris starting from Go - 1.20. All other platforms however continue to build with Go 1.18. + Building restic on Solaris now requires Go 1.20, as the library used to access Azure uses the + mmap syscall, which is only available on Solaris starting from Go 1.20. All other platforms + however continue to build with Go 1.18. https://github.com/restic/restic/pull/4201 @@ -452,8 +464,8 @@ restic users. The changes are ordered by importance. * Enhancement #719: Add `--retry-lock` option - This option allows specifying a duration for which restic will wait if the - repository is already locked. + This option allows specifying a duration for which restic will wait if the repository is + already locked. https://github.com/restic/restic/issues/719 https://github.com/restic/restic/pull/2214 @@ -461,25 +473,24 @@ restic users. The changes are ordered by importance. * Enhancement #1495: Sort snapshots by timestamp in `restic find` - The `find` command used to print snapshots in an arbitrary order. Restic now - prints snapshots sorted by timestamp. + The `find` command used to print snapshots in an arbitrary order. Restic now prints snapshots + sorted by timestamp. https://github.com/restic/restic/issues/1495 https://github.com/restic/restic/pull/4409 * Enhancement #1759: Add `repair index` and `repair snapshots` commands - The `rebuild-index` command has been renamed to `repair index`. The old name - will still work, but is deprecated. + The `rebuild-index` command has been renamed to `repair index`. 
The old name will still work, + but is deprecated. - When a snapshot was damaged, the only option up to now was to completely forget - the snapshot, even if only some unimportant files in it were damaged and other - files were still fine. + When a snapshot was damaged, the only option up to now was to completely forget the snapshot, + even if only some unimportant files in it were damaged and other files were still fine. - Restic now has a `repair snapshots` command, which can salvage any non-damaged - files and parts of files in the snapshots by removing damaged directories and - missing file contents. Please note that the damaged data may still be lost and - see the "Troubleshooting" section in the documentation for more details. + Restic now has a `repair snapshots` command, which can salvage any non-damaged files and parts + of files in the snapshots by removing damaged directories and missing file contents. Please + note that the damaged data may still be lost and see the "Troubleshooting" section in the + documentation for more details. https://github.com/restic/restic/issues/1759 https://github.com/restic/restic/issues/1714 @@ -491,20 +502,19 @@ restic users. The changes are ordered by importance. * Enhancement #1926: Allow certificate paths to be passed through environment variables - Restic will now read paths to certificates from the environment variables - `RESTIC_CACERT` or `RESTIC_TLS_CLIENT_CERT` if `--cacert` or `--tls-client-cert` - are not specified. + Restic will now read paths to certificates from the environment variables `RESTIC_CACERT` or + `RESTIC_TLS_CLIENT_CERT` if `--cacert` or `--tls-client-cert` are not specified. https://github.com/restic/restic/issues/1926 https://github.com/restic/restic/pull/4384 * Enhancement #2359: Provide multi-platform Docker images - The official Docker images are now built for the architectures linux/386, - linux/amd64, linux/arm and linux/arm64. + The official Docker images are now built for the architectures linux/386, linux/amd64, + linux/arm and linux/arm64. - As an alternative to the Docker Hub, the Docker images are also available on - ghcr.io, the GitHub Container Registry. + As an alternative to the Docker Hub, the Docker images are also available on ghcr.io, the GitHub + Container Registry. https://github.com/restic/restic/issues/2359 https://github.com/restic/restic/issues/4269 @@ -514,26 +524,25 @@ restic users. The changes are ordered by importance. The `azure` backend previously only supported storages using the global domain `core.windows.net`. This meant that backups to other domains such as Azure China - (`core.chinacloudapi.cn`) or Azure Germany (`core.cloudapi.de`) were not - supported. Restic now allows overriding the global domain using the environment - variable `AZURE_ENDPOINT_SUFFIX`. + (`core.chinacloudapi.cn`) or Azure Germany (`core.cloudapi.de`) were not supported. + Restic now allows overriding the global domain using the environment variable + `AZURE_ENDPOINT_SUFFIX`. https://github.com/restic/restic/issues/2468 https://github.com/restic/restic/pull/4387 * Enhancement #2679: Reduce file fragmentation for local backend - Before this change, local backend files could become fragmented. Now restic will - try to preallocate space for pack files to avoid their fragmentation. + Before this change, local backend files could become fragmented. Now restic will try to + preallocate space for pack files to avoid their fragmentation. 
https://github.com/restic/restic/issues/2679 https://github.com/restic/restic/pull/3261 * Enhancement #3328: Reduce memory usage by up to 25% - The in-memory index has been optimized to be more garbage collection friendly. - Restic now defaults to `GOGC=50` to run the Go garbage collector more - frequently. + The in-memory index has been optimized to be more garbage collection friendly. Restic now + defaults to `GOGC=50` to run the Go garbage collector more frequently. https://github.com/restic/restic/issues/3328 https://github.com/restic/restic/pull/4352 @@ -541,21 +550,21 @@ restic users. The changes are ordered by importance. * Enhancement #3397: Improve accuracy of ETA displayed during backup - Restic's `backup` command displayed an ETA that did not adapt when the rate of - progress made during the backup changed during the course of the backup. + Restic's `backup` command displayed an ETA that did not adapt when the rate of progress made + during the backup changed during the course of the backup. - Restic now uses recent progress when computing the ETA. It is important to - realize that the estimate may still be wrong, because restic cannot predict the - future, but the hope is that the ETA will be more accurate in most cases. + Restic now uses recent progress when computing the ETA. It is important to realize that the + estimate may still be wrong, because restic cannot predict the future, but the hope is that the + ETA will be more accurate in most cases. https://github.com/restic/restic/issues/3397 https://github.com/restic/restic/pull/3563 * Enhancement #3624: Keep oldest snapshot when there are not enough snapshots - The `forget` command now additionally preserves the oldest snapshot if fewer - snapshots than allowed by the `--keep-*` parameters would otherwise be kept. - This maximizes the amount of history kept within the specified limits. + The `forget` command now additionally preserves the oldest snapshot if fewer snapshots than + allowed by the `--keep-*` parameters would otherwise be kept. This maximizes the amount of + history kept within the specified limits. https://github.com/restic/restic/issues/3624 https://github.com/restic/restic/pull/4366 @@ -563,106 +572,99 @@ restic users. The changes are ordered by importance. * Enhancement #3698: Add support for Managed / Workload Identity to `azure` backend - Restic now additionally supports authenticating to Azure using Workload Identity - or Managed Identity credentials, which are automatically injected in several - environments such as a managed Kubernetes cluster. + Restic now additionally supports authenticating to Azure using Workload Identity or Managed + Identity credentials, which are automatically injected in several environments such as a + managed Kubernetes cluster. https://github.com/restic/restic/issues/3698 https://github.com/restic/restic/pull/4029 * Enhancement #3871: Support `:` syntax to select subfolders - Commands like `diff` or `restore` always worked with the full snapshot. This did - not allow comparing only a specific subfolder or only restoring that folder - (`restore --include subfolder` filters the restored files, but still creates the - directories included in `subfolder`). + Commands like `diff` or `restore` always worked with the full snapshot. This did not allow + comparing only a specific subfolder or only restoring that folder (`restore --include + subfolder` filters the restored files, but still creates the directories included in + `subfolder`). 
- The commands `diff`, `dump`, `ls` and `restore` now support the - `:` syntax, where `snapshot` is the ID of a snapshot (or - the string `latest`) and `subfolder` is a path within the snapshot. The commands - will then only work with the specified path of the snapshot. The `subfolder` - must be a path to a folder as returned by `ls`. Two examples: + The commands `diff`, `dump`, `ls` and `restore` now support the `:` + syntax, where `snapshot` is the ID of a snapshot (or the string `latest`) and `subfolder` is a + path within the snapshot. The commands will then only work with the specified path of the + snapshot. The `subfolder` must be a path to a folder as returned by `ls`. Two examples: `restic restore -t target latest:/some/path` `restic diff 12345678:/some/path 90abcef:/some/path` - For debugging purposes, the `cat` command now supports `cat tree - :` to return the directory metadata for the given - subfolder. + For debugging purposes, the `cat` command now supports `cat tree :` to + return the directory metadata for the given subfolder. https://github.com/restic/restic/issues/3871 https://github.com/restic/restic/pull/4334 * Enhancement #3941: Support `--group-by` for backup parent selection - Previously, the `backup` command by default selected the parent snapshot based - on the hostname and the backup targets. When the backup path list changed, the - `backup` command was unable to determine a suitable parent snapshot and had to - read all files again. + Previously, the `backup` command by default selected the parent snapshot based on the + hostname and the backup targets. When the backup path list changed, the `backup` command was + unable to determine a suitable parent snapshot and had to read all files again. - The new `--group-by` option for the `backup` command allows filtering snapshots - for the parent selection by `host`, `paths` and `tags`. It defaults to - `host,paths` which selects the latest snapshot with hostname and paths matching - those of the backup run. This matches the behavior of prior restic versions. + The new `--group-by` option for the `backup` command allows filtering snapshots for the + parent selection by `host`, `paths` and `tags`. It defaults to `host,paths` which selects the + latest snapshot with hostname and paths matching those of the backup run. This matches the + behavior of prior restic versions. - The new `--group-by` option should be set to the same value as passed to `forget - --group-by`. + The new `--group-by` option should be set to the same value as passed to `forget --group-by`. https://github.com/restic/restic/issues/3941 https://github.com/restic/restic/pull/4081 * Enhancement #4130: Cancel current command if cache becomes unusable - If the cache directory was removed or ran out of space while restic was running, - this would previously cause further caching attempts to fail and thereby - drastically slow down the command execution. Now, the currently running command - is instead canceled. + If the cache directory was removed or ran out of space while restic was running, this would + previously cause further caching attempts to fail and thereby drastically slow down the + command execution. Now, the currently running command is instead canceled. 
https://github.com/restic/restic/issues/4130 https://github.com/restic/restic/pull/4166 * Enhancement #4159: Add `--human-readable` option to `ls` and `find` commands - Previously, when using the `-l` option with the `ls` and `find` commands, the - displayed size was always in bytes, without an option for a more human readable - format such as MiB or GiB. + Previously, when using the `-l` option with the `ls` and `find` commands, the displayed size + was always in bytes, without an option for a more human readable format such as MiB or GiB. - The new `--human-readable` option will convert longer size values into more - human friendly values with an appropriate suffix depending on the output size. - For example, a size of `14680064` will be shown as `14.000 MiB`. + The new `--human-readable` option will convert longer size values into more human friendly + values with an appropriate suffix depending on the output size. For example, a size of + `14680064` will be shown as `14.000 MiB`. https://github.com/restic/restic/issues/4159 https://github.com/restic/restic/pull/4351 * Enhancement #4188: Include restic version in snapshot metadata - The restic version used to backup a snapshot is now included in its metadata and - shown when inspecting a snapshot using `restic cat snapshot ` or - `restic snapshots --json`. + The restic version used to backup a snapshot is now included in its metadata and shown when + inspecting a snapshot using `restic cat snapshot ` or `restic snapshots + --json`. https://github.com/restic/restic/issues/4188 https://github.com/restic/restic/pull/4378 * Enhancement #4220: Add `jq` binary to Docker image - The Docker image now contains `jq`, which can be useful to process JSON data - output by restic. + The Docker image now contains `jq`, which can be useful to process JSON data output by restic. https://github.com/restic/restic/pull/4220 * Enhancement #4226: Allow specifying region of new buckets in the `gs` backend - Previously, buckets used by the Google Cloud Storage backend would always get - created in the "us" region. It is now possible to specify the region where a - bucket should be created by using the `-o gs.region=us` option. + Previously, buckets used by the Google Cloud Storage backend would always get created in the + "us" region. It is now possible to specify the region where a bucket should be created by using + the `-o gs.region=us` option. https://github.com/restic/restic/pull/4226 * Enhancement #4375: Add support for extended attributes on symlinks - Restic now supports extended attributes on symlinks when backing up, restoring, - or FUSE-mounting snapshots. This includes, for example, the `security.selinux` - xattr on Linux distributions that use SELinux. + Restic now supports extended attributes on symlinks when backing up, restoring, or + FUSE-mounting snapshots. This includes, for example, the `security.selinux` xattr on Linux + distributions that use SELinux. https://github.com/restic/restic/issues/4375 https://github.com/restic/restic/pull/4379 @@ -691,12 +693,12 @@ restic users. The changes are ordered by importance. * Bugfix #2260: Sanitize filenames printed by `backup` during processing - The `backup` command would previously not sanitize the filenames it printed - during processing, potentially causing newlines or terminal control characters - to mangle the status output or even change the state of a terminal. 
+ The `backup` command would previously not sanitize the filenames it printed during + processing, potentially causing newlines or terminal control characters to mangle the + status output or even change the state of a terminal. - Filenames are now checked and quoted if they contain non-printable or - non-Unicode characters. + Filenames are now checked and quoted if they contain non-printable or non-Unicode + characters. https://github.com/restic/restic/issues/2260 https://github.com/restic/restic/issues/4191 @@ -705,47 +707,44 @@ restic users. The changes are ordered by importance. * Bugfix #4211: Make `dump` interpret `--host` and `--path` correctly A regression in restic 0.15.0 caused `dump` to confuse its `--host=` and - `--path=` options: it looked for snapshots with paths called `` from - hosts called ``. It now treats the options as intended. + `--path=` options: it looked for snapshots with paths called `` from hosts + called ``. It now treats the options as intended. https://github.com/restic/restic/issues/4211 https://github.com/restic/restic/pull/4212 * Bugfix #4239: Correct number of blocks reported in mount point - Restic mount points reported an incorrect number of 512-byte (POSIX standard) - blocks for files and links due to a rounding bug. In particular, empty files - were reported as taking one block instead of zero. + Restic mount points reported an incorrect number of 512-byte (POSIX standard) blocks for + files and links due to a rounding bug. In particular, empty files were reported as taking one + block instead of zero. - The rounding is now fixed: the number of blocks reported is the file size (or - link target size) divided by 512 and rounded up to a whole number. + The rounding is now fixed: the number of blocks reported is the file size (or link target size) + divided by 512 and rounded up to a whole number. https://github.com/restic/restic/issues/4239 https://github.com/restic/restic/pull/4240 * Bugfix #4253: Minimize risk of spurious filesystem loops with `mount` - When a backup contains a directory that has the same name as its parent, say - `a/b/b`, and the GNU `find` command was run on this backup in a restic mount, - `find` would refuse to traverse the lowest `b` directory, instead printing `File - system loop detected`. This was due to the way the restic mount command - generates inode numbers for directories in the mount point. + When a backup contains a directory that has the same name as its parent, say `a/b/b`, and the GNU + `find` command was run on this backup in a restic mount, `find` would refuse to traverse the + lowest `b` directory, instead printing `File system loop detected`. This was due to the way the + restic mount command generates inode numbers for directories in the mount point. - The rule for generating these inode numbers was changed in 0.15.0. It has now - been changed again to avoid this issue. A perfect rule does not exist, but the - probability of this behavior occurring is now extremely small. + The rule for generating these inode numbers was changed in 0.15.0. It has now been changed again + to avoid this issue. A perfect rule does not exist, but the probability of this behavior + occurring is now extremely small. - When it does occur, the mount point is not broken, and scripts that traverse the - mount point should work as long as they don't rely on inode numbers for - detecting filesystem loops. 
+ When it does occur, the mount point is not broken, and scripts that traverse the mount point + should work as long as they don't rely on inode numbers for detecting filesystem loops. https://github.com/restic/restic/issues/4253 https://github.com/restic/restic/pull/4255 * Enhancement #4180: Add release binaries for riscv64 architecture on Linux - Builds for the `riscv64` architecture on Linux are now included in the release - binaries. + Builds for the `riscv64` architecture on Linux are now included in the release binaries. https://github.com/restic/restic/pull/4180 @@ -772,8 +771,8 @@ restic users. The changes are ordered by importance. * Bugfix #3750: Remove `b2_download_file_by_name: 404` warning from B2 backend - In some cases the B2 backend could print `b2_download_file_by_name: 404: : - b2.b2err` warnings. These are only debug messages and can be safely ignored. + In some cases the B2 backend could print `b2_download_file_by_name: 404: : b2.b2err` + warnings. These are only debug messages and can be safely ignored. Restic now uses an updated library for accessing B2, which removes the warning. @@ -783,19 +782,19 @@ restic users. The changes are ordered by importance. * Bugfix #4147: Make `prune --quiet` not print progress bar - A regression in restic 0.15.0 caused `prune --quiet` to show a progress bar - while deciding how to process each pack files. This has now been fixed. + A regression in restic 0.15.0 caused `prune --quiet` to show a progress bar while deciding how + to process each pack files. This has now been fixed. https://github.com/restic/restic/issues/4147 https://github.com/restic/restic/pull/4153 * Bugfix #4163: Make `self-update --output` work with new filename on Windows - Since restic 0.14.0 the `self-update` command did not work when a custom output - filename was specified via the `--output` option. This has now been fixed. + Since restic 0.14.0 the `self-update` command did not work when a custom output filename was + specified via the `--output` option. This has now been fixed. - As a workaround, either use an older restic version to run the self-update or - create an empty file with the output filename before updating e.g. using CMD: + As a workaround, either use an older restic version to run the self-update or create an empty + file with the output filename before updating e.g. using CMD: `type nul > new-file.exe` `restic self-update --output new-file.exe` @@ -804,27 +803,24 @@ restic users. The changes are ordered by importance. * Bugfix #4167: Add missing ETA in `backup` progress bar - A regression in restic 0.15.0 caused the ETA to be missing from the progress bar - displayed by the `backup` command. This has now been fixed. + A regression in restic 0.15.0 caused the ETA to be missing from the progress bar displayed by the + `backup` command. This has now been fixed. https://github.com/restic/restic/pull/4167 * Enhancement #4143: Ignore empty lock files - With restic 0.15.0 the checks for stale locks became much stricter than before. - In particular, empty or unreadable locks were no longer silently ignored. This - made restic to complain with `Load(, 0, 0) returned error, - retrying after 552.330144ms: load(): invalid data returned` and - fail in the end. + With restic 0.15.0 the checks for stale locks became much stricter than before. In particular, + empty or unreadable locks were no longer silently ignored. 
This made restic complain with + `Load(, 0, 0) returned error, retrying after 552.330144ms: + load(): invalid data returned` and fail in the end. - The error message is now clarified and the implementation changed to ignore - empty lock files which are sometimes created as the result of a failed uploads - on some backends. + The error message is now clarified and the implementation changed to ignore empty lock files + which are sometimes created as the result of failed uploads on some backends. - Please note that unreadable lock files still have to cleaned up manually. To do - so, you can run `restic unlock --remove-all` which removes all existing lock - files. But first make sure that no other restic process is currently using the - repository. + Please note that unreadable lock files still have to be cleaned up manually. To do so, you can run + `restic unlock --remove-all` which removes all existing lock files. But first make sure that + no other restic process is currently using the repository. https://github.com/restic/restic/issues/4143 https://github.com/restic/restic/pull/4152 @@ -880,65 +876,63 @@ restic users. The changes are ordered by importance. * Bugfix #2015: Make `mount` return exit code 0 after receiving Ctrl-C / SIGINT - To stop the `mount` command, a user has to press Ctrl-C or send a SIGINT signal - to restic. This used to cause restic to exit with a non-zero exit code. + To stop the `mount` command, a user has to press Ctrl-C or send a SIGINT signal to restic. This + used to cause restic to exit with a non-zero exit code. - The exit code has now been changed to zero as the above is the expected way to - stop the `mount` command and should therefore be considered successful. + The exit code has now been changed to zero as the above is the expected way to stop the `mount` + command and should therefore be considered successful. https://github.com/restic/restic/issues/2015 https://github.com/restic/restic/pull/3894 * Bugfix #2578: Make `restore` replace existing symlinks - When restoring a symlink, restic used to report an error if the target path - already existed. This has now been fixed such that the potentially existing - target path is first removed before the symlink is restored. + When restoring a symlink, restic used to report an error if the target path already existed. + This has now been fixed such that the potentially existing target path is first removed before + the symlink is restored. https://github.com/restic/restic/issues/2578 https://github.com/restic/restic/pull/3780 * Bugfix #2591: Don't read password from stdin for `backup --stdin` - The `backup` command when used with `--stdin` previously tried to read first the - password, then the data to be backed up from standard input. This meant it would - often confuse part of the data for the password. + The `backup` command when used with `--stdin` previously tried to read first the password, + then the data to be backed up from standard input. This meant it would often confuse part of the + data for the password. - From now on, it will instead exit with the message `Fatal: cannot read both - password and data from stdin` unless the password is passed in some other way - (such as `--restic-password-file`, `RESTIC_PASSWORD`, etc). + From now on, it will instead exit with the message `Fatal: cannot read both password and data + from stdin` unless the password is passed in some other way (such as + `--restic-password-file`, `RESTIC_PASSWORD`, etc).
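As an illustrative sketch only (the repository path, database name and password file location are hypothetical), the password can instead be supplied via `--password-file` so that standard input stays free for the backup data:

```
mysqldump somedatabase | restic -r /srv/restic-repo backup --stdin --stdin-filename somedatabase.sql --password-file /etc/restic/password
```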
- To enter the password interactively a password command has to be used. For - example on Linux, `mysqldump somedatabase | restic backup --stdin - --password-command='sh -c "systemd-ask-password < /dev/tty"'` securely reads the - password from the terminal. + To enter the password interactively a password command has to be used. For example on Linux, + `mysqldump somedatabase | restic backup --stdin --password-command='sh -c + "systemd-ask-password < /dev/tty"'` securely reads the password from the terminal. https://github.com/restic/restic/issues/2591 https://github.com/restic/restic/pull/4011 * Bugfix #3161: Delete files on Backblaze B2 more reliably - Restic used to only delete the latest version of files stored in B2. In most - cases this worked well as there was only a single version of the file. However, - due to retries while uploading it is possible for multiple file versions to be - stored at B2. This could lead to various problems for files that should have - been deleted but still existed. + Restic used to only delete the latest version of files stored in B2. In most cases this worked + well as there was only a single version of the file. However, due to retries while uploading it is + possible for multiple file versions to be stored at B2. This could lead to various problems for + files that should have been deleted but still existed. - The implementation has now been changed to delete all versions of files, which - doubles the amount of Class B transactions necessary to delete files, but - assures that no file versions are left behind. + The implementation has now been changed to delete all versions of files, which doubles the + amount of Class B transactions necessary to delete files, but assures that no file versions are + left behind. https://github.com/restic/restic/issues/3161 https://github.com/restic/restic/pull/3885 * Bugfix #3336: Make SFTP backend report no space left on device - Backing up to an SFTP backend would spew repeated SSH_FX_FAILURE messages when - the remote disk was full. Restic now reports "sftp: no space left on device" and - exits immediately when it detects this condition. + Backing up to an SFTP backend would spew repeated SSH_FX_FAILURE messages when the remote disk + was full. Restic now reports "sftp: no space left on device" and exits immediately when it + detects this condition. - A fix for this issue was implemented in restic 0.12.1, but unfortunately the fix - itself contained a bug that prevented it from taking effect. + A fix for this issue was implemented in restic 0.12.1, but unfortunately the fix itself + contained a bug that prevented it from taking effect. https://github.com/restic/restic/issues/3336 https://github.com/restic/restic/pull/3345 @@ -946,10 +940,9 @@ restic users. The changes are ordered by importance. * Bugfix #3567: Improve handling of interrupted syscalls in `mount` command - Accessing restic's FUSE mount could result in "input/output" errors when using - programs in which syscalls can be interrupted. This is for example the case for - Go programs. This has now been fixed by improved error handling of interrupted - syscalls. + Accessing restic's FUSE mount could result in "input/output" errors when using programs in + which syscalls can be interrupted. This is for example the case for Go programs. This has now + been fixed by improved error handling of interrupted syscalls. https://github.com/restic/restic/issues/3567 https://github.com/restic/restic/issues/3694 @@ -957,53 +950,50 @@ restic users. 
The changes are ordered by importance. * Bugfix #3897: Fix stuck `copy` command when `-o .connections=1` - When running the `copy` command with `-o .connections=1` the command - would be infinitely stuck. This has now been fixed. + When running the `copy` command with `-o .connections=1` the command would be + stuck indefinitely. This has now been fixed. https://github.com/restic/restic/issues/3897 https://github.com/restic/restic/pull/3898 * Bugfix #3918: Correct prune statistics for partially compressed repositories - In a partially compressed repository, one data blob can exist both in an - uncompressed and a compressed version. This caused the `prune` statistics to - become inaccurate and e.g. report a too high value for the unused size, such as - "unused size after prune: 16777215.991 TiB". This has now been fixed. + In a partially compressed repository, one data blob can exist both in an uncompressed and a + compressed version. This caused the `prune` statistics to become inaccurate and e.g. report + too high a value for the unused size, such as "unused size after prune: 16777215.991 TiB". This + has now been fixed. https://github.com/restic/restic/issues/3918 https://github.com/restic/restic/pull/3980 * Bugfix #3951: Make `ls` return exit code 1 if snapshot cannot be loaded - The `ls` command used to show a warning and return exit code 0 when failing to - load a snapshot. This has now been fixed such that it instead returns exit code - 1 (still showing a warning). + The `ls` command used to show a warning and return exit code 0 when failing to load a snapshot. + This has now been fixed such that it instead returns exit code 1 (still showing a warning). https://github.com/restic/restic/pull/3951 * Bugfix #4003: Make `backup` no longer hang on Solaris when seeing a FIFO file - The `backup` command used to hang on Solaris whenever it encountered a FIFO file - (named pipe), due to a bug in the handling of extended attributes. This bug has - now been fixed. + The `backup` command used to hang on Solaris whenever it encountered a FIFO file (named pipe), + due to a bug in the handling of extended attributes. This bug has now been fixed. https://github.com/restic/restic/issues/4003 https://github.com/restic/restic/pull/4053 * Bugfix #4016: Support ExFAT-formatted local backends on macOS Ventura - ExFAT-formatted disks could not be used as local backends starting from macOS - Ventura. Restic commands would fail with an "inappropriate ioctl for device" - error. This has now been fixed. + ExFAT-formatted disks could not be used as local backends starting from macOS Ventura. Restic + commands would fail with an "inappropriate ioctl for device" error. This has now been fixed. https://github.com/restic/restic/issues/4016 https://github.com/restic/restic/pull/4021 * Bugfix #4085: Make `init` ignore "Access Denied" errors when creating S3 buckets - In restic 0.9.0 through 0.13.0, the `init` command ignored some permission - errors from S3 backends when trying to check for bucket existence, so that - manually created buckets with custom permissions could be used for backups. + In restic 0.9.0 through 0.13.0, the `init` command ignored some permission errors from S3 + backends when trying to check for bucket existence, so that manually created buckets with + custom permissions could be used for backups. This feature became broken in 0.14.0, but has now been restored again. @@ -1012,21 +1002,20 @@ restic users. The changes are ordered by importance.
* Bugfix #4100: Make `self-update` enabled by default only in release builds - The `self-update` command was previously included by default in all builds of - restic as opposed to only in official release builds, even if the `selfupdate` - tag was not explicitly enabled when building. + The `self-update` command was previously included by default in all builds of restic as + opposed to only in official release builds, even if the `selfupdate` tag was not explicitly + enabled when building. - This has now been corrected, and the `self-update` command is only available if - restic was built with `-tags selfupdate` (as done for official release builds by - `build.go`). + This has now been corrected, and the `self-update` command is only available if restic was + built with `-tags selfupdate` (as done for official release builds by `build.go`). https://github.com/restic/restic/pull/4100 * Bugfix #4103: Don't generate negative UIDs and GIDs in tar files from `dump` - When using a 32-bit build of restic, the `dump` command could in some cases - create tar files containing negative UIDs and GIDs, which cannot be read by GNU - tar. This corner case especially applies to backups from stdin on Windows. + When using a 32-bit build of restic, the `dump` command could in some cases create tar files + containing negative UIDs and GIDs, which cannot be read by GNU tar. This corner case especially + applies to backups from stdin on Windows. This is now fixed such that `dump` creates valid tar files in these cases too. @@ -1035,50 +1024,48 @@ restic users. The changes are ordered by importance. * Change #2724: Include full snapshot ID in JSON output of `backup` - We have changed the JSON output of the backup command to include the full - snapshot ID instead of just a shortened version, as the latter can be ambiguous - in some rare cases. To derive the short ID, please truncate the full ID down to - eight characters. + We have changed the JSON output of the backup command to include the full snapshot ID instead of + just a shortened version, as the latter can be ambiguous in some rare cases. To derive the short + ID, please truncate the full ID down to eight characters. https://github.com/restic/restic/issues/2724 https://github.com/restic/restic/pull/3993 * Change #3929: Make `unlock` display message only when locks were actually removed - The `unlock` command used to print the "successfully removed locks" message - whenever it was run, regardless of lock files having being removed or not. + The `unlock` command used to print the "successfully removed locks" message whenever it was + run, regardless of lock files having been removed or not. - This has now been changed such that it only prints the message if any lock files - were actually removed. In addition, it also reports the number of removed lock - files. + This has now been changed such that it only prints the message if any lock files were actually + removed. In addition, it also reports the number of removed lock files. https://github.com/restic/restic/issues/3929 https://github.com/restic/restic/pull/3935 * Change #4033: Don't print skipped snapshots by default in `copy` command - The `copy` command used to print each snapshot that was skipped because it - already existed in the target repository. The amount of this output could - practically bury the list of snapshots that were actually copied. + The `copy` command used to print each snapshot that was skipped because it already existed in + the target repository.
The amount of this output could practically bury the list of snapshots + that were actually copied. - From now on, the skipped snapshots are by default not printed at all, but this - can be re-enabled by increasing the verbosity level of the command. + From now on, the skipped snapshots are by default not printed at all, but this can be re-enabled + by increasing the verbosity level of the command. https://github.com/restic/restic/issues/4033 https://github.com/restic/restic/pull/4066 * Change #4041: Update dependencies and require Go 1.18 or newer - Most dependencies have been updated. Since some libraries require newer language - features, support for Go 1.15-1.17 has been dropped, which means that restic now - requires at least Go 1.18 to build. + Most dependencies have been updated. Since some libraries require newer language features, + support for Go 1.15-1.17 has been dropped, which means that restic now requires at least Go 1.18 + to build. https://github.com/restic/restic/pull/4041 * Enhancement #14: Implement `rewrite` command - Restic now has a `rewrite` command which allows to rewrite existing snapshots to - remove unwanted files. + Restic now has a `rewrite` command which allows rewriting existing snapshots to remove + unwanted files. https://github.com/restic/restic/issues/14 https://github.com/restic/restic/pull/2731 @@ -1086,15 +1073,15 @@ restic users. The changes are ordered by importance. * Enhancement #79: Restore files with long runs of zeros as sparse files - When using `restore --sparse`, the restorer may now write files containing long - runs of zeros as sparse files (also called files with holes), where the zeros - are not actually written to disk. + When using `restore --sparse`, the restorer may now write files containing long runs of zeros + as sparse files (also called files with holes), where the zeros are not actually written to + disk. - How much space is saved by writing sparse files depends on the operating system, - file system and the distribution of zeros in the file. + How much space is saved by writing sparse files depends on the operating system, file system and + the distribution of zeros in the file. - During backup restic still reads the whole file including sparse regions, but - with optimized processing speed of sparse regions. + During backup restic still reads the whole file including sparse regions, but with optimized + processing speed of sparse regions. https://github.com/restic/restic/issues/79 https://github.com/restic/restic/issues/3903 @@ -1104,9 +1091,9 @@ restic users. The changes are ordered by importance. * Enhancement #1078: Support restoring symbolic links on Windows - The `restore` command now supports restoring symbolic links on Windows. Because - of Windows specific restrictions this is only possible when running restic with - the `SeCreateSymbolicLinkPrivilege` privilege or as an administrator. + The `restore` command now supports restoring symbolic links on Windows. Because of Windows + specific restrictions this is only possible when running restic with the + `SeCreateSymbolicLinkPrivilege` privilege or as an administrator. https://github.com/restic/restic/issues/1078 https://github.com/restic/restic/issues/2699 @@ -1114,14 +1101,14 @@ restic users. The changes are ordered by importance.
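As an illustrative sketch only (the repository path and target directory are hypothetical), the sparse restore described in enhancement #79 above can be requested like this:

```
restic -r /srv/restic-repo restore latest --target /mnt/restore --sparse
```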
* Enhancement #1734: Inform about successful retries after errors - When a recoverable error is encountered, restic shows a warning message saying - that it's retrying, e.g.: + When a recoverable error is encountered, restic shows a warning message saying that it's + retrying, e.g.: `Save() returned error, retrying after 357.131936ms: ...` - This message can be confusing in that it never clearly states whether the retry - is successful or not. This has now been fixed such that restic follows up with a - message confirming a successful retry, e.g.: + This message can be confusing in that it never clearly states whether the retry is successful or + not. This has now been fixed such that restic follows up with a message confirming a successful + retry, e.g.: `Save() operation successful after 1 retries` @@ -1130,12 +1117,12 @@ restic users. The changes are ordered by importance. * Enhancement #1866: Improve handling of directories with duplicate entries - If for some reason a directory contains a duplicate entry, the `backup` command - would previously fail with a `node "path/to/file" already present` or `nodes are - not ordered got "path/to/file", last "path/to/file"` error. + If for some reason a directory contains a duplicate entry, the `backup` command would + previously fail with a `node "path/to/file" already present` or `nodes are not ordered got + "path/to/file", last "path/to/file"` error. - The error handling has been improved to only report a warning in this case. Make - sure to check that the filesystem in question is not damaged if you see this! + The error handling has been improved to only report a warning in this case. Make sure to check + that the filesystem in question is not damaged if you see this! https://github.com/restic/restic/issues/1866 https://github.com/restic/restic/issues/3937 @@ -1143,31 +1130,29 @@ restic users. The changes are ordered by importance. * Enhancement #2134: Support B2 API keys restricted to hiding but not deleting files - When the B2 backend does not have the necessary permissions to permanently - delete files, it now automatically falls back to hiding files. This allows using - restic with an application key which is not allowed to delete files. This can - prevent an attacker from deleting backups with such an API key. + When the B2 backend does not have the necessary permissions to permanently delete files, it now + automatically falls back to hiding files. This allows using restic with an application key + which is not allowed to delete files. This can prevent an attacker from deleting backups with + such an API key. - To use this feature create an application key without the `deleteFiles` - capability. It is recommended to restrict the key to just one bucket. For - example using the `b2` command line tool: + To use this feature create an application key without the `deleteFiles` capability. It is + recommended to restrict the key to just one bucket. For example using the `b2` command line + tool: `b2 create-key --bucket listBuckets,readFiles,writeFiles,listFiles` - Alternatively, you can use the S3 backend to access B2, as described in the - documentation. In this mode, files are also only hidden instead of being deleted - permanently. + Alternatively, you can use the S3 backend to access B2, as described in the documentation. In + this mode, files are also only hidden instead of being deleted permanently. 
https://github.com/restic/restic/issues/2134 https://github.com/restic/restic/pull/2398 * Enhancement #2152: Make `init` open only one connection for the SFTP backend - The `init` command using the SFTP backend used to connect twice to the - repository. This could be inconvenient if the user must enter a password, or - cause `init` to fail if the server does not correctly close the first SFTP - connection. + The `init` command using the SFTP backend used to connect twice to the repository. This could be + inconvenient if the user must enter a password, or cause `init` to fail if the server does not + correctly close the first SFTP connection. This has now been fixed by reusing the first/initial SFTP connection opened. @@ -1176,44 +1161,40 @@ restic users. The changes are ordered by importance. * Enhancement #2533: Handle cache corruption on disk and in downloads - In rare situations, like for example after a system crash, the data stored in - the cache might be corrupted. This could cause restic to fail and required - manually deleting the cache. + In rare situations, like for example after a system crash, the data stored in the cache might be + corrupted. This could cause restic to fail and required manually deleting the cache. - Restic now automatically removes broken data from the cache, allowing it to - recover from such a situation without user intervention. In addition, restic - retries downloads which return corrupt data in order to also handle temporary - download problems. + Restic now automatically removes broken data from the cache, allowing it to recover from such a + situation without user intervention. In addition, restic retries downloads which return + corrupt data in order to also handle temporary download problems. https://github.com/restic/restic/issues/2533 https://github.com/restic/restic/pull/3521 * Enhancement #2715: Stricter repository lock handling - Previously, restic commands kept running even if they failed to refresh their - locks in time. This could be a problem e.g. in case the client system running a - backup entered the standby power mode while the backup was still in progress - (which would prevent the client from refreshing its lock), and after a short - delay another host successfully runs `unlock` and `prune` on the repository, - which would remove all data added by the in-progress backup. If the backup - client later continues its backup, even though its lock had expired in the - meantime, this would lead to an incomplete snapshot. - - To address this, lock handling is now much stricter. Commands requiring a lock - are canceled if the lock is not refreshed successfully in time. In addition, if - a lock file is not readable restic will not allow starting a command. It may be - necessary to remove invalid lock files manually or use `unlock --remove-all`. - Please make sure that no other restic processes are running concurrently before - doing this, however. + Previously, restic commands kept running even if they failed to refresh their locks in time. + This could be a problem e.g. in case the client system running a backup entered the standby power + mode while the backup was still in progress (which would prevent the client from refreshing its + lock), and after a short delay another host successfully runs `unlock` and `prune` on the + repository, which would remove all data added by the in-progress backup. If the backup client + later continues its backup, even though its lock had expired in the meantime, this would lead to + an incomplete snapshot. 
+ + To address this, lock handling is now much stricter. Commands requiring a lock are canceled if + the lock is not refreshed successfully in time. In addition, if a lock file is not readable + restic will not allow starting a command. It may be necessary to remove invalid lock files + manually or use `unlock --remove-all`. Please make sure that no other restic processes are + running concurrently before doing this, however. https://github.com/restic/restic/issues/2715 https://github.com/restic/restic/pull/3569 * Enhancement #2750: Make backup file read concurrency configurable - The `backup` command now supports a `--read-concurrency` option which allows - tuning restic for very fast storage like NVMe disks by controlling the number of - concurrent file reads during the backup process. + The `backup` command now supports a `--read-concurrency` option which allows tuning restic + for very fast storage like NVMe disks by controlling the number of concurrent file reads during + the backup process. https://github.com/restic/restic/pull/2750 @@ -1228,78 +1209,75 @@ restic users. The changes are ordered by importance. * Enhancement #3096: Make `mount` command support macOS using macFUSE 4.x - Restic now uses a different FUSE library for mounting snapshots and making them - available as a FUSE filesystem using the `mount` command. This adds support for - macFUSE 4.x which can be used to make this work on recent macOS versions. + Restic now uses a different FUSE library for mounting snapshots and making them available as a + FUSE filesystem using the `mount` command. This adds support for macFUSE 4.x which can be used + to make this work on recent macOS versions. https://github.com/restic/restic/issues/3096 https://github.com/restic/restic/pull/4024 * Enhancement #3124: Support JSON output for the `init` command - The `init` command used to ignore the `--json` option, but now outputs a JSON - message if the repository was created successfully. + The `init` command used to ignore the `--json` option, but now outputs a JSON message if the + repository was created successfully. https://github.com/restic/restic/issues/3124 https://github.com/restic/restic/pull/3132 * Enhancement #3899: Optimize prune memory usage - The `prune` command needs large amounts of memory in order to determine what to - keep and what to remove. This is now optimized to use up to 30% less memory. + The `prune` command needs large amounts of memory in order to determine what to keep and what to + remove. This is now optimized to use up to 30% less memory. https://github.com/restic/restic/pull/3899 * Enhancement #3905: Improve speed of parent snapshot detection in `backup` command - Backing up a large number of files using `--files-from-verbatim` or - `--files-from-raw` options could require a long time to find the parent - snapshot. This has been improved. + Backing up a large number of files using `--files-from-verbatim` or `--files-from-raw` + options could require a long time to find the parent snapshot. This has been improved. https://github.com/restic/restic/pull/3905 * Enhancement #3915: Add compression statistics to the `stats` command - When executed with `--mode raw-data` on a repository that supports compression, - the `stats` command now calculates and displays, for the selected repository or - snapshots: the uncompressed size of the data; the compression progress - (percentage of data that has been compressed); the compression ratio of the - compressed data; the total space saving. 
+ When executed with `--mode raw-data` on a repository that supports compression, the `stats` + command now calculates and displays, for the selected repository or snapshots: the + uncompressed size of the data; the compression progress (percentage of data that has been + compressed); the compression ratio of the compressed data; the total space saving. - It also takes into account both the compressed and uncompressed data if the - repository is only partially compressed. + It also takes into account both the compressed and uncompressed data if the repository is only + partially compressed. https://github.com/restic/restic/pull/3915 * Enhancement #3925: Provide command completion for PowerShell - Restic already provided generation of completion files for bash, fish and zsh. - Now powershell is supported, too. + Restic already provided generation of completion files for bash, fish and zsh. Now PowerShell + is supported, too. https://github.com/restic/restic/pull/3925/files * Enhancement #3931: Allow `backup` file tree scanner to be disabled - The `backup` command walks the file tree in a separate scanner process to find - the total size and file/directory count, and uses this to provide an ETA. This - can slow down backups, especially of network filesystems. + The `backup` command walks the file tree in a separate scanner process to find the total size and + file/directory count, and uses this to provide an ETA. This can slow down backups, especially + of network filesystems. - The command now has a new option `--no-scan` which can be used to disable this - scanning in order to speed up backups when needed. + The command now has a new option `--no-scan` which can be used to disable this scanning in order + to speed up backups when needed. https://github.com/restic/restic/pull/3931 * Enhancement #3932: Improve handling of ErrDot errors in rclone and sftp backends - Since Go 1.19, restic can no longer implicitly run relative executables which - are found in the current directory (e.g. `rclone` if found in `.`). This is a - security feature of Go to prevent against running unintended and possibly - harmful executables. + Since Go 1.19, restic can no longer implicitly run relative executables which are found in the + current directory (e.g. `rclone` if found in `.`). This is a security feature of Go to prevent + running unintended and possibly harmful executables. - The error message for this was just "cannot run executable found relative to - current directory". This has now been improved to yield a more specific error - message, informing the user how to explicitly allow running the executable using - the `-o rclone.program` and `-o sftp.command` extended options with `./`. + The error message for this was just "cannot run executable found relative to current + directory". This has now been improved to yield a more specific error message, informing the + user how to explicitly allow running the executable using the `-o rclone.program` and `-o + sftp.command` extended options with `./`. https://github.com/restic/restic/issues/3932 https://pkg.go.dev/os/exec#hdr-Executables_in_the_current_directory @@ -1307,21 +1285,20 @@ restic users. The changes are ordered by importance. * Enhancement #3943: Ignore additional/unknown files in repository - If a restic repository had additional files in it (not created by restic), - commands like `find` and `restore` could become confused and fail with an - `multiple IDs with prefix "12345678" found` error.
These commands now ignore - such additional files. + If a restic repository had additional files in it (not created by restic), commands like `find` + and `restore` could become confused and fail with a `multiple IDs with prefix "12345678" + found` error. These commands now ignore such additional files. https://github.com/restic/restic/pull/3943 https://forum.restic.net/t/which-protocol-should-i-choose-for-remote-linux-backups/5446/17 * Enhancement #3955: Improve `backup` performance for small files - When backing up small files restic was slower than it could be. In particular - this affected backups using maximum compression. + When backing up small files restic was slower than it could be. In particular this affected + backups using maximum compression. - This has been fixed by reworking the internal parallelism of the backup command, - making it back up small files around two times faster. + This has been fixed by reworking the internal parallelism of the backup command, making it back + up small files around two times faster. https://github.com/restic/restic/pull/3955 @@ -1370,23 +1347,22 @@ restic users. The changes are ordered by importance. * Bugfix #2248: Support `self-update` on Windows - Restic `self-update` would fail in situations where the operating system locks - running binaries, including Windows. The new behavior works around this by - renaming the running file and swapping the updated file in place. + Restic `self-update` would fail in situations where the operating system locks running + binaries, including Windows. The new behavior works around this by renaming the running file + and swapping the updated file in place. https://github.com/restic/restic/issues/2248 https://github.com/restic/restic/pull/3675 * Bugfix #3428: List snapshots in backend at most once to resolve snapshot IDs - Many commands support specifying a list of snapshot IDs which are then used to - determine the snapshots to be processed by the command. To resolve snapshot IDs - or `latest`, and check that these exist, restic previously listed all snapshots - stored in the repository. Depending on the backend this could be a slow and/or - expensive operation. + Many commands support specifying a list of snapshot IDs which are then used to determine the + snapshots to be processed by the command. To resolve snapshot IDs or `latest`, and check that + these exist, restic previously listed all snapshots stored in the repository. Depending on + the backend this could be a slow and/or expensive operation. - Restic now lists the snapshots only once and remembers the result in order to - resolve all further snapshot IDs swiftly. + Restic now lists the snapshots only once and remembers the result in order to resolve all + further snapshot IDs swiftly. https://github.com/restic/restic/issues/3428 https://github.com/restic/restic/pull/3570 @@ -1394,28 +1370,27 @@ restic users. The changes are ordered by importance. * Bugfix #3432: Fix rare 'not found in repository' error for `copy` command - In rare cases `copy` (and other commands) would report that `LoadTree(...)` - returned an `id [...] not found in repository` error. This could be caused by a - backup or copy command running concurrently. The error was only temporary; - running the failed restic command a second time as a workaround did resolve the - error. + In rare cases `copy` (and other commands) would report that `LoadTree(...)` returned an `id + [...] not found in repository` error.
This could be caused by a backup or copy command running + concurrently. The error was only temporary; running the failed restic command a second time as + a workaround did resolve the error. - This issue has now been fixed by correcting the order in which restic reads data - from the repository. It is now guaranteed that restic only loads snapshots for - which all necessary data is already available. + This issue has now been fixed by correcting the order in which restic reads data from the + repository. It is now guaranteed that restic only loads snapshots for which all necessary data + is already available. https://github.com/restic/restic/issues/3432 https://github.com/restic/restic/pull/3570 * Bugfix #3681: Fix rclone (shimmed by Scoop) and sftp not working on Windows - In #3602 a fix was introduced to address the problem of `rclone` prematurely - exiting when Ctrl+C is pressed on Windows. The solution was to create the - subprocess with its console detached from the restic console. + In #3602 a fix was introduced to address the problem of `rclone` prematurely exiting when + Ctrl+C is pressed on Windows. The solution was to create the subprocess with its console + detached from the restic console. - However, this solution failed when using `rclone` installed by Scoop or using - `sftp` with a passphrase-protected private key. We've now fixed this by using a - different approach to prevent Ctrl-C from passing down too early. + However, this solution failed when using `rclone` installed by Scoop or using `sftp` with a + passphrase-protected private key. We've now fixed this by using a different approach to + prevent Ctrl-C from passing down too early. https://github.com/restic/restic/issues/3681 https://github.com/restic/restic/issues/3692 @@ -1423,28 +1398,28 @@ restic users. The changes are ordered by importance. * Bugfix #3685: The `diff` command incorrectly listed some files as added - There was a bug in the `diff` command, causing it to always show files in a - removed directory as added. This has now been fixed. + There was a bug in the `diff` command, causing it to always show files in a removed directory as + added. This has now been fixed. https://github.com/restic/restic/issues/3685 https://github.com/restic/restic/pull/3686 * Bugfix #3716: Print "wrong password" to stderr instead of stdout - If an invalid password was entered, the error message was printed on stdout and - not on stderr as intended. This has now been fixed. + If an invalid password was entered, the error message was printed on stdout and not on stderr as + intended. This has now been fixed. https://github.com/restic/restic/pull/3716 https://forum.restic.net/t/4965 * Bugfix #3720: Directory sync errors for repositories accessed via SMB - On Linux and macOS, accessing a repository via a SMB/CIFS mount resulted in - restic failing to save the lock file, yielding the following errors: + On Linux and macOS, accessing a repository via a SMB/CIFS mount resulted in restic failing to + save the lock file, yielding the following errors: - Save() returned error, retrying after 552.330144ms: sync - /repo/locks: no such file or directory Save() returned error, - retrying after 552.330144ms: sync /repo/locks: invalid argument + Save() returned error, retrying after 552.330144ms: sync /repo/locks: + no such file or directory Save() returned error, retrying after + 552.330144ms: sync /repo/locks: invalid argument This has now been fixed by ignoring the relevant error codes. @@ -1454,23 +1429,22 @@ restic users. 
The changes are ordered by importance. * Bugfix #3736: The `stats` command miscalculated restore size for multiple snapshots - Since restic 0.10.0 the restore size calculated by the `stats` command for - multiple snapshots was too low. The hardlink detection was accidentally applied - across multiple snapshots and thus ignored many files. This has now been fixed. + Since restic 0.10.0 the restore size calculated by the `stats` command for multiple snapshots + was too low. The hardlink detection was accidentally applied across multiple snapshots and + thus ignored many files. This has now been fixed. https://github.com/restic/restic/issues/3736 https://github.com/restic/restic/pull/3740 * Bugfix #3772: Correctly rebuild index for legacy repositories - After running `rebuild-index` on a legacy repository containing mixed pack files - (that is, pack files which store both metadata and file data), `check` printed - warnings like `pack 12345678 contained in several indexes: ...`. This warning - was not critical, but has now nonetheless been fixed by properly handling mixed - pack files while rebuilding the index. + After running `rebuild-index` on a legacy repository containing mixed pack files (that is, + pack files which store both metadata and file data), `check` printed warnings like `pack + 12345678 contained in several indexes: ...`. This warning was not critical, but has now + nonetheless been fixed by properly handling mixed pack files while rebuilding the index. - Running `prune` for such legacy repositories will also fix the warning by - reorganizing the pack files which caused it. + Running `prune` for such legacy repositories will also fix the warning by reorganizing the + pack files which caused it. https://github.com/restic/restic/pull/3772 https://github.com/restic/restic/pull/3884 @@ -1478,20 +1452,18 @@ restic users. The changes are ordered by importance. * Bugfix #3776: Limit number of key files tested while opening a repository - Previously, restic tested the password against every key in the repository when - opening a repository. The more keys there were in the repository, the slower - this operation became. + Previously, restic tested the password against every key in the repository when opening a + repository. The more keys there were in the repository, the slower this operation became. - Restic now tests the password against up to 20 key files in the repository. - Alternatively, you can use the `--key-hint=` option to specify a - specific key file to use instead. + Restic now tests the password against up to 20 key files in the repository. Alternatively, you + can use the `--key-hint=` option to specify a specific key file to use instead. https://github.com/restic/restic/pull/3776 * Bugfix #3861: Yield error on invalid policy to `forget` - The `forget` command previously silently ignored invalid/unsupported units in - the duration options, such as e.g. `--keep-within-daily 2w`. + The `forget` command previously silently ignored invalid/unsupported units in the duration + options, such as e.g. `--keep-within-daily 2w`. Specifying an invalid/unsupported duration unit now results in an error. @@ -1500,78 +1472,71 @@ restic users. The changes are ordered by importance. * Change #1842: Support debug log creation in release builds - Creating a debug log was only possible in debug builds which required users to - manually build restic. We changed the release builds to allow creating debug - logs by simply setting the environment variable `DEBUG_LOG=logname.log`. 
+ Creating a debug log was only possible in debug builds which required users to manually build + restic. We changed the release builds to allow creating debug logs by simply setting the + environment variable `DEBUG_LOG=logname.log`. https://github.com/restic/restic/issues/1842 https://github.com/restic/restic/pull/3826 * Change #3295: Deprecate `check --check-unused` and add further checks - Since restic 0.12.0, it is expected to still have unused blobs after running - `prune`. This made the `--check-unused` option of the `check` command rather - useless and tended to confuse users. This option has been deprecated and is now - ignored. + Since restic 0.12.0, it is expected to still have unused blobs after running `prune`. This made + the `--check-unused` option of the `check` command rather useless and tended to confuse + users. This option has been deprecated and is now ignored. - The `check` command now also warns if a repository is using either the legacy S3 - layout or mixed pack files with both tree and data blobs. The latter is known to - cause performance problems. + The `check` command now also warns if a repository is using either the legacy S3 layout or mixed + pack files with both tree and data blobs. The latter is known to cause performance problems. https://github.com/restic/restic/issues/3295 https://github.com/restic/restic/pull/3730 * Change #3680: Update dependencies and require Go 1.15 or newer - We've updated most dependencies. Since some libraries require newer language - features we're dropping support for Go 1.14, which means that restic now - requires at least Go 1.15 to build. + We've updated most dependencies. Since some libraries require newer language features we're + dropping support for Go 1.14, which means that restic now requires at least Go 1.15 to build. https://github.com/restic/restic/issues/3680 https://github.com/restic/restic/issues/3883 * Change #3742: Replace `--repo2` option used by `init`/`copy` with `--from-repo` - The `init` and `copy` commands can read data from another repository. However, - confusingly `--repo2` referred to the repository *from* which the `init` command - copies parameters, but for the `copy` command `--repo2` referred to the copy - *destination*. + The `init` and `copy` commands can read data from another repository. However, confusingly + `--repo2` referred to the repository *from* which the `init` command copies parameters, but + for the `copy` command `--repo2` referred to the copy *destination*. - We've introduced a new option, `--from-repo`, which always refers to the source - repository for both commands. The old parameter names have been deprecated but - still work. To create a new repository and copy all snapshots to it, the - commands are now as follows: + We've introduced a new option, `--from-repo`, which always refers to the source repository + for both commands. The old parameter names have been deprecated but still work. 
To create a new + repository and copy all snapshots to it, the commands are now as follows: - ``` - restic -r /srv/restic-repo-copy init --from-repo /srv/restic-repo --copy-chunker-params - restic -r /srv/restic-repo-copy copy --from-repo /srv/restic-repo - ``` + ``` + restic -r /srv/restic-repo-copy init --from-repo /srv/restic-repo --copy-chunker-params + restic -r /srv/restic-repo-copy copy --from-repo /srv/restic-repo + ``` https://github.com/restic/restic/pull/3742 https://forum.restic.net/t/5017 * Enhancement #21: Add compression support - We've added compression support to the restic repository format. To create a - repository using the new format run `init --repository-version 2`. Please note - that the repository cannot be read by restic versions prior to 0.14.0. + We've added compression support to the restic repository format. To create a repository using + the new format run `init --repository-version 2`. Please note that the repository cannot be + read by restic versions prior to 0.14.0. - You can configure whether data is compressed with the option `--compression`. It - can be set to `auto` (the default, which will compress very fast), `max` (which - will trade backup speed and CPU usage for better compression), or `off` (which - disables compression). Each setting is only applied for the current run of - restic and does *not* apply to future runs. The option can also be set via the - environment variable `RESTIC_COMPRESSION`. + You can configure whether data is compressed with the option `--compression`. It can be set to + `auto` (the default, which will compress very fast), `max` (which will trade backup speed and + CPU usage for better compression), or `off` (which disables compression). Each setting is + only applied for the current run of restic and does *not* apply to future runs. The option can + also be set via the environment variable `RESTIC_COMPRESSION`. - To upgrade in place run `migrate upgrade_repo_v2` followed by `prune`. See the - documentation for more details. The migration checks the repository integrity - and upgrades the repository format, but will not change any data. Afterwards, - prune will rewrite the metadata to make use of compression. + To upgrade in place run `migrate upgrade_repo_v2` followed by `prune`. See the documentation + for more details. The migration checks the repository integrity and upgrades the repository + format, but will not change any data. Afterwards, prune will rewrite the metadata to make use of + compression. - As an alternative you can use the `copy` command to migrate snapshots; First - create a new repository using `init --repository-version 2 --copy-chunker-params - --repo2 path/to/old/repo`, and then use the `copy` command to copy all snapshots - to the new repository. + As an alternative you can use the `copy` command to migrate snapshots; first create a new + repository using `init --repository-version 2 --copy-chunker-params --repo2 + path/to/old/repo`, and then use the `copy` command to copy all snapshots to the new + repository.
Make sure to read the documentation first! + When running out of disk space it was no longer possible to add or remove data from a repository. + To help with recovering from such a deadlock, the prune command now supports an + `--unsafe-recover-no-free-space` option to recover from these situations. Make sure to + read the documentation first! https://github.com/restic/restic/issues/1153 https://github.com/restic/restic/pull/3481 * Enhancement #2162: Adaptive IO concurrency based on backend connections - Many commands used hard-coded limits for the number of concurrent operations. - This prevented speed improvements by increasing the number of connections used - by a backend. + Many commands used hard-coded limits for the number of concurrent operations. This prevented + speed improvements by increasing the number of connections used by a backend. - These limits have now been replaced by using the configured number of backend - connections instead, which can be controlled using the `-o - .connections=5` option. Commands will then automatically scale - their parallelism accordingly. + These limits have now been replaced by using the configured number of backend connections + instead, which can be controlled using the `-o .connections=5` option. + Commands will then automatically scale their parallelism accordingly. - To limit the number of CPU cores used by restic, you can set the environment - variable `GOMAXPROCS` accordingly. For example to use a single CPU core, use - `GOMAXPROCS=1`. + To limit the number of CPU cores used by restic, you can set the environment variable + `GOMAXPROCS` accordingly. For example to use a single CPU core, use `GOMAXPROCS=1`. https://github.com/restic/restic/issues/2162 https://github.com/restic/restic/issues/1467 @@ -1610,47 +1572,45 @@ restic users. The changes are ordered by importance. * Enhancement #2291: Allow pack size customization - Restic now uses a target pack size of 16 MiB by default. This can be customized - using the `--pack-size size` option. Supported pack sizes range between 4 and - 128 MiB. + Restic now uses a target pack size of 16 MiB by default. This can be customized using the + `--pack-size size` option. Supported pack sizes range between 4 and 128 MiB. - It is possible to migrate an existing repository to _larger_ pack files using - `prune --repack-small`. This will rewrite every pack file which is significantly - smaller than the target size. + It is possible to migrate an existing repository to _larger_ pack files using `prune + --repack-small`. This will rewrite every pack file which is significantly smaller than the + target size. https://github.com/restic/restic/issues/2291 https://github.com/restic/restic/pull/3731 * Enhancement #2295: Allow use of SAS token to authenticate to Azure - Previously restic only supported AccountKeys to authenticate to Azure storage - accounts, which necessitates giving a significant amount of access. + Previously restic only supported AccountKeys to authenticate to Azure storage accounts, + which necessitates giving a significant amount of access. - We added support for Azure SAS tokens which are a more fine-grained and - time-limited manner of granting access. Set the `AZURE_ACCOUNT_NAME` and - `AZURE_ACCOUNT_SAS` environment variables to use a SAS token for authentication. - Note that if `AZURE_ACCOUNT_KEY` is set, it will take precedence. + We added support for Azure SAS tokens which are a more fine-grained and time-limited manner of + granting access. 
Set the `AZURE_ACCOUNT_NAME` and `AZURE_ACCOUNT_SAS` environment + variables to use a SAS token for authentication. Note that if `AZURE_ACCOUNT_KEY` is set, it + will take precedence. https://github.com/restic/restic/issues/2295 https://github.com/restic/restic/pull/3661 * Enhancement #2351: Use config file permissions to control file group access - Previously files in a local/SFTP repository would always end up with very - restrictive access permissions, allowing access only to the owner. This - prevented a number of valid use-cases involving groups and ACLs. + Previously files in a local/SFTP repository would always end up with very restrictive access + permissions, allowing access only to the owner. This prevented a number of valid use-cases + involving groups and ACLs. - We now use the permissions of the config file in the repository to decide - whether group access should be given to newly created repository files or not. - We arrange for repository files to be created group readable exactly when the - repository config file is group readable. + We now use the permissions of the config file in the repository to decide whether group access + should be given to newly created repository files or not. We arrange for repository files to be + created group readable exactly when the repository config file is group readable. - To opt-in to group readable repositories, a simple `chmod -R g+r` or equivalent - on the config file can be used. For repositories that should be writable by - group members a tad more setup is required, see the docs. + To opt-in to group readable repositories, a simple `chmod -R g+r` or equivalent on the config + file can be used. For repositories that should be writable by group members a tad more setup is + required, see the docs. - Posix ACLs can also be used now that the group permissions being forced to zero - no longer masks the effect of ACL entries. + Posix ACLs can also be used now that the group permissions being forced to zero no longer masks + the effect of ACL entries. https://github.com/restic/restic/issues/2351 https://github.com/restic/restic/pull/3419 @@ -1658,29 +1618,27 @@ restic users. The changes are ordered by importance. * Enhancement #2696: Improve backup speed with many small files - We have restructured the backup pipeline to continue reading files while all - upload connections are busy. This allows the backup to already prepare the next - data file such that the upload can continue as soon as a connection becomes - available. This can especially improve the backup performance for high latency - backends. + We have restructured the backup pipeline to continue reading files while all upload + connections are busy. This allows the backup to already prepare the next data file such that the + upload can continue as soon as a connection becomes available. This can especially improve the + backup performance for high latency backends. - The upload concurrency is now controlled using the `-o - .connections=5` option. + The upload concurrency is now controlled using the `-o .connections=5` + option. https://github.com/restic/restic/issues/2696 https://github.com/restic/restic/pull/3489 * Enhancement #2907: Make snapshot directory structure of `mount` command customizable - We've added the possibility to customize the snapshot directory structure of the - `mount` command using templates passed to the `--snapshot-template` option. 
The - formatting of snapshots' timestamps is now controlled using `--time-template` - and supports subdirectories to for example group snapshots by year. Please see - `restic help mount` for further details. + We've added the possibility to customize the snapshot directory structure of the `mount` + command using templates passed to the `--snapshot-template` option. The formatting of + snapshots' timestamps is now controlled using `--time-template` and supports + subdirectories to for example group snapshots by year. Please see `restic help mount` for + further details. - Characters in tag names which are not allowed in a filename are replaced by - underscores `_`. For example a tag `foo/bar` will result in a directory name of - `foo_bar`. + Characters in tag names which are not allowed in a filename are replaced by underscores `_`. For + example a tag `foo/bar` will result in a directory name of `foo_bar`. https://github.com/restic/restic/issues/2907 https://github.com/restic/restic/pull/2913 @@ -1688,9 +1646,8 @@ restic users. The changes are ordered by importance. * Enhancement #2923: Improve speed of `copy` command - The `copy` command could require a long time to copy snapshots for non-local - backends. This has been improved to provide a throughput comparable to the - `restore` command. + The `copy` command could require a long time to copy snapshots for non-local backends. This has + been improved to provide a throughput comparable to the `restore` command. Additionally, `copy` now displays a progress bar. @@ -1699,23 +1656,21 @@ restic users. The changes are ordered by importance. * Enhancement #3114: Optimize handling of duplicate blobs in `prune` - Restic `prune` always used to repack all data files containing duplicate blobs. - This effectively removed all duplicates during prune. However, as a consequence - all these data files were repacked even if the unused repository space threshold - could be reached with less work. + Restic `prune` always used to repack all data files containing duplicate blobs. This + effectively removed all duplicates during prune. However, as a consequence all these data + files were repacked even if the unused repository space threshold could be reached with less + work. - This is now changed and `prune` works nice and fast even when there are lots of - duplicate blobs. + This is now changed and `prune` works nice and fast even when there are lots of duplicate blobs. https://github.com/restic/restic/issues/3114 https://github.com/restic/restic/pull/3290 * Enhancement #3465: Improve handling of temporary files on Windows - In some cases restic failed to delete temporary files, causing the current - command to fail. This has now been fixed by ensuring that Windows automatically - deletes the file. In addition, temporary files are only written to disk when - necessary, reducing disk writes. + In some cases restic failed to delete temporary files, causing the current command to fail. + This has now been fixed by ensuring that Windows automatically deletes the file. In addition, + temporary files are only written to disk when necessary, reducing disk writes. https://github.com/restic/restic/issues/3465 https://github.com/restic/restic/issues/1551 @@ -1723,23 +1678,22 @@ restic users. The changes are ordered by importance. * Enhancement #3475: Allow limiting IO concurrency for local and SFTP backend - Restic did not support limiting the IO concurrency / number of connections for - accessing repositories stored using the local or SFTP backends. 
The number of - connections is now limited as for other backends, and can be configured via the - `-o local.connections=2` and `-o sftp.connections=5` options. This ensures that - restic does not overwhelm the backend with concurrent IO operations. + Restic did not support limiting the IO concurrency / number of connections for accessing + repositories stored using the local or SFTP backends. The number of connections is now limited + as for other backends, and can be configured via the `-o local.connections=2` and `-o + sftp.connections=5` options. This ensures that restic does not overwhelm the backend with + concurrent IO operations. https://github.com/restic/restic/pull/3475 * Enhancement #3484: Stream data in `check` and `prune` commands - The commands `check --read-data` and `prune` previously downloaded data files - into temporary files which could end up being written to disk. This could cause - a large amount of data being written to disk. + The commands `check --read-data` and `prune` previously downloaded data files into + temporary files which could end up being written to disk. This could cause a large amount of data + being written to disk. - The pack files are now instead streamed, which removes the need for temporary - files. Please note that *uploads* during `backup` and `prune` still require - temporary files. + The pack files are now instead streamed, which removes the need for temporary files. Please + note that *uploads* during `backup` and `prune` still require temporary files. https://github.com/restic/restic/issues/3710 https://github.com/restic/restic/pull/3484 @@ -1748,19 +1702,19 @@ restic users. The changes are ordered by importance. * Enhancement #3709: Validate exclude patterns before backing up Exclude patterns provided via `--exclude`, `--iexclude`, `--exclude-file` or - `--iexclude-file` previously weren't validated. As a consequence, invalid - patterns resulted in files that were meant to be excluded being backed up. + `--iexclude-file` previously weren't validated. As a consequence, invalid patterns + resulted in files that were meant to be excluded being backed up. - Restic now validates all patterns before running the backup and aborts with a - fatal error if an invalid pattern is detected. + Restic now validates all patterns before running the backup and aborts with a fatal error if an + invalid pattern is detected. https://github.com/restic/restic/issues/3709 https://github.com/restic/restic/pull/3734 * Enhancement #3729: Display full IDs in `check` warnings - When running commands to inspect or repair a damaged repository, it is often - necessary to supply the full IDs of objects stored in the repository. + When running commands to inspect or repair a damaged repository, it is often necessary to + supply the full IDs of objects stored in the repository. The output of `check` now includes full IDs instead of their shortened variant. @@ -1768,29 +1722,28 @@ restic users. The changes are ordered by importance. * Enhancement #3773: Optimize memory usage for directories with many files - Backing up a directory with hundreds of thousands or more files caused restic to - require large amounts of memory. We've now optimized the `backup` command such - that it requires up to 30% less memory. + Backing up a directory with hundreds of thousands or more files caused restic to require large + amounts of memory. We've now optimized the `backup` command such that it requires up to 30% less + memory. 
https://github.com/restic/restic/pull/3773 * Enhancement #3819: Validate include/exclude patterns before restoring Patterns provided to `restore` via `--exclude`, `--iexclude`, `--include` and - `--iinclude` weren't validated before running the restore. Invalid patterns - would result in error messages being printed repeatedly, and possibly unwanted - files being restored. + `--iinclude` weren't validated before running the restore. Invalid patterns would result in + error messages being printed repeatedly, and possibly unwanted files being restored. - Restic now validates all patterns before running the restore, and aborts with a - fatal error if an invalid pattern is detected. + Restic now validates all patterns before running the restore, and aborts with a fatal error if + an invalid pattern is detected. https://github.com/restic/restic/pull/3819 * Enhancement #3837: Improve SFTP repository initialization over slow links - The `init` command, when used on an SFTP backend, now sends multiple `mkdir` - commands to the backend concurrently. This reduces the waiting times when - creating a repository over a very slow connection. + The `init` command, when used on an SFTP backend, now sends multiple `mkdir` commands to the + backend concurrently. This reduces the waiting times when creating a repository over a very + slow connection. https://github.com/restic/restic/issues/3837 https://github.com/restic/restic/pull/3840 @@ -1841,9 +1794,9 @@ restic users. The changes are ordered by importance. * Bugfix #1106: Never lock repository for `list locks` - The `list locks` command previously locked to the repository by default. This - had the problem that it wouldn't work for an exclusively locked repository and - that the command would also display its own lock file which can be confusing. + The `list locks` command previously locked to the repository by default. This had the problem + that it wouldn't work for an exclusively locked repository and that the command would also + display its own lock file which can be confusing. Now, the `list locks` command never locks the repository. @@ -1852,24 +1805,22 @@ restic users. The changes are ordered by importance. * Bugfix #2345: Make cache crash-resistant and usable by multiple concurrent processes - The restic cache directory (`RESTIC_CACHE_DIR`) could end up in a broken state - in the event of restic (or the OS) crashing. This is now less likely to occur as - files are downloaded to a temporary location before being moved to their proper - location. + The restic cache directory (`RESTIC_CACHE_DIR`) could end up in a broken state in the event of + restic (or the OS) crashing. This is now less likely to occur as files are downloaded to a + temporary location before being moved to their proper location. - This also allows multiple concurrent restic processes to operate on a single - repository without conflicts. Previously, concurrent operations could cause - segfaults because the processes saw each other's partially downloaded files. + This also allows multiple concurrent restic processes to operate on a single repository + without conflicts. Previously, concurrent operations could cause segfaults because the + processes saw each other's partially downloaded files. https://github.com/restic/restic/issues/2345 https://github.com/restic/restic/pull/2838 * Bugfix #2452: Improve error handling of repository locking - Previously, when the lock refresh failed to delete the old lock file, it forgot - about the newly created one. 
Instead it continued trying to delete the old - (usually no longer existing) lock file and thus over time lots of lock files - accumulated. This has now been fixed. + Previously, when the lock refresh failed to delete the old lock file, it forgot about the newly + created one. Instead it continued trying to delete the old (usually no longer existing) lock + file and thus over time lots of lock files accumulated. This has now been fixed. https://github.com/restic/restic/issues/2452 https://github.com/restic/restic/issues/2473 @@ -1878,45 +1829,43 @@ restic users. The changes are ordered by importance. * Bugfix #2738: Don't print progress for `backup --json --quiet` - Unlike the text output, the `--json` output format still printed progress - information even in `--quiet` mode. This has now been fixed by always disabling - the progress output in quiet mode. + Unlike the text output, the `--json` output format still printed progress information even in + `--quiet` mode. This has now been fixed by always disabling the progress output in quiet mode. https://github.com/restic/restic/issues/2738 https://github.com/restic/restic/pull/3264 * Bugfix #3382: Make `check` command honor `RESTIC_CACHE_DIR` environment variable - Previously, the `check` command didn't honor the `RESTIC_CACHE_DIR` environment - variable, which caused problems in certain system/usage configurations. This has - now been fixed. + Previously, the `check` command didn't honor the `RESTIC_CACHE_DIR` environment variable, + which caused problems in certain system/usage configurations. This has now been fixed. https://github.com/restic/restic/issues/3382 https://github.com/restic/restic/pull/3474 * Bugfix #3488: `rebuild-index` failed if an index file was damaged - Previously, the `rebuild-index` command would fail with an error if an index - file was damaged or truncated. This has now been fixed. + Previously, the `rebuild-index` command would fail with an error if an index file was damaged + or truncated. This has now been fixed. - On older restic versions, a (slow) workaround is to use `rebuild-index - --read-all-packs` or to manually delete the damaged index. + On older restic versions, a (slow) workaround is to use `rebuild-index --read-all-packs` or + to manually delete the damaged index. https://github.com/restic/restic/pull/3488 * Bugfix #3518: Make `copy` command honor `--no-lock` for source repository - The `copy` command previously did not respect the `--no-lock` option for the - source repository, causing failures with read-only storage backends. This has - now been fixed such that the option is now respected. + The `copy` command previously did not respect the `--no-lock` option for the source + repository, causing failures with read-only storage backends. This has now been fixed such + that the option is now respected. https://github.com/restic/restic/issues/3518 https://github.com/restic/restic/pull/3589 * Bugfix #3556: Fix hang with Backblaze B2 on SSL certificate authority error - Previously, if a request failed with an SSL unknown certificate authority error, - the B2 backend retried indefinitely and restic would appear to hang. + Previously, if a request failed with an SSL unknown certificate authority error, the B2 + backend retried indefinitely and restic would appear to hang. This has now been fixed and restic instead fails with an error message. @@ -1926,103 +1875,95 @@ restic users. The changes are ordered by importance. 
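To make the cache handling from the #2345 and #3382 entries above a bit more concrete, here is a minimal, hypothetical Python sketch of a wrapper that runs `restic check` with a dedicated cache directory via `RESTIC_CACHE_DIR`. The paths are placeholders rather than values taken from this changelog, and password handling is left to the caller's environment.

```python
import os
import subprocess

# Hypothetical wrapper illustrating the RESTIC_CACHE_DIR support described in
# the #2345 and #3382 entries above. Paths are placeholders; RESTIC_PASSWORD
# (or RESTIC_PASSWORD_FILE) is expected to already be set by the caller.
env = dict(os.environ)
env["RESTIC_CACHE_DIR"] = "/var/cache/restic-example"  # assumed cache location
env["RESTIC_REPOSITORY"] = "/srv/restic-repo-example"  # assumed repository path

result = subprocess.run(["restic", "check"], env=env)
if result.returncode != 0:
    raise SystemExit(f"restic check exited with status {result.returncode}")
```

With the fix from #3382, `check` actually uses the configured cache directory instead of ignoring the variable, so a wrapper like this keeps its cache separate from interactive use.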
* Bugfix #3591: Fix handling of `prune --max-repack-size=0` - Restic ignored the `--max-repack-size` option when passing a value of 0. This - has now been fixed. + Restic ignored the `--max-repack-size` option when passing a value of 0. This has now been + fixed. - As a workaround, `--max-repack-size=1` can be used with older versions of - restic. + As a workaround, `--max-repack-size=1` can be used with older versions of restic. https://github.com/restic/restic/pull/3591 * Bugfix #3601: Fix rclone backend prematurely exiting when receiving SIGINT on Windows - Previously, pressing Ctrl+C in a Windows console where restic was running with - rclone as the backend would cause rclone to exit prematurely due to getting a - `SIGINT` signal at the same time as restic. Restic would then wait for a long - time with "unexpected EOF" and "rclone stdio connection already closed" - errors. + Previously, pressing Ctrl+C in a Windows console where restic was running with rclone as the + backend would cause rclone to exit prematurely due to getting a `SIGINT` signal at the same time + as restic. Restic would then wait for a long time with "unexpected EOF" and "rclone + stdio connection already closed" errors. - This has now been fixed by restic starting the rclone process detached from the - console restic runs in (similar to starting processes in a new process group on - Linux), which enables restic to gracefully clean up rclone (which now never gets - the `SIGINT`). + This has now been fixed by restic starting the rclone process detached from the console restic + runs in (similar to starting processes in a new process group on Linux), which enables restic to + gracefully clean up rclone (which now never gets the `SIGINT`). https://github.com/restic/restic/issues/3601 https://github.com/restic/restic/pull/3602 * Bugfix #3619: Avoid choosing parent snapshots newer than time of new snapshot - The `backup` command, when a `--parent` was not provided, previously chose the - most recent matching snapshot as the parent snapshot. However, this didn't make - sense when the user passed `--time` to create a new snapshot older than the most - recent snapshot. + The `backup` command, when a `--parent` was not provided, previously chose the most recent + matching snapshot as the parent snapshot. However, this didn't make sense when the user passed + `--time` to create a new snapshot older than the most recent snapshot. - Instead, `backup` now chooses the most recent snapshot which is not newer than - the snapshot-being-created's timestamp, to avoid any time travel. + Instead, `backup` now chooses the most recent snapshot which is not newer than the + snapshot-being-created's timestamp, to avoid any time travel. https://github.com/restic/restic/pull/3619 * Bugfix #3667: The `mount` command now reports symlinks sizes - Symlinks used to have size zero in restic mountpoints, confusing some - third-party tools. They now have a size equal to the byte length of their target - path, as required by POSIX. + Symlinks used to have size zero in restic mountpoints, confusing some third-party tools. They + now have a size equal to the byte length of their target path, as required by POSIX. https://github.com/restic/restic/issues/3667 https://github.com/restic/restic/pull/3668 * Change #3519: Require Go 1.14 or newer - Restic now requires Go 1.14 to build.
This allows it to use new standard library features + instead of an external dependency. https://github.com/restic/restic/issues/3519 * Change #3641: Ignore parent snapshot for `backup --stdin` - Restic uses a parent snapshot to speed up directory scanning when performing - backups, but this only wasted time and memory when the backup source is stdin - (using the `--stdin` option of the `backup` command), since no directory - scanning is performed in this case. + Restic uses a parent snapshot to speed up directory scanning when performing backups, but this + only wasted time and memory when the backup source is stdin (using the `--stdin` option of the + `backup` command), since no directory scanning is performed in this case. - Snapshots made with `backup --stdin` no longer have a parent snapshot, which - allows restic to skip some startup operations and saves a bit of resources. + Snapshots made with `backup --stdin` no longer have a parent snapshot, which allows restic to + skip some startup operations and saves a bit of resources. - The `--parent` option is still available for `backup --stdin`, but is now - ignored. + The `--parent` option is still available for `backup --stdin`, but is now ignored. https://github.com/restic/restic/issues/3641 https://github.com/restic/restic/pull/3645 * Enhancement #233: Support negative include/exclude patterns - If a pattern starts with an exclamation mark and it matches a file that was - previously matched by a regular pattern, the match is cancelled. Notably, this - can be used with `--exclude-file` to cancel the exclusion of some files. + If a pattern starts with an exclamation mark and it matches a file that was previously matched by + a regular pattern, the match is cancelled. Notably, this can be used with `--exclude-file` to + cancel the exclusion of some files. - It works similarly to `.gitignore`, with the same limitation; Once a directory - is excluded, it is not possible to include files inside the directory. + It works similarly to `.gitignore`, with the same limitation; Once a directory is excluded, it + is not possible to include files inside the directory. Example of use as an exclude pattern for the `backup` command: $HOME/**/* !$HOME/Documents !$HOME/code !$HOME/.emacs.d !$HOME/games # [...] - node_modules *~ *.o *.lo *.pyc # [...] $HOME/code/linux/* !$HOME/code/linux/.git - # [...] + node_modules *~ *.o *.lo *.pyc # [...] $HOME/code/linux/* !$HOME/code/linux/.git # [...] https://github.com/restic/restic/issues/233 https://github.com/restic/restic/pull/2311 * Enhancement #1542: Add `--dry-run`/`-n` option to `backup` command - Testing exclude filters and other configuration options was error prone as wrong - filters could cause files to be uploaded unintentionally. It was also not - possible to estimate beforehand how much data would be uploaded. + Testing exclude filters and other configuration options was error prone as wrong filters + could cause files to be uploaded unintentionally. It was also not possible to estimate + beforehand how much data would be uploaded. - The `backup` command now has a `--dry-run`/`-n` option, which performs all the - normal steps of a backup without actually writing anything to the repository. + The `backup` command now has a `--dry-run`/`-n` option, which performs all the normal steps of + a backup without actually writing anything to the repository. 
- Passing -vv will log information about files that would be added, allowing for - verification of source and exclusion options before running the real backup. + Passing -vv will log information about files that would be added, allowing for verification of + source and exclusion options before running the real backup. https://github.com/restic/restic/issues/1542 https://github.com/restic/restic/pull/2308 @@ -2031,14 +1972,14 @@ restic users. The changes are ordered by importance. * Enhancement #2202: Add upload checksum for Azure, GS, S3 and Swift backends - Previously only the B2 and partially the Swift backends verified the integrity - of uploaded (encrypted) files. The verification works by informing the backend - about the expected hash of the uploaded file. The backend then verifies the - upload and thereby rules out any data corruption during upload. + Previously only the B2 and partially the Swift backends verified the integrity of uploaded + (encrypted) files. The verification works by informing the backend about the expected hash of + the uploaded file. The backend then verifies the upload and thereby rules out any data + corruption during upload. - We have now added upload checksums for the Azure, GS, S3 and Swift backends, - which besides integrity checking for uploads also means that restic can now be - used to store backups in S3 buckets which have Object Lock enabled. + We have now added upload checksums for the Azure, GS, S3 and Swift backends, which besides + integrity checking for uploads also means that restic can now be used to store backups in S3 + buckets which have Object Lock enabled. https://github.com/restic/restic/issues/2202 https://github.com/restic/restic/issues/2700 @@ -2047,68 +1988,65 @@ restic users. The changes are ordered by importance. * Enhancement #2388: Add warning for S3 if partial credentials are provided - Previously restic did not notify about incomplete credentials when using the S3 - backend, instead just reporting access denied. + Previously restic did not notify about incomplete credentials when using the S3 backend, + instead just reporting access denied. - Restic now checks that both the AWS key ID and secret environment variables are - set before connecting to the remote server, and reports an error if not. + Restic now checks that both the AWS key ID and secret environment variables are set before + connecting to the remote server, and reports an error if not. https://github.com/restic/restic/issues/2388 https://github.com/restic/restic/pull/3532 * Enhancement #2508: Support JSON output and quiet mode for the `diff` command - The `diff` command now supports outputting machine-readable output in JSON - format. To enable this, pass the `--json` option to the command. To only print - the summary and suppress detailed output, pass the `--quiet` option. + The `diff` command now supports outputting machine-readable output in JSON format. To enable + this, pass the `--json` option to the command. To only print the summary and suppress detailed + output, pass the `--quiet` option. https://github.com/restic/restic/issues/2508 https://github.com/restic/restic/pull/3592 * Enhancement #2594: Speed up the `restore --verify` command - The `--verify` option lets the `restore` command verify the file content after - it has restored a snapshot. The performance of this operation has now been - improved by up to a factor of two. + The `--verify` option lets the `restore` command verify the file content after it has restored + a snapshot. 
The performance of this operation has now been improved by up to a factor of two. https://github.com/restic/restic/pull/2594 * Enhancement #2656: Add flag to disable TLS verification for self-signed certificates - There is now an `--insecure-tls` global option in restic, which disables TLS - verification for self-signed certificates in order to support some development - workflows. + There is now an `--insecure-tls` global option in restic, which disables TLS verification for + self-signed certificates in order to support some development workflows. https://github.com/restic/restic/issues/2656 https://github.com/restic/restic/pull/2657 * Enhancement #2816: The `backup` command no longer updates file access times on Linux - When reading files during backup, restic used to cause the operating system to - update the files' access times. Note that this did not apply to filesystems with - disabled file access times. + When reading files during backup, restic used to cause the operating system to update the + files' access times. Note that this did not apply to filesystems with disabled file access + times. - Restic now instructs the operating system not to update the file access time, if - the user running restic is the file owner or has root permissions. + Restic now instructs the operating system not to update the file access time, if the user + running restic is the file owner or has root permissions. https://github.com/restic/restic/pull/2816 * Enhancement #2880: Make `recover` collect only unreferenced trees - Previously, the `recover` command used to generate a snapshot containing *all* - root trees, even those which were already referenced by a snapshot. + Previously, the `recover` command used to generate a snapshot containing *all* root trees, + even those which were already referenced by a snapshot. - This has been improved such that it now only processes trees not already - referenced by any snapshot. + This has been improved such that it now only processes trees not already referenced by any + snapshot. https://github.com/restic/restic/pull/2880 * Enhancement #3003: Atomic uploads for the SFTP backend - The SFTP backend did not upload files atomically. An interrupted upload could - leave an incomplete file behind which could prevent restic from accessing the - repository. This has now been fixed and uploads in the SFTP backend are done - atomically. + The SFTP backend did not upload files atomically. An interrupted upload could leave an + incomplete file behind which could prevent restic from accessing the repository. This has now + been fixed and uploads in the SFTP backend are done atomically. https://github.com/restic/restic/issues/3003 https://github.com/restic/restic/pull/3524 @@ -2122,27 +2060,25 @@ restic users. The changes are ordered by importance. * Enhancement #3429: Verify that new or modified keys are stored correctly - When adding a new key or changing the password of a key, restic used to just - create the new key (and remove the old one, when changing the password). There - was no verification that the new key was stored correctly and works properly. As - the repository cannot be decrypted without a valid key file, this could in rare - cases cause the repository to become inaccessible. + When adding a new key or changing the password of a key, restic used to just create the new key (and + remove the old one, when changing the password). There was no verification that the new key was + stored correctly and works properly. 
As the repository cannot be decrypted without a valid key + file, this could in rare cases cause the repository to become inaccessible. - Restic now checks that new key files actually work before continuing. This can - protect against some (rare) cases of hardware or storage problems. + Restic now checks that new key files actually work before continuing. This can protect against + some (rare) cases of hardware or storage problems. https://github.com/restic/restic/pull/3429 * Enhancement #3436: Improve local backend's resilience to (system) crashes - Restic now ensures that files stored using the `local` backend are created - atomically (that is, files are either stored completely or not at all). This - ensures that no incomplete files are left behind even if restic is terminated - while writing a file. + Restic now ensures that files stored using the `local` backend are created atomically (that + is, files are either stored completely or not at all). This ensures that no incomplete files are + left behind even if restic is terminated while writing a file. - In addition, restic now tries to ensure that the directory in the repository - which contains a newly uploaded file is also written to disk. This can prevent - missing files if the system crashes or the disk is not properly unmounted. + In addition, restic now tries to ensure that the directory in the repository which contains a + newly uploaded file is also written to disk. This can prevent missing files if the system + crashes or the disk is not properly unmounted. https://github.com/restic/restic/pull/3436 @@ -2150,56 +2086,54 @@ restic users. The changes are ordered by importance. Restic used to silently ignore the `--no-lock` option of the `forget` command. - It now skips creation of lock file in case both `--dry-run` and `--no-lock` are - specified. If `--no-lock` option is specified without `--dry-run`, restic prints - a warning message to stderr. + It now skips creation of lock file in case both `--dry-run` and `--no-lock` are specified. If + `--no-lock` option is specified without `--dry-run`, restic prints a warning message to + stderr. https://github.com/restic/restic/issues/3464 https://github.com/restic/restic/pull/3623 * Enhancement #3490: Support random subset by size in `check --read-data-subset` - The `--read-data-subset` option of the `check` command now supports a third way - of specifying the subset to check, namely `nS` where `n` is a size in bytes with - suffix `S` as k/K, m/M, g/G or t/T. + The `--read-data-subset` option of the `check` command now supports a third way of specifying + the subset to check, namely `nS` where `n` is a size in bytes with suffix `S` as k/K, m/M, g/G or + t/T. https://github.com/restic/restic/issues/3490 https://github.com/restic/restic/pull/3548 * Enhancement #3508: Cache blobs read by the `dump` command - When dumping a file using the `dump` command, restic did not cache blobs in any - way, so even consecutive runs of the same blob were loaded from the repository - again and again, slowing down the dump. + When dumping a file using the `dump` command, restic did not cache blobs in any way, so even + consecutive runs of the same blob were loaded from the repository again and again, slowing down + the dump. - Now, the caching mechanism already used by the `fuse` command is also used by - the `dump` command. This makes dumping much faster, especially for sparse files. + Now, the caching mechanism already used by the `fuse` command is also used by the `dump` + command. 
This makes dumping much faster, especially for sparse files. https://github.com/restic/restic/pull/3508 * Enhancement #3511: Support configurable timeout for the rclone backend - A slow rclone backend could cause restic to time out while waiting for the - repository to open. Restic now offers an `-o rclone.timeout` option to make this - timeout configurable. + A slow rclone backend could cause restic to time out while waiting for the repository to open. + Restic now offers an `-o rclone.timeout` option to make this timeout configurable. https://github.com/restic/restic/issues/3511 https://github.com/restic/restic/pull/3514 * Enhancement #3541: Improve handling of temporary B2 delete errors - Deleting files on B2 could sometimes fail temporarily, which required restic to - retry the delete operation. In some cases the file was deleted nevertheless, - causing the retries and ultimately the restic command to fail. This has now been - fixed. + Deleting files on B2 could sometimes fail temporarily, which required restic to retry the + delete operation. In some cases the file was deleted nevertheless, causing the retries and + ultimately the restic command to fail. This has now been fixed. https://github.com/restic/restic/issues/3541 https://github.com/restic/restic/pull/3544 * Enhancement #3542: Add file mode in symbolic notation to `ls --json` - The `ls --json` command now provides the file mode in symbolic notation (using - the `permissions` key), aligned with `find --json`. + The `ls --json` command now provides the file mode in symbolic notation (using the + `permissions` key), aligned with `find --json`. https://github.com/restic/restic/issues/3542 https://github.com/restic/restic/pull/3573 @@ -2207,12 +2141,11 @@ restic users. The changes are ordered by importance. * Enhancement #3593: Improve `copy` performance by parallelizing IO - Restic copy previously only used a single thread for copying blobs between - repositories, which resulted in limited performance when copying small blobs - to/from a high latency backend (i.e. any remote backend, especially b2). + Restic copy previously only used a single thread for copying blobs between repositories, + which resulted in limited performance when copying small blobs to/from a high latency backend + (i.e. any remote backend, especially b2). - Copying will now use 8 parallel threads to increase the throughput of the copy - operation. + Copying will now use 8 parallel threads to increase the throughput of the copy operation. https://github.com/restic/restic/pull/3593 @@ -2250,9 +2183,9 @@ restic users. The changes are ordered by importance. * Bugfix #2742: Improve error handling for rclone and REST backend over HTTP2 - When retrieving data from the rclone / REST backend while also using HTTP2 - restic did not detect when no data was returned at all. This could cause for - example the `check` command to report the following error: + When retrieving data from the rclone / REST backend while also using HTTP2 restic did not detect + when no data was returned at all. This could cause for example the `check` command to report the + following error: Pack ID does not match, want [...], got e3b0c442 @@ -2264,105 +2197,98 @@ restic users. The changes are ordered by importance. * Bugfix #3111: Fix terminal output redirection for PowerShell - When redirecting the output of restic using PowerShell on Windows, the output - contained terminal escape characters. This has been fixed by properly detecting - the terminal type. 
+ When redirecting the output of restic using PowerShell on Windows, the output contained + terminal escape characters. This has been fixed by properly detecting the terminal type. - In addition, the mintty terminal now shows progress output for the backup - command. + In addition, the mintty terminal now shows progress output for the backup command. https://github.com/restic/restic/issues/3111 https://github.com/restic/restic/pull/3325 * Bugfix #3184: `backup --quiet` no longer prints status information - A regression in the latest restic version caused the output of `backup --quiet` - to contain large amounts of backup progress information when run using an - interactive terminal. This is fixed now. + A regression in the latest restic version caused the output of `backup --quiet` to contain + large amounts of backup progress information when run using an interactive terminal. This is + fixed now. - A workaround for this bug is to run restic as follows: `restic backup --quiet - [..] | cat -`. + A workaround for this bug is to run restic as follows: `restic backup --quiet [..] | cat -`. https://github.com/restic/restic/issues/3184 https://github.com/restic/restic/pull/3186 * Bugfix #3214: Treat an empty password as a fatal error for repository init - When attempting to initialize a new repository, if an empty password was - supplied, the repository would be created but the init command would return an - error with a stack trace. Now, if an empty password is provided, it is treated - as a fatal error, and no repository is created. + When attempting to initialize a new repository, if an empty password was supplied, the + repository would be created but the init command would return an error with a stack trace. Now, + if an empty password is provided, it is treated as a fatal error, and no repository is created. https://github.com/restic/restic/issues/3214 https://github.com/restic/restic/pull/3283 * Bugfix #3267: `copy` failed to copy snapshots in rare cases - The `copy` command could in rare cases fail with the error message - `SaveTree(...) returned unexpected id ...`. This has been fixed. + The `copy` command could in rare cases fail with the error message `SaveTree(...) returned + unexpected id ...`. This has been fixed. - On Linux/BSDs, the error could be caused by backing up symlinks with non-UTF-8 - target paths. Note that, due to limitations in the repository format, these are - not stored properly and should be avoided if possible. + On Linux/BSDs, the error could be caused by backing up symlinks with non-UTF-8 target paths. + Note that, due to limitations in the repository format, these are not stored properly and + should be avoided if possible. https://github.com/restic/restic/issues/3267 https://github.com/restic/restic/pull/3310 * Bugfix #3296: Fix crash of `check --read-data-subset=x%` run for an empty repository - The command `restic check --read-data-subset=x%` crashed when run for an empty - repository. This has been fixed. + The command `restic check --read-data-subset=x%` crashed when run for an empty repository. + This has been fixed. https://github.com/restic/restic/issues/3296 https://github.com/restic/restic/pull/3309 * Bugfix #3302: Fix `fdopendir: not a directory` error for local backend - The `check`, `list packs`, `prune` and `rebuild-index` commands failed for the - local backend when the `data` folder in the repository contained files. This has - been fixed. 
+ The `check`, `list packs`, `prune` and `rebuild-index` commands failed for the local backend + when the `data` folder in the repository contained files. This has been fixed. https://github.com/restic/restic/issues/3302 https://github.com/restic/restic/pull/3308 * Bugfix #3305: Fix possibly missing backup summary of JSON output in case of error - When using `--json` output it happened from time to time that the summary output - was missing in case an error occurred. This has been fixed. + When using `--json` output it happened from time to time that the summary output was missing in + case an error occurred. This has been fixed. https://github.com/restic/restic/pull/3305 * Bugfix #3334: Print `created new cache` message only on a terminal - The message `created new cache` was printed even when the output wasn't a - terminal. That broke piping `restic dump` output to tar or zip if cache - directory didn't exist. The message is now only printed on a terminal. + The message `created new cache` was printed even when the output wasn't a terminal. That broke + piping `restic dump` output to tar or zip if cache directory didn't exist. The message is now + only printed on a terminal. https://github.com/restic/restic/issues/3334 https://github.com/restic/restic/pull/3343 * Bugfix #3380: Fix crash of `backup --exclude='**'` - The exclude filter `**`, which excludes all files, caused restic to crash. This - has been corrected. + The exclude filter `**`, which excludes all files, caused restic to crash. This has been + corrected. https://github.com/restic/restic/issues/3380 https://github.com/restic/restic/pull/3393 * Bugfix #3439: Correctly handle download errors during `restore` - Due to a regression in restic 0.12.0, the `restore` command in some cases did - not retry download errors and only printed a warning. This has been fixed by - retrying incomplete data downloads. + Due to a regression in restic 0.12.0, the `restore` command in some cases did not retry download + errors and only printed a warning. This has been fixed by retrying incomplete data downloads. https://github.com/restic/restic/issues/3439 https://github.com/restic/restic/pull/3449 * Change #3247: Empty files now have size of 0 in `ls --json` output - The `ls --json` command used to omit the sizes of empty files in its output. It - now reports a size of zero explicitly for regular files, while omitting the size - field for all other types. + The `ls --json` command used to omit the sizes of empty files in its output. It now reports a size + of zero explicitly for regular files, while omitting the size field for all other types. https://github.com/restic/restic/issues/3247 https://github.com/restic/restic/pull/3257 @@ -2376,9 +2302,9 @@ restic users. The changes are ordered by importance. * Enhancement #3167: Allow specifying limit of `snapshots` list - The `--last` option allowed limiting the output of the `snapshots` command to - the latest snapshot for each host. The new `--latest n` option allows limiting - the output to the latest `n` snapshots. + The `--last` option allowed limiting the output of the `snapshots` command to the latest + snapshot for each host. The new `--latest n` option allows limiting the output to the latest `n` + snapshots. This change deprecates the option `--last` in favour of `--latest 1`. @@ -2386,15 +2312,13 @@ restic users. The changes are ordered by importance. 
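As an illustration of the `ls --json` behavior described in the #3247 entry above, the following hypothetical Python sketch sums the sizes of regular files in the latest snapshot. It assumes the line-per-object JSON output of these restic versions (a snapshot object followed by one object per node, identified here via the `struct_type` field) and that the repository and password are provided through the usual `RESTIC_*` environment variables.

```python
import json
import subprocess

# Hypothetical sketch: total up file sizes reported by `restic ls --json latest`.
# Assumes one JSON object per output line; per the #3247 entry, `size` is present
# for regular files (explicitly 0 for empty ones) and omitted for other node types.
proc = subprocess.run(
    ["restic", "ls", "--json", "latest"],
    capture_output=True, text=True, check=True,
)

total_bytes = 0
for line in proc.stdout.splitlines():
    line = line.strip()
    if not line:
        continue
    obj = json.loads(line)
    if obj.get("struct_type") == "node" and obj.get("type") == "file":
        total_bytes += obj.get("size", 0)

print(f"regular file bytes in latest snapshot: {total_bytes}")
```

Using `obj.get("size", 0)` keeps the sketch robust for node types that omit the size field, which is exactly the distinction the #3247 change introduced.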
* Enhancement #3293: Add `--repository-file2` option to `init` and `copy` command - The `init` and `copy` command can now be used with the `--repository-file2` - option or the `$RESTIC_REPOSITORY_FILE2` environment variable. These two options - are in addition to the `--repo2` flag and allow you to read the destination - repository from a file. + The `init` and `copy` command can now be used with the `--repository-file2` option or the + `$RESTIC_REPOSITORY_FILE2` environment variable. These two options are in addition to the + `--repo2` flag and allow you to read the destination repository from a file. - Using both `--repository-file` and `--repo2` options resulted in an error for - the `copy` or `init` command. The handling of this combination of options has - been fixed. A workaround for this issue is to only use `--repo` or `-r` and - `--repo2` for `init` or `copy`. + Using both `--repository-file` and `--repo2` options resulted in an error for the `copy` or + `init` command. The handling of this combination of options has been fixed. A workaround for + this issue is to only use `--repo` or `-r` and `--repo2` for `init` or `copy`. https://github.com/restic/restic/issues/3293 https://github.com/restic/restic/pull/3294 @@ -2407,9 +2331,9 @@ restic users. The changes are ordered by importance. * Enhancement #3336: SFTP backend now checks for disk space - Backing up over SFTP previously spewed multiple generic "failure" messages when - the remote disk was full. It now checks for disk space before writing a file and - fails immediately with a "no space left on device" message. + Backing up over SFTP previously spewed multiple generic "failure" messages when the remote + disk was full. It now checks for disk space before writing a file and fails immediately with a "no + space left on device" message. https://github.com/restic/restic/issues/3336 https://github.com/restic/restic/pull/3345 @@ -2423,17 +2347,15 @@ restic users. The changes are ordered by importance. * Enhancement #3414: Add `--keep-within-hourly` option to restic forget - The `forget` command allowed keeping a given number of hourly backups or to keep - all backups within a given interval, but it was not possible to specify keeping - hourly backups within a given interval. + The `forget` command allowed keeping a given number of hourly backups or to keep all backups + within a given interval, but it was not possible to specify keeping hourly backups within a + given interval. - The new `--keep-within-hourly` option now offers this functionality. Similar - options for daily/weekly/monthly/yearly are also implemented; the new options - are: + The new `--keep-within-hourly` option now offers this functionality. Similar options for + daily/weekly/monthly/yearly are also implemented; the new options are: - --keep-within-hourly <1y2m3d4h> --keep-within-daily <1y2m3d4h> - --keep-within-weekly <1y2m3d4h> --keep-within-monthly <1y2m3d4h> - --keep-within-yearly <1y2m3d4h> + --keep-within-hourly <1y2m3d4h> --keep-within-daily <1y2m3d4h> --keep-within-weekly + <1y2m3d4h> --keep-within-monthly <1y2m3d4h> --keep-within-yearly <1y2m3d4h> https://github.com/restic/restic/issues/3414 https://github.com/restic/restic/pull/3416 @@ -2441,32 +2363,30 @@ restic users. The changes are ordered by importance. * Enhancement #3426: Optimize read performance of mount command - Reading large files in a mounted repository may be up to five times faster.
This - improvement primarily applies to repositories stored at a backend that can be - accessed with low latency, like e.g. the local backend. + Reading large files in a mounted repository may be up to five times faster. This improvement + primarily applies to repositories stored at a backend that can be accessed with low latency, + like e.g. the local backend. https://github.com/restic/restic/pull/3426 * Enhancement #3427: `find --pack` fallback to index if data file is missing - When investigating a repository with missing data files, it might be useful to - determine affected snapshots before running `rebuild-index`. Previously, `find - --pack pack-id` returned no data as it required accessing the data file. Now, if - the necessary data is still available in the repository index, it gets retrieved - from there. + When investigating a repository with missing data files, it might be useful to determine + affected snapshots before running `rebuild-index`. Previously, `find --pack pack-id` + returned no data as it required accessing the data file. Now, if the necessary data is still + available in the repository index, it gets retrieved from there. - The command now also supports looking up multiple pack files in a single `find` - run. + The command now also supports looking up multiple pack files in a single `find` run. https://github.com/restic/restic/pull/3427 https://forum.restic.net/t/missing-packs-not-found/2600 * Enhancement #3456: Support filtering and specifying untagged snapshots - It was previously not possible to specify an empty tag with the `--tag` and - `--keep-tag` options. This has now been fixed, such that `--tag ''` and - `--keep-tag ''` now matches snapshots without tags. This allows e.g. the - `snapshots` and `forget` commands to only operate on untagged snapshots. + It was previously not possible to specify an empty tag with the `--tag` and `--keep-tag` + options. This has now been fixed, such that `--tag ''` and `--keep-tag ''` now matches + snapshots without tags. This allows e.g. the `snapshots` and `forget` commands to only + operate on untagged snapshots. https://github.com/restic/restic/issues/3456 https://github.com/restic/restic/pull/3457 @@ -2518,28 +2438,28 @@ restic users. The changes are ordered by importance. * Bugfix #1681: Make `mount` not create missing mount point directory - When specifying a non-existent directory as mount point for the `mount` command, - restic used to create the specified directory automatically. + When specifying a non-existent directory as mount point for the `mount` command, restic used + to create the specified directory automatically. - This has now changed such that restic instead gives an error when the specified - directory for the mount point does not exist. + This has now changed such that restic instead gives an error when the specified directory for + the mount point does not exist. https://github.com/restic/restic/issues/1681 https://github.com/restic/restic/pull/3008 * Bugfix #1800: Ignore `no data available` filesystem error during backup - Restic was unable to backup files on some filesystems, for example certain - configurations of CIFS on Linux which return a `no data available` error when - reading extended attributes. These errors are now ignored. + Restic was unable to backup files on some filesystems, for example certain configurations of + CIFS on Linux which return a `no data available` error when reading extended attributes. These + errors are now ignored. 
https://github.com/restic/restic/issues/1800 https://github.com/restic/restic/pull/3034 * Bugfix #2563: Report the correct owner of directories in FUSE mounts - Restic 0.10.0 changed the FUSE mount to always report the current user as the - owner of directories within the FUSE mount, which is incorrect. + Restic 0.10.0 changed the FUSE mount to always report the current user as the owner of + directories within the FUSE mount, which is incorrect. This is now changed back to reporting the correct owner of a directory. @@ -2548,31 +2468,30 @@ restic users. The changes are ordered by importance. * Bugfix #2688: Make `backup` and `tag` commands separate tags by comma - Running `restic backup --tag foo,bar` previously created snapshots with one - single tag containing a comma (`foo,bar`) instead of two tags (`foo`, `bar`). + Running `restic backup --tag foo,bar` previously created snapshots with one single tag + containing a comma (`foo,bar`) instead of two tags (`foo`, `bar`). - Similarly, the `tag` command's `--set`, `--add` and `--remove` options would - treat `foo,bar` as one tag instead of two tags. This was inconsistent with other - commands and often unexpected when one intended `foo,bar` to mean two tags. + Similarly, the `tag` command's `--set`, `--add` and `--remove` options would treat + `foo,bar` as one tag instead of two tags. This was inconsistent with other commands and often + unexpected when one intended `foo,bar` to mean two tags. - To be consistent in all commands, restic now interprets `foo,bar` to mean two - separate tags (`foo` and `bar`) instead of one tag (`foo,bar`) everywhere, - including in the `backup` and `tag` commands. + To be consistent in all commands, restic now interprets `foo,bar` to mean two separate tags + (`foo` and `bar`) instead of one tag (`foo,bar`) everywhere, including in the `backup` and + `tag` commands. - NOTE: This change might result in unexpected behavior in cases where you use the - `forget` command and filter on tags like `foo,bar`. Snapshots previously backed - up with `--tag foo,bar` will still not match that filter, but snapshots saved - from now on will match that filter. + NOTE: This change might result in unexpected behavior in cases where you use the `forget` + command and filter on tags like `foo,bar`. Snapshots previously backed up with `--tag + foo,bar` will still not match that filter, but snapshots saved from now on will match that + filter. - To replace `foo,bar` tags with `foo` and `bar` tags in old snapshots, you can - first generate a list of the relevant snapshots using a command like: + To replace `foo,bar` tags with `foo` and `bar` tags in old snapshots, you can first generate a + list of the relevant snapshots using a command like: - Restic snapshots --json --quiet | jq '.[] | select(contains({tags: - ["foo,bar"]})) | .id' + Restic snapshots --json --quiet | jq '.[] | select(contains({tags: ["foo,bar"]})) | .id' - And then use `restic tag --set foo --set bar snapshotID [...]` to set the new - tags. Please adjust the commands to include real tag names and any additional - tags, as well as the list of snapshots to process. + And then use `restic tag --set foo --set bar snapshotID [...]` to set the new tags. Please adjust + the commands to include real tag names and any additional tags, as well as the list of snapshots + to process. https://github.com/restic/restic/issues/2688 https://github.com/restic/restic/pull/2690 @@ -2586,14 +2505,14 @@ restic users. The changes are ordered by importance. 
* Bugfix #3014: Fix sporadic stream reset between rclone and restic - Sometimes when using restic with the `rclone` backend, an error message similar - to the following would be printed: + Sometimes when using restic with the `rclone` backend, an error message similar to the + following would be printed: Didn't finish writing GET request (wrote 0/xxx): http2: stream closed - It was found that this was caused by restic closing the connection to rclone too - soon when downloading data. A workaround has been added which waits for the end - of the download before closing the connection. + It was found that this was caused by restic closing the connection to rclone too soon when + downloading data. A workaround has been added which waits for the end of the download before + closing the connection. https://github.com/rclone/rclone/issues/2598 https://github.com/restic/restic/pull/3014 @@ -2611,130 +2530,125 @@ restic users. The changes are ordered by importance. * Bugfix #3100: Do not require gs bucket permissions when running `init` - Restic used to require bucket level permissions for the `gs` backend in order to - initialize a restic repository. + Restic used to require bucket level permissions for the `gs` backend in order to initialize a + restic repository. - It now allows a `gs` service account to initialize a repository if the bucket - does exist and the service account has permissions to write/read to that bucket. + It now allows a `gs` service account to initialize a repository if the bucket does exist and the + service account has permissions to write/read to that bucket. https://github.com/restic/restic/issues/3100 * Bugfix #3111: Correctly detect output redirection for `backup` command on Windows - On Windows, since restic 0.10.0 the `backup` command did not properly detect - when the output was redirected to a file. This caused restic to output terminal - control characters. This has been fixed by correcting the terminal detection. + On Windows, since restic 0.10.0 the `backup` command did not properly detect when the output + was redirected to a file. This caused restic to output terminal control characters. This has + been fixed by correcting the terminal detection. https://github.com/restic/restic/issues/3111 https://github.com/restic/restic/pull/3150 * Bugfix #3151: Don't create invalid snapshots when `backup` is interrupted - When canceling a backup run at a certain moment it was possible that restic - created a snapshot with an invalid "null" tree. This caused `check` and other - operations to fail. The `backup` command now properly handles interruptions and - never saves a snapshot when interrupted. + When canceling a backup run at a certain moment it was possible that restic created a snapshot + with an invalid "null" tree. This caused `check` and other operations to fail. The `backup` + command now properly handles interruptions and never saves a snapshot when interrupted. https://github.com/restic/restic/issues/3151 https://github.com/restic/restic/pull/3164 * Bugfix #3152: Do not hang until foregrounded when completed in background - On Linux, when running in the background restic failed to stop the terminal - output of the `backup` command after it had completed. This caused restic to - hang until moved to the foreground. This has now been fixed. + On Linux, when running in the background restic failed to stop the terminal output of the + `backup` command after it had completed. This caused restic to hang until moved to the + foreground. This has now been fixed.
https://github.com/restic/restic/pull/3152 https://forum.restic.net/t/restic-alpine-container-cron-hangs-epoll-pwait/3334 * Bugfix #3166: Improve error handling in the `restore` command - The `restore` command used to not print errors while downloading file contents - from the repository. It also incorrectly exited with a zero error code even when - there were errors during the restore process. This has all been fixed and - `restore` now returns with a non-zero exit code when there's an error. + The `restore` command used to not print errors while downloading file contents from the + repository. It also incorrectly exited with a zero error code even when there were errors + during the restore process. This has all been fixed and `restore` now returns with a non-zero + exit code when there's an error. https://github.com/restic/restic/issues/3166 https://github.com/restic/restic/pull/3207 * Bugfix #3232: Correct statistics for overlapping targets - A user reported that restic's statistics and progress information during backup - was not correctly calculated when the backup targets (files/dirs to save) - overlap. For example, consider a directory `foo` which contains (among others) a - file `foo/bar`. When `restic backup foo foo/bar` was run, restic counted the - size of the file `foo/bar` twice, so the completeness percentage as well as the - number of files was wrong. This is now corrected. + A user reported that restic's statistics and progress information during backup was not + correctly calculated when the backup targets (files/dirs to save) overlap. For example, + consider a directory `foo` which contains (among others) a file `foo/bar`. When `restic + backup foo foo/bar` was run, restic counted the size of the file `foo/bar` twice, so the + completeness percentage as well as the number of files was wrong. This is now corrected. https://github.com/restic/restic/issues/3232 https://github.com/restic/restic/pull/3243 * Bugfix #3249: Improve error handling in `gs` backend - The `gs` backend did not notice when the last step of completing a file upload - failed. Under rare circumstances, this could cause missing files in the backup - repository. This has now been fixed. + The `gs` backend did not notice when the last step of completing a file upload failed. Under rare + circumstances, this could cause missing files in the backup repository. This has now been + fixed. https://github.com/restic/restic/pull/3249 * Change #3095: Deleting files on Google Drive now moves them to the trash - When deleting files on Google Drive via the `rclone` backend, restic used to - bypass the trash folder unless one used the `-o rclone.args` option to - enable usage of the trash folder. This ensured that deleted files in Google - Drive were not kept indefinitely in the trash folder. However, since Google - Drive's trash retention policy changed to deleting trashed files after 30 days, - this is no longer needed. + When deleting files on Google Drive via the `rclone` backend, restic used to bypass the trash + folder unless one used the `-o rclone.args` option to enable usage of the trash folder. + This ensured that deleted files in Google Drive were not kept indefinitely in the trash folder. + However, since Google Drive's trash retention policy changed to deleting trashed files after + 30 days, this is no longer needed. - Restic now leaves it up to rclone and its configuration to use or not use the - trash folder when deleting files. The default is to use the trash folder, as of - rclone 1.53.2.
To re-enable the restic 0.11 behavior, set the - `RCLONE_DRIVE_USE_TRASH` environment variable or change the rclone - configuration. See the rclone documentation for more details. + Restic now leaves it up to rclone and its configuration to use or not use the trash folder when + deleting files. The default is to use the trash folder, as of rclone 1.53.2. To re-enable the + restic 0.11 behavior, set the `RCLONE_DRIVE_USE_TRASH` environment variable or change the + rclone configuration. See the rclone documentation for more details. https://github.com/restic/restic/issues/3095 https://github.com/restic/restic/pull/3102 * Enhancement #909: Back up mountpoints as empty directories - When the `--one-file-system` option is specified to `restic backup`, it ignores - all file systems mounted below one of the target directories. This means that - when a snapshot is restored, users needed to manually recreate the mountpoint - directories. + When the `--one-file-system` option is specified to `restic backup`, it ignores all file + systems mounted below one of the target directories. This means that when a snapshot is + restored, users needed to manually recreate the mountpoint directories. - Restic now backs up mountpoints as empty directories and therefore implements - the same approach as `tar`. + Restic now backs up mountpoints as empty directories and therefore implements the same + approach as `tar`. https://github.com/restic/restic/issues/909 https://github.com/restic/restic/pull/3119 * Enhancement #2186: Allow specifying percentage in `check --read-data-subset` - We've enhanced the `check` command's `--read-data-subset` option to also accept - a percentage (e.g. `2.5%` or `10%`). This will check the given percentage of - pack files (which are randomly selected on each run). + We've enhanced the `check` command's `--read-data-subset` option to also accept a + percentage (e.g. `2.5%` or `10%`). This will check the given percentage of pack files (which + are randomly selected on each run). https://github.com/restic/restic/issues/2186 https://github.com/restic/restic/pull/3038 * Enhancement #2433: Make the `dump` command support `zip` format - Previously, restic could dump the contents of a whole folder structure only in - the `tar` format. The `dump` command now has a new flag to change output format - to `zip`. Just pass `--archive zip` as an option to `restic dump`. + Previously, restic could dump the contents of a whole folder structure only in the `tar` + format. The `dump` command now has a new flag to change output format to `zip`. Just pass + `--archive zip` as an option to `restic dump`. https://github.com/restic/restic/pull/2433 https://github.com/restic/restic/pull/3081 * Enhancement #2453: Report permanent/fatal backend errors earlier - When encountering errors in reading from or writing to storage backends, restic - retries the failing operation up to nine times (for a total of ten attempts). It - used to retry all backend operations, but now detects some permanent error - conditions so that it can report fatal errors earlier. + When encountering errors in reading from or writing to storage backends, restic retries the + failing operation up to nine times (for a total of ten attempts). It used to retry all backend + operations, but now detects some permanent error conditions so that it can report fatal errors + earlier. - Permanent failures include local disks being full, SSH connections dropping and - permission errors. 
+ Permanent failures include local disks being full, SSH connections dropping and permission + errors. https://github.com/restic/restic/issues/2453 https://github.com/restic/restic/issues/3180 @@ -2743,26 +2657,23 @@ restic users. The changes are ordered by importance. * Enhancement #2495: Add option to let `backup` trust mtime without checking ctime - The `backup` command used to require that both `ctime` and `mtime` of a file - matched with a previously backed up version to determine that the file was - unchanged. In other words, if either `ctime` or `mtime` of the file had changed, - it would be considered changed and restic would read the file's content again to - back up the relevant (changed) parts of it. - - The new option `--ignore-ctime` makes restic look at `mtime` only, such that - `ctime` changes for a file does not cause restic to read the file's contents - again. - - The check for both `ctime` and `mtime` was introduced in restic 0.9.6 to make - backups more reliable in the face of programs that reset `mtime` (some Unix - archivers do that), but it turned out to often be expensive because it made - restic read file contents even if only the metadata (owner, permissions) of a - file had changed. The new `--ignore-ctime` option lets the user restore the - 0.9.5 behavior when needed. The existing `--ignore-inode` option already turned + The `backup` command used to require that both `ctime` and `mtime` of a file matched with a + previously backed up version to determine that the file was unchanged. In other words, if + either `ctime` or `mtime` of the file had changed, it would be considered changed and restic + would read the file's content again to back up the relevant (changed) parts of it. + + The new option `--ignore-ctime` makes restic look at `mtime` only, such that `ctime` changes + for a file does not cause restic to read the file's contents again. + + The check for both `ctime` and `mtime` was introduced in restic 0.9.6 to make backups more + reliable in the face of programs that reset `mtime` (some Unix archivers do that), but it turned + out to often be expensive because it made restic read file contents even if only the metadata + (owner, permissions) of a file had changed. The new `--ignore-ctime` option lets the user + restore the 0.9.5 behavior when needed. The existing `--ignore-inode` option already turned off this behavior, but also removed a different check. - Please note that changes in files' metadata are still recorded, regardless of - the command line options provided to the backup command. + Please note that changes in files' metadata are still recorded, regardless of the command line + options provided to the backup command. https://github.com/restic/restic/issues/2495 https://github.com/restic/restic/issues/2558 @@ -2771,21 +2682,20 @@ restic users. The changes are ordered by importance. * Enhancement #2528: Add Alibaba/Aliyun OSS support in the `s3` backend - A new extended option `s3.bucket-lookup` has been added to support - Alibaba/Aliyun OSS in the `s3` backend. The option can be set to one of the - following values: + A new extended option `s3.bucket-lookup` has been added to support Alibaba/Aliyun OSS in the + `s3` backend. 
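As a minimal example of the `--ignore-ctime` option described above, a backup that only consults `mtime` could be started as follows (the path is a placeholder):

    restic backup --ignore-ctime /home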
The option can be set to one of the following values: - - `auto` - Existing behaviour - `dns` - Use DNS style bucket access - `path` - - Use path style bucket access + - `auto` - Existing behaviour - `dns` - Use DNS style bucket access - `path` - Use path style + bucket access - To make the `s3` backend work with Alibaba/Aliyun OSS you must set - `s3.bucket-lookup` to `dns` and set the `s3.region` parameter. For example: + To make the `s3` backend work with Alibaba/Aliyun OSS you must set `s3.bucket-lookup` to `dns` + and set the `s3.region` parameter. For example: Restic -o s3.bucket-lookup=dns -o s3.region=oss-eu-west-1 -r s3:https://oss-eu-west-1.aliyuncs.com/bucketname init - Note that `s3.region` must be set, otherwise the MinIO SDK tries to look it up - and it seems that Alibaba doesn't support that properly. + Note that `s3.region` must be set, otherwise the MinIO SDK tries to look it up and it seems that + Alibaba doesn't support that properly. https://github.com/restic/restic/issues/2528 https://github.com/restic/restic/pull/2535 @@ -2794,14 +2704,14 @@ restic users. The changes are ordered by importance. The `backup`, `check` and `prune` commands never printed any progress reports on non-interactive terminals. This behavior is now configurable using the - `RESTIC_PROGRESS_FPS` environment variable. Use for example a value of `1` for - an update every second, or `0.01666` for an update every minute. + `RESTIC_PROGRESS_FPS` environment variable. Use for example a value of `1` for an update + every second, or `0.01666` for an update every minute. - The `backup` command now also prints the current progress when restic receives a - `SIGUSR1` signal. + The `backup` command now also prints the current progress when restic receives a `SIGUSR1` + signal. - Setting the `RESTIC_PROGRESS_FPS` environment variable or sending a `SIGUSR1` - signal prints a status report even when `--quiet` was specified. + Setting the `RESTIC_PROGRESS_FPS` environment variable or sending a `SIGUSR1` signal + prints a status report even when `--quiet` was specified. https://github.com/restic/restic/issues/2706 https://github.com/restic/restic/issues/3194 @@ -2809,22 +2719,21 @@ restic users. The changes are ordered by importance. * Enhancement #2718: Improve `prune` performance and make it more customizable - The `prune` command is now much faster. This is especially the case for remote - repositories or repositories with not much data to remove. Also the memory usage - of the `prune` command is now reduced. + The `prune` command is now much faster. This is especially the case for remote repositories or + repositories with not much data to remove. Also the memory usage of the `prune` command is now + reduced. - Restic used to rebuild the index from scratch after pruning. This could lead to - missing packs in the index in some cases for eventually consistent backends such - as e.g. AWS S3. This behavior is now changed and the index rebuilding uses the - information already known by `prune`. + Restic used to rebuild the index from scratch after pruning. This could lead to missing packs in + the index in some cases for eventually consistent backends such as e.g. AWS S3. This behavior is + now changed and the index rebuilding uses the information already known by `prune`. - By default, the `prune` command no longer removes all unused data. This behavior - can be fine-tuned by new options, like the acceptable amount of unused space or - the maximum size of data to reorganize. 
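A short sketch of how the progress reporting described above might be used in a non-interactive job; the once-per-minute rate and the use of `pidof` are assumptions made for this example:

    RESTIC_PROGRESS_FPS=0.01666 restic backup /srv   # roughly one status update per minute
    kill -USR1 "$(pidof restic)"                     # make a running backup print its progress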
For more details, please see + By default, the `prune` command no longer removes all unused data. This behavior can be + fine-tuned by new options, like the acceptable amount of unused space or the maximum size of + data to reorganize. For more details, please see https://restic.readthedocs.io/en/stable/060_forget.html . - Moreover, `prune` now accepts the `--dry-run` option and also running `forget - --dry-run --prune` will show what `prune` would do. + Moreover, `prune` now accepts the `--dry-run` option and also running `forget --dry-run + --prune` will show what `prune` would do. This enhancement also fixes several open issues, e.g.: - https://github.com/restic/restic/issues/1140 - @@ -2839,74 +2748,68 @@ restic users. The changes are ordered by importance. * Enhancement #2941: Speed up the repacking step of the `prune` command - The repack step of the `prune` command, which moves still used file parts into - new pack files such that the old ones can be garbage collected later on, now - processes multiple pack files in parallel. This is especially beneficial for - high latency backends or when using a fast network connection. + The repack step of the `prune` command, which moves still used file parts into new pack files + such that the old ones can be garbage collected later on, now processes multiple pack files in + parallel. This is especially beneficial for high latency backends or when using a fast network + connection. https://github.com/restic/restic/pull/2941 * Enhancement #2944: Add `backup` options `--files-from-{verbatim,raw}` - The new `backup` options `--files-from-verbatim` and `--files-from-raw` read a - list of files to back up from a file. Unlike the existing `--files-from` option, - these options do not interpret the listed filenames as glob patterns; instead, - whitespace in filenames is preserved as-is and no pattern expansion is done. - Please see the documentation for specifics. + The new `backup` options `--files-from-verbatim` and `--files-from-raw` read a list of + files to back up from a file. Unlike the existing `--files-from` option, these options do not + interpret the listed filenames as glob patterns; instead, whitespace in filenames is + preserved as-is and no pattern expansion is done. Please see the documentation for specifics. - These new options are highly recommended over `--files-from`, when using a - script to generate the list of files to back up. + These new options are highly recommended over `--files-from`, when using a script to generate + the list of files to back up. https://github.com/restic/restic/issues/2944 https://github.com/restic/restic/issues/3013 * Enhancement #3006: Speed up the `rebuild-index` command - We've optimized the `rebuild-index` command. Now, existing index entries are - used to minimize the number of pack files that must be read. This speeds up the - index rebuild a lot. + We've optimized the `rebuild-index` command. Now, existing index entries are used to + minimize the number of pack files that must be read. This speeds up the index rebuild a lot. - Additionally, the option `--read-all-packs` has been added, implementing the - previous behavior. + Additionally, the option `--read-all-packs` has been added, implementing the previous + behavior. 
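As a sketch of the script-driven use case mentioned above, a file list could be generated and passed verbatim to `backup`; generating the list with `find` is an assumption, not a requirement:

    find /srv/www -name '*.conf' > filelist.txt
    restic backup --files-from-verbatim filelist.txt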
https://github.com/restic/restic/pull/3006 https://github.com/restic/restic/issue/2547 * Enhancement #3048: Add more checks for index and pack files in the `check` command - The `check` command run with the `--read-data` or `--read-data-subset` options - used to only verify only the pack file content - it did not check if the blobs - within the pack are correctly contained in the index. + The `check` command run with the `--read-data` or `--read-data-subset` options used to only + verify only the pack file content - it did not check if the blobs within the pack are correctly + contained in the index. A check for the latter is now in place, which can print the following error: Blob ID is not contained in index or position is incorrect - Another test is also added, which compares pack file sizes computed from the - index and the pack header with the actual file size. This test is able to detect - truncated pack files. + Another test is also added, which compares pack file sizes computed from the index and the pack + header with the actual file size. This test is able to detect truncated pack files. - If the index is not correct, it can be rebuilt by using the `rebuild-index` - command. + If the index is not correct, it can be rebuilt by using the `rebuild-index` command. - Having added these tests, `restic check` is now able to detect non-existing - blobs which are wrongly referenced in the index. This situation could have lead - to missing data. + Having added these tests, `restic check` is now able to detect non-existing blobs which are + wrongly referenced in the index. This situation could have lead to missing data. https://github.com/restic/restic/pull/3048 https://github.com/restic/restic/pull/3082 * Enhancement #3083: Allow usage of deprecated S3 `ListObjects` API - Some S3 API implementations, e.g. Ceph before version 14.2.5, have a broken - `ListObjectsV2` implementation which causes problems for restic when using their - API endpoints. When a broken server implementation is used, restic prints errors - similar to the following: + Some S3 API implementations, e.g. Ceph before version 14.2.5, have a broken `ListObjectsV2` + implementation which causes problems for restic when using their API endpoints. When a broken + server implementation is used, restic prints errors similar to the following: List() returned error: Truncated response should have continuation token set - As a temporary workaround, restic now allows using the older `ListObjects` - endpoint by setting the `s3.list-objects-v1` extended option, for instance: + As a temporary workaround, restic now allows using the older `ListObjects` endpoint by + setting the `s3.list-objects-v1` extended option, for instance: Restic -o s3.list-objects-v1=true snapshots @@ -2917,30 +2820,28 @@ restic users. The changes are ordered by importance. * Enhancement #3099: Reduce memory usage of `check` command - The `check` command now requires less memory if it is run without the - `--check-unused` option. + The `check` command now requires less memory if it is run without the `--check-unused` option. https://github.com/restic/restic/pull/3099 * Enhancement #3106: Parallelize scan of snapshot content in `copy` and `prune` - The `copy` and `prune` commands used to traverse the directories of snapshots - one by one to find used data. This snapshot traversal is now parallized which - can speed up this step several times. + The `copy` and `prune` commands used to traverse the directories of snapshots one by one to find + used data. 
This snapshot traversal is now parallized which can speed up this step several + times. - In addition the `check` command now reports how many snapshots have already been - processed. + In addition the `check` command now reports how many snapshots have already been processed. https://github.com/restic/restic/pull/3106 * Enhancement #3130: Parallelize reading of locks and snapshots - Restic used to read snapshots sequentially. For repositories containing many - snapshots this slowed down commands which have to read all snapshots. + Restic used to read snapshots sequentially. For repositories containing many snapshots this + slowed down commands which have to read all snapshots. - Now the reading of snapshots is parallelized. This speeds up for example - `prune`, `backup` and other commands that search for snapshots with certain - properties or which have to find the `latest` snapshot. + Now the reading of snapshots is parallelized. This speeds up for example `prune`, `backup` and + other commands that search for snapshots with certain properties or which have to find the + `latest` snapshot. The speed up also applies to locks stored in the backup repository. @@ -2949,39 +2850,37 @@ restic users. The changes are ordered by importance. * Enhancement #3147: Support additional environment variables for Swift authentication - The `swift` backend now supports the following additional environment variables - for passing authentication details to restic: `OS_USER_ID`, `OS_USER_DOMAIN_ID`, + The `swift` backend now supports the following additional environment variables for passing + authentication details to restic: `OS_USER_ID`, `OS_USER_DOMAIN_ID`, `OS_PROJECT_DOMAIN_ID` and `OS_TRUST_ID` - Depending on the `openrc` configuration file these might be required when the - user and project domains differ from one another. + Depending on the `openrc` configuration file these might be required when the user and project + domains differ from one another. https://github.com/restic/restic/issues/3147 https://github.com/restic/restic/pull/3158 * Enhancement #3191: Add release binaries for MIPS architectures - We've added a few new architectures for Linux to the release binaries: `mips`, - `mipsle`, `mips64`, and `mip64le`. MIPS is mostly used for low-end embedded - systems. + We've added a few new architectures for Linux to the release binaries: `mips`, `mipsle`, + `mips64`, and `mip64le`. MIPS is mostly used for low-end embedded systems. https://github.com/restic/restic/issues/3191 https://github.com/restic/restic/pull/3208 * Enhancement #3250: Add several more error checks - We've added a lot more error checks in places where errors were previously - ignored (as hinted by the static analysis program `errcheck` via - `golangci-lint`). + We've added a lot more error checks in places where errors were previously ignored (as hinted by + the static analysis program `errcheck` via `golangci-lint`). https://github.com/restic/restic/pull/3250 * Enhancement #3254: Enable HTTP/2 for backend connections - Go's HTTP library usually automatically chooses between HTTP/1.x and HTTP/2 - depending on what the server supports. But for compatibility this mechanism is - disabled if DialContext is used (which is the case for restic). This change - allows restic's HTTP client to negotiate HTTP/2 if supported by the server. + Go's HTTP library usually automatically chooses between HTTP/1.x and HTTP/2 depending on + what the server supports. 
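For example, the additional Swift variables listed above could be exported before invoking restic; the values and the repository URL are placeholders:

    export OS_USER_ID="<user-id>"
    export OS_USER_DOMAIN_ID="<user-domain-id>"
    export OS_PROJECT_DOMAIN_ID="<project-domain-id>"
    export OS_TRUST_ID="<trust-id>"
    restic -r swift:container:/ snapshots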
But for compatibility this mechanism is disabled if DialContext is + used (which is the case for restic). This change allows restic's HTTP client to negotiate + HTTP/2 if supported by the server. https://github.com/restic/restic/pull/3254 @@ -3012,11 +2911,11 @@ restic users. The changes are ordered by importance. * Bugfix #1212: Restore timestamps and permissions on intermediate directories - When using the `--include` option of the restore command, restic restored - timestamps and permissions only on directories selected by the include pattern. - Intermediate directories, which are necessary to restore files located in sub- - directories, were created with default permissions. We've fixed the restore - command to restore timestamps and permissions for these directories as well. + When using the `--include` option of the restore command, restic restored timestamps and + permissions only on directories selected by the include pattern. Intermediate directories, + which are necessary to restore files located in sub- directories, were created with default + permissions. We've fixed the restore command to restore timestamps and permissions for these + directories as well. https://github.com/restic/restic/issues/1212 https://github.com/restic/restic/issues/1402 @@ -3024,15 +2923,13 @@ restic users. The changes are ordered by importance. * Bugfix #1756: Mark repository files as read-only when using the local backend - Files stored in a local repository were marked as writeable on the filesystem - for non-Windows systems, which did not prevent accidental file modifications - outside of restic. In addition, the local backend did not work with certain - filesystems and network mounts which do not permit modifications of file - permissions. + Files stored in a local repository were marked as writeable on the filesystem for non-Windows + systems, which did not prevent accidental file modifications outside of restic. In addition, + the local backend did not work with certain filesystems and network mounts which do not permit + modifications of file permissions. - Restic now marks files stored in a local repository as read-only on the - filesystem on non-Windows systems. The error handling is improved to support - more filesystems. + Restic now marks files stored in a local repository as read-only on the filesystem on + non-Windows systems. The error handling is improved to support more filesystems. https://github.com/restic/restic/issues/1756 https://github.com/restic/restic/issues/2157 @@ -3040,9 +2937,8 @@ restic users. The changes are ordered by importance. * Bugfix #2241: Hide password in REST backend repository URLs - When using a password in the REST backend repository URL, the password could in - some cases be included in the output from restic, e.g. when initializing a repo - or during an error. + When using a password in the REST backend repository URL, the password could in some cases be + included in the output from restic, e.g. when initializing a repo or during an error. The password is now replaced with "***" where applicable. @@ -3051,11 +2947,10 @@ restic users. The changes are ordered by importance. * Bugfix #2319: Correctly dump directories into tar files - The dump command previously wrote directories in a tar file in a way which can - cause compatibility problems. This caused, for example, 7zip on Windows to not - open tar files containing directories. In addition it was not possible to dump - directories with extended attributes. These compatibility problems are now - corrected. 
+ The dump command previously wrote directories in a tar file in a way which can cause + compatibility problems. This caused, for example, 7zip on Windows to not open tar files + containing directories. In addition it was not possible to dump directories with extended + attributes. These compatibility problems are now corrected. In addition, a tar file now includes the name of the owner and group of a file. @@ -3064,18 +2959,17 @@ restic users. The changes are ordered by importance. * Bugfix #2491: Don't require `self-update --output` placeholder file - `restic self-update --output /path/to/new-restic` used to require that - new-restic was an existing file, to be overwritten. Now it's possible to - download an updated restic binary to a new path, without first having to create - a placeholder file. + `restic self-update --output /path/to/new-restic` used to require that new-restic was an + existing file, to be overwritten. Now it's possible to download an updated restic binary to a + new path, without first having to create a placeholder file. https://github.com/restic/restic/issues/2491 https://github.com/restic/restic/pull/2937 * Bugfix #2834: Fix rare cases of backup command hanging forever - We've fixed an issue with the backup progress reporting which could cause restic - to hang forever right before finishing a backup. + We've fixed an issue with the backup progress reporting which could cause restic to hang + forever right before finishing a backup. https://github.com/restic/restic/issues/2834 https://github.com/restic/restic/pull/2963 @@ -3089,50 +2983,47 @@ restic users. The changes are ordered by importance. * Bugfix #2942: Make --exclude-larger-than handle disappearing files - There was a small bug in the backup command's --exclude-larger-than option where - files that disappeared between scanning and actually backing them up to the - repository caused a panic. This is now fixed. + There was a small bug in the backup command's --exclude-larger-than option where files that + disappeared between scanning and actually backing them up to the repository caused a panic. + This is now fixed. https://github.com/restic/restic/issues/2942 * Bugfix #2951: Restic generate, help and self-update no longer check passwords - The commands `restic cache`, `generate`, `help` and `self-update` don't need - passwords, but they previously did run the RESTIC_PASSWORD_COMMAND (if set in - the environment), prompting users to authenticate for no reason. They now skip - running the password command. + The commands `restic cache`, `generate`, `help` and `self-update` don't need passwords, but + they previously did run the RESTIC_PASSWORD_COMMAND (if set in the environment), prompting + users to authenticate for no reason. They now skip running the password command. https://github.com/restic/restic/issues/2951 https://github.com/restic/restic/pull/2987 * Bugfix #2979: Make snapshots --json output [] instead of null when no snapshots - Restic previously output `null` instead of `[]` for the `--json snapshots` - command, when there were no snapshots in the repository. This caused some minor - problems when parsing the output, but is now fixed such that `[]` is output when - the list of snapshots is empty. + Restic previously output `null` instead of `[]` for the `--json snapshots` command, when + there were no snapshots in the repository. This caused some minor problems when parsing the + output, but is now fixed such that `[]` is output when the list of snapshots is empty. 
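With `[]` now returned for an empty repository, scripts can consume the output without special-casing `null`, for example (using `jq` is an assumption of this sketch):

    restic snapshots --json | jq length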
https://github.com/restic/restic/issues/2979 https://github.com/restic/restic/pull/2984 * Enhancement #340: Add support for Volume Shadow Copy Service (VSS) on Windows - Volume Shadow Copy Service allows read access to files that are locked by - another process using an exclusive lock through a filesystem snapshot. Restic - was unable to backup those files before. This update enables backing up these - files. + Volume Shadow Copy Service allows read access to files that are locked by another process using + an exclusive lock through a filesystem snapshot. Restic was unable to backup those files + before. This update enables backing up these files. - This needs to be enabled explicitely using the --use-fs-snapshot option of the - backup command. + This needs to be enabled explicitely using the --use-fs-snapshot option of the backup + command. https://github.com/restic/restic/issues/340 https://github.com/restic/restic/pull/2274 * Enhancement #1458: New option --repository-file - We've added a new command-line option --repository-file as an alternative to -r. - This allows to read the repository URL from a file in order to prevent certain - types of information leaks, especially for URLs containing credentials. + We've added a new command-line option --repository-file as an alternative to -r. This allows + to read the repository URL from a file in order to prevent certain types of information leaks, + especially for URLs containing credentials. https://github.com/restic/restic/issues/1458 https://github.com/restic/restic/issues/2900 @@ -3140,29 +3031,27 @@ restic users. The changes are ordered by importance. * Enhancement #2849: Authenticate to Google Cloud Storage with access token - When using the GCS backend, it is now possible to authenticate with OAuth2 - access tokens instead of a credentials file by setting the GOOGLE_ACCESS_TOKEN - environment variable. + When using the GCS backend, it is now possible to authenticate with OAuth2 access tokens + instead of a credentials file by setting the GOOGLE_ACCESS_TOKEN environment variable. https://github.com/restic/restic/pull/2849 * Enhancement #2969: Optimize check for unchanged files during backup - During a backup restic skips processing files which have not changed since the - last backup run. Previously this required opening each file once which can be - slow on network filesystems. The backup command now checks for file changes - before opening a file. This considerably reduces the time to create a backup on - network filesystems. + During a backup restic skips processing files which have not changed since the last backup run. + Previously this required opening each file once which can be slow on network filesystems. The + backup command now checks for file changes before opening a file. This considerably reduces + the time to create a backup on network filesystems. https://github.com/restic/restic/issues/2969 https://github.com/restic/restic/pull/2970 * Enhancement #2978: Warn if parent snapshot cannot be loaded during backup - During a backup restic uses the parent snapshot to check whether a file was - changed and has to be backed up again. For this check the backup has to read the - directories contained in the old snapshot. If a tree blob cannot be loaded, - restic now warns about this problem with the backup repository. + During a backup restic uses the parent snapshot to check whether a file was changed and has to be + backed up again. For this check the backup has to read the directories contained in the old + snapshot. 
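A minimal sketch of the `--repository-file` option described above; the file location and the repository URL it contains are placeholders:

    echo "s3:https://s3.example.com/bucket" > /etc/restic/repository
    restic --repository-file /etc/restic/repository snapshots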
If a tree blob cannot be loaded, restic now warns about this problem with the backup + repository. https://github.com/restic/restic/pull/2978 @@ -3222,16 +3111,15 @@ restic users. The changes are ordered by importance. * Bugfix #1863: Report correct number of directories processed by backup - The directory statistics calculation was fixed to report the actual number of - processed directories instead of always zero. + The directory statistics calculation was fixed to report the actual number of processed + directories instead of always zero. https://github.com/restic/restic/issues/1863 * Bugfix #2254: Fix tar issues when dumping `/` - We've fixed an issue with dumping either `/` or files on the first sublevel e.g. - `/foo` to tar. This also fixes tar dumping issues on Windows where this issue - could also happen. + We've fixed an issue with dumping either `/` or files on the first sublevel e.g. `/foo` to tar. + This also fixes tar dumping issues on Windows where this issue could also happen. https://github.com/restic/restic/issues/2254 https://github.com/restic/restic/issues/2357 @@ -3239,63 +3127,59 @@ restic users. The changes are ordered by importance. * Bugfix #2281: Handle format verbs like '%' properly in `find` output - The JSON or "normal" output of the `find` command can now deal with file names - that contain substrings which the Golang `fmt` package considers "format verbs" - like `%s`. + The JSON or "normal" output of the `find` command can now deal with file names that contain + substrings which the Golang `fmt` package considers "format verbs" like `%s`. https://github.com/restic/restic/issues/2281 * Bugfix #2298: Do not hang when run as a background job - Restic did hang on exit while restoring the terminal configuration when it was - started as a background job, for example using `restic ... &`. This has been - fixed by only restoring the terminal configuration when restic is interrupted - while reading a password from the terminal. + Restic did hang on exit while restoring the terminal configuration when it was started as a + background job, for example using `restic ... &`. This has been fixed by only restoring the + terminal configuration when restic is interrupted while reading a password from the + terminal. https://github.com/restic/restic/issues/2298 * Bugfix #2389: Fix mangled json output of backup command - We've fixed a race condition in the json output of the backup command that could - cause multiple lines to get mixed up. We've also ensured that the backup summary - is printed last. + We've fixed a race condition in the json output of the backup command that could cause multiple + lines to get mixed up. We've also ensured that the backup summary is printed last. https://github.com/restic/restic/issues/2389 https://github.com/restic/restic/pull/2545 * Bugfix #2390: Refresh lock timestamp - Long-running operations did not refresh lock timestamp, resulting in locks - becoming stale. This is now fixed. + Long-running operations did not refresh lock timestamp, resulting in locks becoming stale. + This is now fixed. https://github.com/restic/restic/issues/2390 * Bugfix #2429: Backup --json reports total_bytes_processed as 0 - We've fixed the json output of total_bytes_processed. The non-json output was - already fixed with pull request #2138 but left the json output untouched. + We've fixed the json output of total_bytes_processed. The non-json output was already fixed + with pull request #2138 but left the json output untouched. 
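With the summary fixed, the value can be extracted from the JSON stream, for example with `jq` (the use of `jq` and the backed-up path are assumptions of this sketch):

    restic backup --json /home | jq 'select(.message_type == "summary") | .total_bytes_processed'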
https://github.com/restic/restic/issues/2429 * Bugfix #2469: Fix incorrect bytes stats in `diff` command - In some cases, the wrong number of bytes (e.g. 16777215.998 TiB) were reported - by the `diff` command. This is now fixed. + In some cases, the wrong number of bytes (e.g. 16777215.998 TiB) were reported by the `diff` + command. This is now fixed. https://github.com/restic/restic/issues/2469 * Bugfix #2518: Do not crash with Synology NAS sftp server - It was found that when restic is used to store data on an sftp server on a - Synology NAS with a relative path (one which does not start with a slash), it - may go into an endless loop trying to create directories on the server. We've - fixed this bug by using a function in the sftp library instead of our own - implementation. + It was found that when restic is used to store data on an sftp server on a Synology NAS with a + relative path (one which does not start with a slash), it may go into an endless loop trying to + create directories on the server. We've fixed this bug by using a function in the sftp library + instead of our own implementation. - The bug was discovered because the Synology sftp server behaves erratic with - non-absolute path (e.g. `home/restic-repo`). This can be resolved by just using - an absolute path instead (`/home/restic-repo`). We've also added a paragraph in - the FAQ. + The bug was discovered because the Synology sftp server behaves erratic with non-absolute + path (e.g. `home/restic-repo`). This can be resolved by just using an absolute path instead + (`/home/restic-repo`). We've also added a paragraph in the FAQ. https://github.com/restic/restic/issues/2518 https://github.com/restic/restic/issues/2363 @@ -3303,90 +3187,84 @@ restic users. The changes are ordered by importance. * Bugfix #2531: Fix incorrect size calculation in `stats --mode restore-size` - The restore-size mode of stats was counting hard-linked files as if they were - independent. + The restore-size mode of stats was counting hard-linked files as if they were independent. https://github.com/restic/restic/issues/2531 * Bugfix #2537: Fix incorrect file counts in `stats --mode restore-size` - The restore-size mode of stats was failing to count empty directories and some - files with hard links. + The restore-size mode of stats was failing to count empty directories and some files with hard + links. https://github.com/restic/restic/issues/2537 * Bugfix #2592: SFTP backend supports IPv6 addresses - The SFTP backend now supports IPv6 addresses natively, without relying on - aliases in the external SSH configuration. + The SFTP backend now supports IPv6 addresses natively, without relying on aliases in the + external SSH configuration. https://github.com/restic/restic/pull/2592 * Bugfix #2607: Honor RESTIC_CACHE_DIR environment variable on Mac and Windows - On Mac and Windows, the RESTIC_CACHE_DIR environment variable was ignored. This - variable can now be used on all platforms to set the directory where restic - stores caches. + On Mac and Windows, the RESTIC_CACHE_DIR environment variable was ignored. This variable can + now be used on all platforms to set the directory where restic stores caches. https://github.com/restic/restic/pull/2607 * Bugfix #2668: Don't abort the stats command when data blobs are missing - Runing the stats command in the blobs-per-file mode on a repository with missing - data blobs previously resulted in a crash. 
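For example, an absolute repository path as recommended above looks like this (host and path are placeholders):

    restic -r sftp:user@nas.example.com:/home/restic-repo init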
+ Runing the stats command in the blobs-per-file mode on a repository with missing data blobs + previously resulted in a crash. https://github.com/restic/restic/pull/2668 * Bugfix #2674: Add stricter prune error checks - Additional checks were added to the prune command in order to improve resiliency - to backend, hardware and/or networking issues. The checks now detect a few more - cases where such outside factors could potentially cause data loss. + Additional checks were added to the prune command in order to improve resiliency to backend, + hardware and/or networking issues. The checks now detect a few more cases where such outside + factors could potentially cause data loss. https://github.com/restic/restic/pull/2674 * Bugfix #2899: Fix possible crash in the progress bar of check --read-data - We've fixed a possible crash while displaying the progress bar for the check - --read-data command. The crash occurred when the length of the progress bar - status exceeded the terminal width, which only happened for very narrow terminal - windows. + We've fixed a possible crash while displaying the progress bar for the check --read-data + command. The crash occurred when the length of the progress bar status exceeded the terminal + width, which only happened for very narrow terminal windows. https://github.com/restic/restic/pull/2899 https://forum.restic.net/t/restic-rclone-pcloud-connection-issues/2963/15 * Change #1597: Honor the --no-lock flag in the mount command - The mount command now does not lock the repository if given the --no-lock flag. - This allows to mount repositories which are archived on a read only - backend/filesystem. + The mount command now does not lock the repository if given the --no-lock flag. This allows to + mount repositories which are archived on a read only backend/filesystem. https://github.com/restic/restic/issues/1597 https://github.com/restic/restic/pull/2821 * Change #2482: Remove vendored dependencies - We've removed the vendored dependencies (in the subdir `vendor/`). When building - restic, the Go compiler automatically fetches the dependencies. It will also - cryptographically verify that the correct code has been fetched by using the - hashes in `go.sum` (see the link to the documentation below). + We've removed the vendored dependencies (in the subdir `vendor/`). When building restic, the + Go compiler automatically fetches the dependencies. It will also cryptographically verify + that the correct code has been fetched by using the hashes in `go.sum` (see the link to the + documentation below). https://github.com/restic/restic/issues/2482 https://golang.org/cmd/go/#hdr-Module_downloading_and_verification * Change #2546: Return exit code 3 when failing to backup all source data - The backup command used to return a zero exit code as long as a snapshot could - be created successfully, even if some of the source files could not be read (in - which case the snapshot would contain the rest of the files). + The backup command used to return a zero exit code as long as a snapshot could be created + successfully, even if some of the source files could not be read (in which case the snapshot + would contain the rest of the files). - This made it hard for automation/scripts to detect failures/incomplete backups - by looking at the exit code. Restic now returns the following exit codes for the - backup command: + This made it hard for automation/scripts to detect failures/incomplete backups by looking at + the exit code. 
Restic now returns the following exit codes for the backup command: - - 0 when the command was successful - 1 when there was a fatal error (no - snapshot created) - 3 when some source data could not be read (incomplete - snapshot created) + - 0 when the command was successful - 1 when there was a fatal error (no snapshot created) - 3 when + some source data could not be read (incomplete snapshot created) https://github.com/restic/restic/issues/956 https://github.com/restic/restic/issues/2064 @@ -3396,12 +3274,12 @@ restic users. The changes are ordered by importance. * Change #2600: Update dependencies, require Go >= 1.13 - Restic now requires Go to be at least 1.13. This allows simplifications in the - build process and removing workarounds. + Restic now requires Go to be at least 1.13. This allows simplifications in the build process and + removing workarounds. - This is also probably the last version of restic still supporting mounting - repositories via fuse on macOS. The library we're using for fuse does not - support macOS any more and osxfuse is not open source any more. + This is also probably the last version of restic still supporting mounting repositories via + fuse on macOS. The library we're using for fuse does not support macOS any more and osxfuse is not + open source any more. https://github.com/bazil/fuse/issues/224 https://github.com/osxfuse/osxfuse/issues/590 @@ -3411,20 +3289,17 @@ restic users. The changes are ordered by importance. * Enhancement #323: Add command for copying snapshots between repositories - We've added a copy command, allowing you to copy snapshots from one repository - to another. + We've added a copy command, allowing you to copy snapshots from one repository to another. - Note that this process will have to read (download) and write (upload) the - entire snapshot(s) due to the different encryption keys used on the source and - destination repository. Also, the transferred files are not re-chunked, which - may break deduplication between files already stored in the destination repo and - files copied there using this command. + Note that this process will have to read (download) and write (upload) the entire snapshot(s) + due to the different encryption keys used on the source and destination repository. Also, the + transferred files are not re-chunked, which may break deduplication between files already + stored in the destination repo and files copied there using this command. - To fully support deduplication between repositories when the copy command is - used, the init command now supports the `--copy-chunker-params` option, which - initializes the new repository with identical parameters for splitting files - into chunks as an already existing repository. This allows copied snapshots to - be equally deduplicated in both repositories. + To fully support deduplication between repositories when the copy command is used, the init + command now supports the `--copy-chunker-params` option, which initializes the new + repository with identical parameters for splitting files into chunks as an already existing + repository. This allows copied snapshots to be equally deduplicated in both repositories. https://github.com/restic/restic/issues/323 https://github.com/restic/restic/pull/2606 @@ -3432,29 +3307,29 @@ restic users. The changes are ordered by importance. 
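A small shell sketch of how automation could react to the exit codes listed above; the messages are illustrative only:

    restic backup /home
    case $? in
      0) echo "backup finished successfully" ;;
      1) echo "fatal error, no snapshot created" ;;
      3) echo "snapshot created, but some source files could not be read" ;;
    esac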
* Enhancement #551: Use optimized library for hash calculation of file chunks - We've switched the library used to calculate the hashes of file chunks, which - are used for deduplication, to the optimized Minio SHA-256 implementation. + We've switched the library used to calculate the hashes of file chunks, which are used for + deduplication, to the optimized Minio SHA-256 implementation. - Depending on the CPU it improves the hashing throughput by 10-30%. Modern x86 - CPUs with the SHA Extension should be about two to three times faster. + Depending on the CPU it improves the hashing throughput by 10-30%. Modern x86 CPUs with the SHA + Extension should be about two to three times faster. https://github.com/restic/restic/issues/551 https://github.com/restic/restic/pull/2709 * Enhancement #1570: Support specifying multiple host flags for various commands - Previously commands didn't take more than one `--host` or `-H` argument into - account, which could be limiting with e.g. the `forget` command. + Previously commands didn't take more than one `--host` or `-H` argument into account, which + could be limiting with e.g. the `forget` command. - The `dump`, `find`, `forget`, `ls`, `mount`, `restore`, `snapshots`, `stats` and - `tag` commands will now take into account multiple `--host` and `-H` flags. + The `dump`, `find`, `forget`, `ls`, `mount`, `restore`, `snapshots`, `stats` and `tag` + commands will now take into account multiple `--host` and `-H` flags. https://github.com/restic/restic/issues/1570 * Enhancement #1680: Optimize `restic mount` - We've optimized the FUSE implementation used within restic. `restic mount` is - now more responsive and uses less memory. + We've optimized the FUSE implementation used within restic. `restic mount` is now more + responsive and uses less memory. https://github.com/restic/restic/issues/1680 https://github.com/restic/restic/pull/2587 @@ -3468,11 +3343,10 @@ restic users. The changes are ordered by importance. * Enhancement #2175: Allow specifying user and host when creating keys - When adding a new key to the repository, the username and hostname for the new - key can be specified on the command line. This allows overriding the defaults, - for example if you would prefer to use the FQDN to identify the host or if you - want to add keys for several different hosts without having to run the key add - command on those hosts. + When adding a new key to the repository, the username and hostname for the new key can be + specified on the command line. This allows overriding the defaults, for example if you would + prefer to use the FQDN to identify the host or if you want to add keys for several different hosts + without having to run the key add command on those hosts. https://github.com/restic/restic/issues/2175 @@ -3486,16 +3360,15 @@ restic users. The changes are ordered by importance. Fixes "not enough cache capacity" error during restore: https://github.com/restic/restic/issues/2244 - NOTE: This new implementation does not guarantee order in which blobs are - written to the target files and, for example, the last blob of a file can be - written to the file before any of the preceeding file blobs. It is therefore - possible to have gaps in the data written to the target files if restore fails - or interrupted by the user. + NOTE: This new implementation does not guarantee order in which blobs are written to the target + files and, for example, the last blob of a file can be written to the file before any of the + preceeding file blobs. 
It is therefore possible to have gaps in the data written to the target + files if restore fails or interrupted by the user. - The implementation will try to preallocate space for the restored files on the - filesystem to prevent file fragmentation. This ensures good read performance for - large files, like for example VM images. If preallocating space is not supported - by the filesystem, then this step is silently skipped. + The implementation will try to preallocate space for the restored files on the filesystem to + prevent file fragmentation. This ensures good read performance for large files, like for + example VM images. If preallocating space is not supported by the filesystem, then this step is + silently skipped. https://github.com/restic/restic/pull/2195 https://github.com/restic/restic/pull/2893 @@ -3508,73 +3381,69 @@ restic users. The changes are ordered by importance. * Enhancement #2328: Improve speed of check command - We've improved the check command to traverse trees only once independent of - whether they are contained in multiple snapshots. The check command is now much - faster for repositories with a large number of snapshots. + We've improved the check command to traverse trees only once independent of whether they are + contained in multiple snapshots. The check command is now much faster for repositories with a + large number of snapshots. https://github.com/restic/restic/issues/2284 https://github.com/restic/restic/pull/2328 * Enhancement #2395: Ignore sync errors when operation not supported by local filesystem - The local backend has been modified to work with filesystems which doesn't - support the `sync` operation. This operation is normally used by restic to - ensure that data files are fully written to disk before continuing. + The local backend has been modified to work with filesystems which doesn't support the `sync` + operation. This operation is normally used by restic to ensure that data files are fully + written to disk before continuing. - For these limited filesystems, saving a file in the backend would previously - fail with an "operation not supported" error. This error is now ignored, which - means that e.g. an SMB mount on macOS can now be used as storage location for a - repository. + For these limited filesystems, saving a file in the backend would previously fail with an + "operation not supported" error. This error is now ignored, which means that e.g. an SMB mount + on macOS can now be used as storage location for a repository. https://github.com/restic/restic/issues/2395 https://forum.restic.net/t/sync-errors-on-mac-over-smb/1859 * Enhancement #2423: Support user@domain parsing as user - Added the ability for user@domain-like users to be authenticated over SFTP - servers. + Added the ability for user@domain-like users to be authenticated over SFTP servers. https://github.com/restic/restic/pull/2423 * Enhancement #2427: Add flag `--iexclude-file` to backup command - The backup command now supports the flag `--iexclude-file` which is a - case-insensitive version of `--exclude-file`. + The backup command now supports the flag `--iexclude-file` which is a case-insensitive + version of `--exclude-file`. https://github.com/restic/restic/issues/2427 https://github.com/restic/restic/pull/2898 * Enhancement #2569: Support excluding files by their size - The `backup` command now supports the `--exclude-larger-than` option to exclude - files which are larger than the specified maximum size. 
This can for example be - useful to exclude unimportant files with a large file size. + The `backup` command now supports the `--exclude-larger-than` option to exclude files which + are larger than the specified maximum size. This can for example be useful to exclude + unimportant files with a large file size. https://github.com/restic/restic/issues/2569 https://github.com/restic/restic/pull/2914 * Enhancement #2571: Self-heal missing file parts during backup of unchanged files - We've improved the resilience of restic to certain types of repository - corruption. + We've improved the resilience of restic to certain types of repository corruption. - For files that are unchanged since the parent snapshot, the backup command now - verifies that all parts of the files still exist in the repository. Parts that - are missing, e.g. from a damaged repository, are backed up again. This - verification was already run for files that were modified since the parent - snapshot, but is now also done for unchanged files. + For files that are unchanged since the parent snapshot, the backup command now verifies that + all parts of the files still exist in the repository. Parts that are missing, e.g. from a damaged + repository, are backed up again. This verification was already run for files that were + modified since the parent snapshot, but is now also done for unchanged files. - Note that restic will not backup file parts that are referenced in the index but - where the actual data is not present on disk, as this situation can only be - detected by restic check. Please ensure that you run `restic check` regularly. + Note that restic will not backup file parts that are referenced in the index but where the actual + data is not present on disk, as this situation can only be detected by restic check. Please + ensure that you run `restic check` regularly. https://github.com/restic/restic/issues/2571 https://github.com/restic/restic/pull/2827 * Enhancement #2576: Improve the chunking algorithm - We've updated the chunker library responsible for splitting files into smaller - blocks. It should improve the chunking throughput by 5-15% depending on the CPU. + We've updated the chunker library responsible for splitting files into smaller blocks. It + should improve the chunking throughput by 5-15% depending on the CPU. https://github.com/restic/restic/issues/2820 https://github.com/restic/restic/pull/2576 @@ -3582,68 +3451,65 @@ restic users. The changes are ordered by importance. * Enhancement #2598: Improve speed of diff command - We've improved the performance of the diff command when comparing snapshots with - similar content. It should run up to twice as fast as before. + We've improved the performance of the diff command when comparing snapshots with similar + content. It should run up to twice as fast as before. https://github.com/restic/restic/pull/2598 * Enhancement #2599: Slightly reduce memory usage of prune and stats commands - The prune and the stats command kept directory identifiers in memory twice while - searching for used blobs. + The prune and the stats command kept directory identifiers in memory twice while searching for + used blobs. https://github.com/restic/restic/pull/2599 * Enhancement #2733: S3 backend: Add support for WebIdentityTokenFile - We've added support for EKS IAM roles for service accounts feature to the S3 - backend. + We've added support for EKS IAM roles for service accounts feature to the S3 backend. 
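For example, to skip files above a size limit as described above (the limit and the path are placeholders):

    restic backup --exclude-larger-than 500M /home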
https://github.com/restic/restic/issues/2703 https://github.com/restic/restic/pull/2733 * Enhancement #2773: Optimize handling of new index entries - Restic now uses less memory for backups which add a lot of data, e.g. large - initial backups. In addition, we've improved the stability in some edge cases. + Restic now uses less memory for backups which add a lot of data, e.g. large initial backups. In + addition, we've improved the stability in some edge cases. https://github.com/restic/restic/pull/2773 * Enhancement #2781: Reduce memory consumption of in-memory index - We've improved how the index is stored in memory. This change can reduce memory - usage for large repositories by up to 50% (depending on the operation). + We've improved how the index is stored in memory. This change can reduce memory usage for large + repositories by up to 50% (depending on the operation). https://github.com/restic/restic/pull/2781 https://github.com/restic/restic/pull/2812 * Enhancement #2786: Optimize `list blobs` command - We've changed the implementation of `list blobs` which should be now a bit - faster and consume almost no memory even for large repositories. + We've changed the implementation of `list blobs` which should be now a bit faster and consume + almost no memory even for large repositories. https://github.com/restic/restic/pull/2786 * Enhancement #2790: Optimized file access in restic mount - Reading large (> 100GiB) files from restic mountpoints is now faster, and the - speedup is greater for larger files. + Reading large (> 100GiB) files from restic mountpoints is now faster, and the speedup is + greater for larger files. https://github.com/restic/restic/pull/2790 * Enhancement #2840: Speed-up file deletion in forget, prune and rebuild-index - We've sped up the file deletion for the commands forget, prune and - rebuild-index, especially for remote repositories. Deletion was sequential - before and is now run in parallel. + We've sped up the file deletion for the commands forget, prune and rebuild-index, especially + for remote repositories. Deletion was sequential before and is now run in parallel. https://github.com/restic/restic/pull/2840 * Enhancement #2858: Support filtering snapshots by tag and path in the stats command - We've added filtering snapshots by `--tag tagList` and by `--path path` to the - `stats` command. This includes filtering of only 'latest' snapshots or all - snapshots in a repository. + We've added filtering snapshots by `--tag tagList` and by `--path path` to the `stats` + command. This includes filtering of only 'latest' snapshots or all snapshots in a repository. https://github.com/restic/restic/issues/2858 https://github.com/restic/restic/pull/2859 @@ -3670,85 +3536,81 @@ restic users. The changes are ordered by importance. * Bugfix #2063: Allow absolute path for filename when backing up from stdin - When backing up from stdin, handle directory path for `--stdin-filename`. This - can be used to specify the full path for the backed-up file. + When backing up from stdin, handle directory path for `--stdin-filename`. This can be used to + specify the full path for the backed-up file. https://github.com/restic/restic/issues/2063 * Bugfix #2174: Save files with invalid timestamps - When restic reads invalid timestamps (year is before 0000 or after 9999) it - refused to read and archive the file. 
We've changed the behavior and will now - save modified timestamps with the year set to either 0000 or 9999, the rest of - the timestamp stays the same, so the file will be saved (albeit with a bogus - timestamp). + When restic reads invalid timestamps (year is before 0000 or after 9999) it refused to read and + archive the file. We've changed the behavior and will now save modified timestamps with the + year set to either 0000 or 9999, the rest of the timestamp stays the same, so the file will be saved + (albeit with a bogus timestamp). https://github.com/restic/restic/issues/2174 https://github.com/restic/restic/issues/1173 * Bugfix #2249: Read fresh metadata for unmodified files - Restic took all metadata for files which were detected as unmodified, not taking - into account changed metadata (ownership, mode). This is now corrected. + Restic took all metadata for files which were detected as unmodified, not taking into account + changed metadata (ownership, mode). This is now corrected. https://github.com/restic/restic/issues/2249 https://github.com/restic/restic/pull/2252 * Bugfix #2301: Add upper bound for t in --read-data-subset=n/t - 256 is the effective maximum for t, but restic would allow larger values, - leading to strange behavior. + 256 is the effective maximum for t, but restic would allow larger values, leading to strange + behavior. https://github.com/restic/restic/issues/2301 https://github.com/restic/restic/pull/2304 * Bugfix #2321: Check errors when loading index files - Restic now checks and handles errors which occur when loading index files, the - missing check leads to odd errors (and a stack trace printed to users) later. - This was reported in the forum. + Restic now checks and handles errors which occur when loading index files, the missing check + leads to odd errors (and a stack trace printed to users) later. This was reported in the forum. https://github.com/restic/restic/pull/2321 https://forum.restic.net/t/check-rebuild-index-prune/1848/13 * Enhancement #2179: Use ctime when checking for file changes - Previously, restic only checked a file's mtime (along with other non-timestamp - metadata) to decide if a file has changed. This could cause restic to not notice - that a file has changed (and therefore continue to store the old version, as - opposed to the modified version) if something edits the file and then resets the - timestamp. Restic now also checks the ctime of files, so any modifications to a - file should be noticed, and the modified file will be backed up. The ctime check - will be disabled if the --ignore-inode flag was given. + Previously, restic only checked a file's mtime (along with other non-timestamp metadata) to + decide if a file has changed. This could cause restic to not notice that a file has changed (and + therefore continue to store the old version, as opposed to the modified version) if something + edits the file and then resets the timestamp. Restic now also checks the ctime of files, so any + modifications to a file should be noticed, and the modified file will be backed up. The ctime + check will be disabled if the --ignore-inode flag was given. - If this change causes problems for you, please open an issue, and we can look in - to adding a seperate flag to disable just the ctime check. + If this change causes problems for you, please open an issue, and we can look in to adding a + seperate flag to disable just the ctime check. 
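For example, the `n/t` form (with the newly enforced upper bound of 256 for `t`) checks one fifth of the pack files per run:

    restic check --read-data-subset=1/5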
https://github.com/restic/restic/issues/2179 https://github.com/restic/restic/pull/2212 * Enhancement #2306: Allow multiple retries for interactive password input - Restic used to quit if the repository password was typed incorrectly once. - Restic will now ask the user again for the repository password if typed - incorrectly. The user will now get three tries to input the correct password - before restic quits. + Restic used to quit if the repository password was typed incorrectly once. Restic will now ask + the user again for the repository password if typed incorrectly. The user will now get three + tries to input the correct password before restic quits. https://github.com/restic/restic/issues/2306 * Enhancement #2330: Make `--group-by` accept both singular and plural - One can now use the values `host`/`hosts`, `path`/`paths` and `tag` / `tags` - interchangeably in the `--group-by` argument. + One can now use the values `host`/`hosts`, `path`/`paths` and `tag` / `tags` interchangeably + in the `--group-by` argument. https://github.com/restic/restic/issues/2330 * Enhancement #2350: Add option to configure S3 region - We've added a new option for setting the region when accessing an S3-compatible - service. For some providers, it is required to set this to a valid value. You - can do that either by setting the environment variable `AWS_DEFAULT_REGION` or - using the option `s3.region`, e.g. like this: `-o s3.region="us-east-1"`. + We've added a new option for setting the region when accessing an S3-compatible service. For + some providers, it is required to set this to a valid value. You can do that either by setting the + environment variable `AWS_DEFAULT_REGION` or using the option `s3.region`, e.g. like this: + `-o s3.region="us-east-1"`. https://github.com/restic/restic/pull/2350 @@ -3777,11 +3639,10 @@ restic users. The changes are ordered by importance. * Bugfix #2135: Return error when no bytes could be read from stdin - We assume that users reading backup data from stdin want to know when no data - could be read, so now restic returns an error when `backup --stdin` is called - but no bytes could be read. Usually, this means that an earlier command in a - pipe has failed. The documentation was amended and now recommends setting the - `pipefail` option (`set -o pipefail`). + We assume that users reading backup data from stdin want to know when no data could be read, so now + restic returns an error when `backup --stdin` is called but no bytes could be read. Usually, + this means that an earlier command in a pipe has failed. The documentation was amended and now + recommends setting the `pipefail` option (`set -o pipefail`). https://github.com/restic/restic/pull/2135 https://github.com/restic/restic/pull/2139 @@ -3792,88 +3653,84 @@ restic users. The changes are ordered by importance. * Bugfix #2203: Fix reading passwords from stdin - Passwords for the `init`, `key add`, and `key passwd` commands can now be read - from non-terminal stdin. + Passwords for the `init`, `key add`, and `key passwd` commands can now be read from + non-terminal stdin. https://github.com/restic/restic/issues/2203 * Bugfix #2224: Don't abort the find command when a tree can't be loaded - Change the find command so that missing trees don't result in a crash. Instead, - the error is logged to the debug log, and the tree ID is displayed along with - the snapshot it belongs to. This makes it possible to recover repositories that - are missing trees by forgetting the snapshots they are used in. 
+ Change the find command so that missing trees don't result in a crash. Instead, the error is + logged to the debug log, and the tree ID is displayed along with the snapshot it belongs to. This + makes it possible to recover repositories that are missing trees by forgetting the snapshots + they are used in. https://github.com/restic/restic/issues/2224 * Enhancement #1895: Add case insensitive include & exclude options - The backup and restore commands now have --iexclude and --iinclude flags as case - insensitive variants of --exclude and --include. + The backup and restore commands now have --iexclude and --iinclude flags as case insensitive + variants of --exclude and --include. https://github.com/restic/restic/issues/1895 https://github.com/restic/restic/pull/2032 * Enhancement #1937: Support streaming JSON output for backup - We've added support for getting machine-readable status output during backup, - just pass the flag `--json` for `restic backup` and restic will output a stream - of JSON objects which contain the current progress. + We've added support for getting machine-readable status output during backup, just pass the + flag `--json` for `restic backup` and restic will output a stream of JSON objects which contain + the current progress. https://github.com/restic/restic/issues/1937 https://github.com/restic/restic/pull/1944 * Enhancement #2037: Add group-by option to snapshots command - We have added an option to group the output of the snapshots command, similar to - the output of the forget command. The option has been called "--group-by" and - accepts any combination of the values "host", "paths" and "tags", separated by - commas. Default behavior (not specifying --group-by) has not been changed. We - have added support of the grouping to the JSON output. + We have added an option to group the output of the snapshots command, similar to the output of the + forget command. The option has been called "--group-by" and accepts any combination of the + values "host", "paths" and "tags", separated by commas. Default behavior (not specifying + --group-by) has not been changed. We have added support of the grouping to the JSON output. https://github.com/restic/restic/issues/2037 https://github.com/restic/restic/pull/2087 * Enhancement #2124: Ability to dump folders to tar via stdout - We've added the ability to dump whole folders to stdout via the `dump` command. - Restic now requires at least Go 1.10 due to a limitation of the standard library - for Go <= 1.9. + We've added the ability to dump whole folders to stdout via the `dump` command. Restic now + requires at least Go 1.10 due to a limitation of the standard library for Go <= 1.9. https://github.com/restic/restic/issues/2123 https://github.com/restic/restic/pull/2124 * Enhancement #2139: Return error if no bytes could be read for `backup --stdin` - When restic is used to backup the output of a program, like `mysqldump | restic - backup --stdin`, it now returns an error if no bytes could be read at all. This - catches the failure case when `mysqldump` failed for some reason and did not - output any data to stdout. + When restic is used to backup the output of a program, like `mysqldump | restic backup --stdin`, + it now returns an error if no bytes could be read at all. This catches the failure case when + `mysqldump` failed for some reason and did not output any data to stdout. 
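As an illustration of the zero-byte check described for `backup --stdin` above, the following sketch wraps stdin in a counting reader and reports an error if nothing was read; the wrapper type and the error message are hypothetical, not restic's code.

```go
// Illustrative only: fail when a stdin backup source delivered no data at all.
package main

import (
	"fmt"
	"io"
	"os"
)

// countingReader wraps a reader and remembers how many bytes it delivered.
type countingReader struct {
	r io.Reader
	n int64
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.n += int64(n)
	return n, err
}

func main() {
	src := &countingReader{r: os.Stdin}

	// Stand-in for actually archiving the stream.
	if _, err := io.Copy(io.Discard, src); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// The check described above: an empty stream usually means an earlier
	// command in the pipeline failed, so report it as an error.
	if src.n == 0 {
		fmt.Fprintln(os.Stderr, "error: no data read from stdin")
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", src.n)
}
```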
https://github.com/restic/restic/pull/2139 * Enhancement #2155: Add Openstack application credential auth for Swift - Since Openstack Queens Identity (auth V3) service supports an application - credential auth method. It allows to create a technical account with the limited - roles. This commit adds an application credential authentication method for the - Swift backend. + Since Openstack Queens Identity (auth V3) service supports an application credential auth + method. It allows to create a technical account with the limited roles. This commit adds an + application credential authentication method for the Swift backend. https://github.com/restic/restic/issues/2155 * Enhancement #2184: Add --json support to forget command - The forget command now supports the --json argument, outputting the information - about what is (or would-be) kept and removed from the repository. + The forget command now supports the --json argument, outputting the information about what is + (or would-be) kept and removed from the repository. https://github.com/restic/restic/issues/2184 https://github.com/restic/restic/pull/2185 * Enhancement #2205: Add --ignore-inode option to backup cmd - This option handles backup of virtual filesystems that do not keep fixed inodes - for files, like Fuse-based, pCloud, etc. Ignoring inode changes allows to - consider the file as unchanged if last modification date and size are unchanged. + This option handles backup of virtual filesystems that do not keep fixed inodes for files, like + Fuse-based, pCloud, etc. Ignoring inode changes allows to consider the file as unchanged if + last modification date and size are unchanged. https://github.com/restic/restic/issues/1631 https://github.com/restic/restic/pull/2205 @@ -3881,17 +3738,16 @@ restic users. The changes are ordered by importance. * Enhancement #2220: Add config option to set S3 storage class - The `s3.storage-class` option can be passed to restic (using `-o`) to specify - the storage class to be used for S3 objects created by restic. + The `s3.storage-class` option can be passed to restic (using `-o`) to specify the storage + class to be used for S3 objects created by restic. - The storage class is passed as-is to S3, so it needs to be understood by the - API. On AWS, it can be one of `STANDARD`, `STANDARD_IA`, `ONEZONE_IA`, - `INTELLIGENT_TIERING` and `REDUCED_REDUNDANCY`. If unspecified, the default - storage class is used (`STANDARD` on AWS). + The storage class is passed as-is to S3, so it needs to be understood by the API. On AWS, it can be + one of `STANDARD`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING` and + `REDUCED_REDUNDANCY`. If unspecified, the default storage class is used (`STANDARD` on + AWS). - You can mix storage classes in the same bucket, and the setting isn't stored in - the restic repository, so be sure to specify it with each command that writes to - S3. + You can mix storage classes in the same bucket, and the setting isn't stored in the restic + repository, so be sure to specify it with each command that writes to S3. https://github.com/restic/restic/issues/706 https://github.com/restic/restic/pull/2220 @@ -3919,19 +3775,19 @@ restic users. The changes are ordered by importance. * Bugfix #1989: Google Cloud Storage: Respect bandwidth limit - The GCS backend did not respect the bandwidth limit configured, a previous - commit accidentally removed support for it. + The GCS backend did not respect the bandwidth limit configured, a previous commit + accidentally removed support for it. 
https://github.com/restic/restic/issues/1989 https://github.com/restic/restic/pull/2100 * Bugfix #2040: Add host name filter shorthand flag for `stats` command - The default value for `--host` flag was set to 'H' (the shorthand version of the - flag), this caused the lookup for the latest snapshot to fail. + The default value for `--host` flag was set to 'H' (the shorthand version of the flag), this + caused the lookup for the latest snapshot to fail. - Add shorthand flag `-H` for `--host` (with empty default so if these flags are - not specified the latest snapshot will not filter by host name). + Add shorthand flag `-H` for `--host` (with empty default so if these flags are not specified the + latest snapshot will not filter by host name). Also add shorthand `-H` for `backup` command. @@ -3939,17 +3795,17 @@ restic users. The changes are ordered by importance. * Bugfix #2068: Correctly return error loading data - In one case during `prune` and `check`, an error loading data from the backend - is not returned properly. This is now corrected. + In one case during `prune` and `check`, an error loading data from the backend is not returned + properly. This is now corrected. https://github.com/restic/restic/issues/1999#issuecomment-433737921 https://github.com/restic/restic/pull/2068 * Bugfix #2095: Consistently use local time for snapshots times - By default snapshots created with restic backup were set to local time, but when - the --time flag was used the provided timestamp was parsed as UTC. With this - change all snapshots times are set to local time. + By default snapshots created with restic backup were set to local time, but when the --time flag + was used the provided timestamp was parsed as UTC. With this change all snapshots times are set + to local time. https://github.com/restic/restic/pull/2095 @@ -3958,70 +3814,65 @@ restic users. The changes are ordered by importance. This change significantly improves restore performance, especially when using high-latency remote repositories like B2. - The implementation now uses several concurrent threads to download and process - multiple remote files concurrently. To further reduce restore time, each remote - file is downloaded using a single repository request. + The implementation now uses several concurrent threads to download and process multiple + remote files concurrently. To further reduce restore time, each remote file is downloaded + using a single repository request. https://github.com/restic/restic/issues/1605 https://github.com/restic/restic/pull/1719 * Enhancement #2017: Mount: Enforce FUSE Unix permissions with allow-other - The fuse mount (`restic mount`) now lets the kernel check the permissions of the - files within snapshots (this is done through the `DefaultPermissions` FUSE - option) when the option `--allow-other` is specified. + The fuse mount (`restic mount`) now lets the kernel check the permissions of the files within + snapshots (this is done through the `DefaultPermissions` FUSE option) when the option + `--allow-other` is specified. - To restore the old behavior, we've added the `--no-default-permissions` option. - This allows all users that have access to the mount point to access all files - within the snapshots. + To restore the old behavior, we've added the `--no-default-permissions` option. This allows + all users that have access to the mount point to access all files within the snapshots. 
https://github.com/restic/restic/pull/2017 * Enhancement #2070: Make all commands display timestamps in local time - Restic used to drop the timezone information from displayed timestamps, it now - converts timestamps to local time before printing them so the times can be - easily compared to. + Restic used to drop the timezone information from displayed timestamps, it now converts + timestamps to local time before printing them so the times can be easily compared to. https://github.com/restic/restic/pull/2070 * Enhancement #2085: Allow --files-from to be specified multiple times - Before, restic took only the last file specified with `--files-from` into - account, this is now corrected. + Before, restic took only the last file specified with `--files-from` into account, this is now + corrected. https://github.com/restic/restic/issues/2085 https://github.com/restic/restic/pull/2086 * Enhancement #2089: Increase granularity of the "keep within" retention policy - The `keep-within` option of the `forget` command now accepts time ranges with an - hourly granularity. For example, running `restic forget --keep-within 3d12h` - will keep all the snapshots made within three days and twelve hours from the - time of the latest snapshot. + The `keep-within` option of the `forget` command now accepts time ranges with an hourly + granularity. For example, running `restic forget --keep-within 3d12h` will keep all the + snapshots made within three days and twelve hours from the time of the latest snapshot. https://github.com/restic/restic/issues/2089 https://github.com/restic/restic/pull/2090 * Enhancement #2094: Run command to get password - We've added the `--password-command` option which allows specifying a command - that restic runs every time the password for the repository is needed, so it can - be integrated with a password manager or keyring. The option can also be set via - the environment variable `$RESTIC_PASSWORD_COMMAND`. + We've added the `--password-command` option which allows specifying a command that restic + runs every time the password for the repository is needed, so it can be integrated with a + password manager or keyring. The option can also be set via the environment variable + `$RESTIC_PASSWORD_COMMAND`. https://github.com/restic/restic/pull/2094 * Enhancement #2097: Add key hinting - Added a new option `--key-hint` and corresponding environment variable - `RESTIC_KEY_HINT`. The key hint is a key ID to try decrypting first, before - other keys in the repository. + Added a new option `--key-hint` and corresponding environment variable `RESTIC_KEY_HINT`. + The key hint is a key ID to try decrypting first, before other keys in the repository. - This change will benefit repositories with many keys; if the correct key hint is - supplied then restic only needs to check one key. If the key hint is incorrect - (the key does not exist, or the password is incorrect) then restic will check - all keys, as usual. + This change will benefit repositories with many keys; if the correct key hint is supplied then + restic only needs to check one key. If the key hint is incorrect (the key does not exist, or the + password is incorrect) then restic will check all keys, as usual. https://github.com/restic/restic/issues/2097 @@ -4051,31 +3902,29 @@ restic users. The changes are ordered by importance. * Bugfix #1935: Remove truncated files from cache - When a file in the local cache is truncated, and restic tries to access data - beyond the end of the (cached) file, it used to return an error "EOF". 
This is - now fixed, such truncated files are removed and the data is fetched directly - from the backend. + When a file in the local cache is truncated, and restic tries to access data beyond the end of the + (cached) file, it used to return an error "EOF". This is now fixed, such truncated files are + removed and the data is fetched directly from the backend. https://github.com/restic/restic/issues/1935 * Bugfix #1978: Do not return an error when the scanner is slower than backup - When restic makes a backup, there's a background task called "scanner" which - collects information on how many files and directories are to be saved, in order - to display progress information to the user. When the backup finishes faster - than the scanner, it is aborted because the result is not needed any more. This - logic contained a bug, where quitting the scanner process was treated as an - error, and caused restic to print an unhelpful error message ("context - canceled"). + When restic makes a backup, there's a background task called "scanner" which collects + information on how many files and directories are to be saved, in order to display progress + information to the user. When the backup finishes faster than the scanner, it is aborted + because the result is not needed any more. This logic contained a bug, where quitting the + scanner process was treated as an error, and caused restic to print an unhelpful error message + ("context canceled"). https://github.com/restic/restic/issues/1978 https://github.com/restic/restic/pull/1991 * Enhancement #1766: Restore: suppress lchown errors when not running as root - Like "cp" and "rsync" do, restic now only reports errors for changing the - ownership of files during restore if it is run as root, on non-Windows - operating systems. On Windows, the error is reported as usual. + Like "cp" and "rsync" do, restic now only reports errors for changing the ownership of files + during restore if it is run as root, on non-Windows operating systems. On Windows, the error + is reported as usual. https://github.com/restic/restic/issues/1766 @@ -4083,118 +3932,113 @@ restic users. The changes are ordered by importance. We've updated the `find` command to support multiple patterns. - `restic find` is now able to list the snapshots containing a specific tree or - blob, or even the snapshots that contain blobs belonging to a given pack. A list - of IDs can be given, as long as they all have the same type. + `restic find` is now able to list the snapshots containing a specific tree or blob, or even the + snapshots that contain blobs belonging to a given pack. A list of IDs can be given, as long as they + all have the same type. - The command `find` can also display the pack IDs the blobs belong to, if the - `--show-pack-id` flag is provided. + The command `find` can also display the pack IDs the blobs belong to, if the `--show-pack-id` + flag is provided. https://github.com/restic/restic/issues/1777 https://github.com/restic/restic/pull/1780 * Enhancement #1876: Display reason why forget keeps snapshots - We've added a column to the list of snapshots `forget` keeps which details the - reasons to keep a particuliar snapshot. This makes debugging policies for forget - much easier. Please remember to always try things out with `--dry-run`! + We've added a column to the list of snapshots `forget` keeps which details the reasons to keep a + particuliar snapshot. This makes debugging policies for forget much easier. Please remember + to always try things out with `--dry-run`! 
https://github.com/restic/restic/pull/1876 * Enhancement #1891: Accept glob in paths loaded via --files-from - Before that, behaviour was different if paths were appended to command line or - from a file, because wild card characters were expanded by shell if appended to - command line, but not expanded if loaded from file. + Before that, behaviour was different if paths were appended to command line or from a file, + because wild card characters were expanded by shell if appended to command line, but not + expanded if loaded from file. https://github.com/restic/restic/issues/1891 * Enhancement #1909: Reject files/dirs by name first - The current scanner/archiver code had an architectural limitation: it always ran - the `lstat()` system call on all files and directories before a decision to - include/exclude the file/dir was made. This lead to a lot of unnecessary system - calls for items that could have been rejected by their name or path only. + The current scanner/archiver code had an architectural limitation: it always ran the + `lstat()` system call on all files and directories before a decision to include/exclude the + file/dir was made. This lead to a lot of unnecessary system calls for items that could have been + rejected by their name or path only. - We've changed the archiver/scanner implementation so that it now first rejects - by name/path, and only runs the system call on the remaining items. This reduces - the number of `lstat()` system calls a lot (depending on the exclude settings). + We've changed the archiver/scanner implementation so that it now first rejects by name/path, + and only runs the system call on the remaining items. This reduces the number of `lstat()` + system calls a lot (depending on the exclude settings). https://github.com/restic/restic/issues/1909 https://github.com/restic/restic/pull/1912 * Enhancement #1920: Vendor dependencies with Go 1.11 Modules - Until now, we've used `dep` for managing dependencies, we've now switch to using - Go modules. For users this does not change much, only if you want to compile - restic without downloading anything with Go 1.11, then you need to run: `go - build -mod=vendor build.go` + Until now, we've used `dep` for managing dependencies, we've now switch to using Go modules. + For users this does not change much, only if you want to compile restic without downloading + anything with Go 1.11, then you need to run: `go build -mod=vendor build.go` https://github.com/restic/restic/pull/1920 * Enhancement #1940: Add directory filter to ls command - The ls command can now be filtered by directories, so that only files in the - given directories will be shown. If the --recursive flag is specified, then ls - will traverse subfolders and list their files as well. + The ls command can now be filtered by directories, so that only files in the given directories + will be shown. If the --recursive flag is specified, then ls will traverse subfolders and list + their files as well. - It used to be possible to specify multiple snapshots, but that has been replaced - by only one snapshot and the possibility of specifying multiple directories. + It used to be possible to specify multiple snapshots, but that has been replaced by only one + snapshot and the possibility of specifying multiple directories. - Specifying directories constrains the walk, which can significantly speed up the - listing. + Specifying directories constrains the walk, which can significantly speed up the listing. 
https://github.com/restic/restic/issues/1940 https://github.com/restic/restic/pull/1941 * Enhancement #1949: Add new command `self-update` - We have added a new command called `self-update` which downloads the latest - released version of restic from GitHub and replaces the current binary with it. - It does not rely on any external program (so it'll work everywhere), but still - verifies the GPG signature using the embedded GPG public key. + We have added a new command called `self-update` which downloads the latest released version + of restic from GitHub and replaces the current binary with it. It does not rely on any external + program (so it'll work everywhere), but still verifies the GPG signature using the embedded + GPG public key. - By default, the `self-update` command is hidden behind the `selfupdate` built - tag, which is only set when restic is built using `build.go` (including official - releases). The reason for this is that downstream distributions will then not - include the command by default, so users are encouraged to use the - platform-specific distribution mechanism. + By default, the `self-update` command is hidden behind the `selfupdate` build tag, which is + only set when restic is built using `build.go` (including official releases). The reason for + this is that downstream distributions will then not include the command by default, so users + are encouraged to use the platform-specific distribution mechanism. https://github.com/restic/restic/pull/1949 * Enhancement #1953: Ls: Add JSON output support for restic ls cmd - We've implemented listing files in the repository with JSON as output, just pass - `--json` as an option to `restic ls`. This makes the output of the command - machine readable. + We've implemented listing files in the repository with JSON as output, just pass `--json` as an + option to `restic ls`. This makes the output of the command machine readable. https://github.com/restic/restic/pull/1953 * Enhancement #1962: Stream JSON output for ls command - The `ls` command now supports JSON output with the global `--json` flag, and - this change streams out JSON messages one object at a time rather than en entire - array buffered in memory before encoding. The advantage is it allows large - listings to be handled efficiently. + The `ls` command now supports JSON output with the global `--json` flag, and this change + streams out JSON messages one object at a time rather than an entire array buffered in memory + before encoding. The advantage is it allows large listings to be handled efficiently. - Two message types are printed: snapshots and nodes. A snapshot object will - precede node objects which belong to that snapshot. The `struct_type` field can - be used to determine which kind of message an object is. + Two message types are printed: snapshots and nodes. A snapshot object will precede node + objects which belong to that snapshot. The `struct_type` field can be used to determine which + kind of message an object is. https://github.com/restic/restic/pull/1962 * Enhancement #1967: Use `--host` everywhere - We now use the flag `--host` for all commands which need a host name, using - `--hostname` (e.g. for `restic backup`) still works, but will print a - deprecation warning. Also, add the short option `-H` where possible. + We now use the flag `--host` for all commands which need a host name, using `--hostname` (e.g. + for `restic backup`) still works, but will print a deprecation warning. Also, add the short + option `-H` where possible.
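As a sketch of how the streamed `ls --json` output from #1962 above can be consumed, the following program decodes one JSON object at a time from stdin and dispatches on the `struct_type` field. Only the two values documented there (`snapshot` and `node`) are assumed; all other fields are left untouched. It could be fed, for example, with `restic ls --json <snapshotID> | go run lsjson.go` (file name hypothetical).

```go
// Sketch of a consumer for the streamed `ls --json` output: one JSON object per
// message, dispatched on the struct_type field ("snapshot" or "node").
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
)

// message only looks at the struct_type field; everything else stays in the
// raw object and is ignored here.
type message struct {
	StructType string `json:"struct_type"`
}

func main() {
	dec := json.NewDecoder(os.Stdin)
	var snapshots, nodes int

	for {
		var raw json.RawMessage
		if err := dec.Decode(&raw); err == io.EOF {
			break
		} else if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}

		var m message
		if err := json.Unmarshal(raw, &m); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}

		switch m.StructType {
		case "snapshot":
			snapshots++ // a snapshot message precedes the nodes belonging to it
		case "node":
			nodes++
		}
	}
	fmt.Printf("%d snapshots, %d nodes\n", snapshots, nodes)
}
```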
https://github.com/restic/restic/issues/1967 * Enhancement #2028: Display size of cache directories - The `cache` command now by default shows the size of the individual cache - directories. It can be disabled with `--no-size`. + The `cache` command now by default shows the size of the individual cache directories. It can be + disabled with `--no-size`. https://github.com/restic/restic/issues/2028 https://github.com/restic/restic/pull/2033 @@ -4222,25 +4066,23 @@ restic users. The changes are ordered by importance. * Bugfix #1854: Allow saving files/dirs on different fs with `--one-file-system` - Restic now allows saving files/dirs on a different file system in a subdir - correctly even when `--one-file-system` is specified. + Restic now allows saving files/dirs on a different file system in a subdir correctly even when + `--one-file-system` is specified. The first thing the restic archiver code does is to build a tree of the target - files/directories. If it detects that a parent directory is already included - (e.g. `restic backup /foo /foo/bar/baz`), it'll ignore the latter argument. + files/directories. If it detects that a parent directory is already included (e.g. `restic + backup /foo /foo/bar/baz`), it'll ignore the latter argument. - Without `--one-file-system`, that's perfectly valid: If `/foo` is to be - archived, it will include `/foo/bar/baz`. But with `--one-file-system`, - `/foo/bar/baz` may reside on a different file system, so it won't be included - with `/foo`. + Without `--one-file-system`, that's perfectly valid: If `/foo` is to be archived, it will + include `/foo/bar/baz`. But with `--one-file-system`, `/foo/bar/baz` may reside on a + different file system, so it won't be included with `/foo`. https://github.com/restic/restic/issues/1854 https://github.com/restic/restic/pull/1855 * Bugfix #1861: Fix case-insensitive search with restic find - We've fixed the behavior for `restic find -i PATTERN`, which was broken in - v0.9.1. + We've fixed the behavior for `restic find -i PATTERN`, which was broken in v0.9.1. https://github.com/restic/restic/pull/1861 @@ -4253,22 +4095,21 @@ restic users. The changes are ordered by importance. * Bugfix #1880: Use `--cache-dir` argument for `check` command - `check` command now uses a temporary sub-directory of the specified directory if - set using the `--cache-dir` argument. If not set, the cache directory is created - in the default temporary directory as before. In either case a temporary cache - is used to ensure the actual repository is checked (rather than a local copy). + `check` command now uses a temporary sub-directory of the specified directory if set using the + `--cache-dir` argument. If not set, the cache directory is created in the default temporary + directory as before. In either case a temporary cache is used to ensure the actual repository is + checked (rather than a local copy). - The `--cache-dir` argument was not used by the `check` command, instead a cache - directory was created in the temporary directory. + The `--cache-dir` argument was not used by the `check` command, instead a cache directory was + created in the temporary directory. https://github.com/restic/restic/issues/1880 * Bugfix #1893: Return error when exclude file cannot be read - A bug was found: when multiple exclude files were passed to restic and one of - them could not be read, an error was printed and restic continued, ignoring even - the existing exclude files. 
Now, an error message is printed and restic aborts - when an exclude file cannot be read. + A bug was found: when multiple exclude files were passed to restic and one of them could not be + read, an error was printed and restic continued, ignoring even the existing exclude files. + Now, an error message is printed and restic aborts when an exclude file cannot be read. https://github.com/restic/restic/issues/1893 @@ -4279,9 +4120,9 @@ restic users. The changes are ordered by importance. * Enhancement #1477: S3 backend: accept AWS_SESSION_TOKEN - Before, it was not possible to use s3 backend with AWS temporary security - credentials(with AWS_SESSION_TOKEN). This change gives higher priority to - credentials.EnvAWS credentials provider. + Before, it was not possible to use s3 backend with AWS temporary security credentials(with + AWS_SESSION_TOKEN). This change gives higher priority to credentials.EnvAWS credentials + provider. https://github.com/restic/restic/issues/1477 https://github.com/restic/restic/pull/1479 @@ -4289,33 +4130,33 @@ restic users. The changes are ordered by importance. * Enhancement #1772: Add restore --verify to verify restored file content - Restore will print error message if restored file content does not match - expected SHA256 checksum + Restore will print error message if restored file content does not match expected SHA256 + checksum https://github.com/restic/restic/pull/1772 * Enhancement #1853: Add JSON output support to `restic key list` - This PR enables users to get the output of `restic key list` in JSON in addition - to the existing table format. + This PR enables users to get the output of `restic key list` in JSON in addition to the existing + table format. https://github.com/restic/restic/pull/1853 * Enhancement #1901: Update the Backblaze B2 library - We've updated the library we're using for accessing the Backblaze B2 service to - 0.5.0 to include support for upcoming so-called "application keys". With this - feature, you can create access credentials for B2 which are restricted to e.g. a - single bucket or even a sub-directory of a bucket. + We've updated the library we're using for accessing the Backblaze B2 service to 0.5.0 to + include support for upcoming so-called "application keys". With this feature, you can create + access credentials for B2 which are restricted to e.g. a single bucket or even a sub-directory + of a bucket. https://github.com/restic/restic/pull/1901 https://github.com/kurin/blazer * Enhancement #1906: Add support for B2 application keys - Restic can now use so-called "application keys" which can be created in the B2 - dashboard and were only introduced recently. In contrast to the "master key", - such keys can be restricted to a specific bucket and/or path. + Restic can now use so-called "application keys" which can be created in the B2 dashboard and + were only introduced recently. In contrast to the "master key", such keys can be restricted to a + specific bucket and/or path. https://github.com/restic/restic/issues/1906 https://github.com/restic/restic/pull/1914 @@ -4337,51 +4178,48 @@ restic users. The changes are ordered by importance. * Bugfix #1801: Add limiting bandwidth to the rclone backend - The rclone backend did not respect `--limit-upload` or `--limit-download`. - Oftentimes it's not necessary to use this, as the limiting in rclone itself - should be used because it gives much better results, but in case a remote - instance of rclone is used (e.g. 
called via ssh), it is still relevant to limit - the bandwidth from restic to rclone. + The rclone backend did not respect `--limit-upload` or `--limit-download`. Oftentimes it's + not necessary to use this, as the limiting in rclone itself should be used because it gives much + better results, but in case a remote instance of rclone is used (e.g. called via ssh), it is still + relevant to limit the bandwidth from restic to rclone. https://github.com/restic/restic/issues/1801 * Bugfix #1822: Allow uploading large files to MS Azure - Sometimes, restic creates files to be uploaded to the repository which are quite - large, e.g. when saving directories with many entries or very large files. The - MS Azure API does not allow uploading files larger that 256MiB directly, rather - restic needs to upload them in blocks of 100MiB. This is now implemented. + Sometimes, restic creates files to be uploaded to the repository which are quite large, e.g. + when saving directories with many entries or very large files. The MS Azure API does not allow + uploading files larger that 256MiB directly, rather restic needs to upload them in blocks of + 100MiB. This is now implemented. https://github.com/restic/restic/issues/1822 * Bugfix #1825: Correct `find` to not skip snapshots - Under certain circumstances, the `find` command was found to skip snapshots - containing directories with files to look for when the directories haven't been - modified at all, and were already printed as part of a different snapshot. This - is now corrected. + Under certain circumstances, the `find` command was found to skip snapshots containing + directories with files to look for when the directories haven't been modified at all, and were + already printed as part of a different snapshot. This is now corrected. - In addition, we've switched to our own matching/pattern implementation, so now - things like `restic find "/home/user/foo/**/main.go"` are possible. + In addition, we've switched to our own matching/pattern implementation, so now things like + `restic find "/home/user/foo/**/main.go"` are possible. https://github.com/restic/restic/issues/1825 https://github.com/restic/restic/issues/1823 * Bugfix #1833: Fix caching files on error - During `check` it may happen that different threads access the same file in the - backend, which is then downloaded into the cache only once. When that fails, - only the thread which is responsible for downloading the file signals the - correct error. The other threads just assume that the file has been downloaded - successfully and then get an error when they try to access the cached file. + During `check` it may happen that different threads access the same file in the backend, which + is then downloaded into the cache only once. When that fails, only the thread which is + responsible for downloading the file signals the correct error. The other threads just assume + that the file has been downloaded successfully and then get an error when they try to access the + cached file. https://github.com/restic/restic/issues/1833 * Bugfix #1834: Resolve deadlock - When the "scanning" process restic runs to find out how much data there is does - not finish before the backup itself is done, restic stops doing anything. This - is resolved now. + When the "scanning" process restic runs to find out how much data there is does not finish before + the backup itself is done, restic stops doing anything. This is resolved now. 
https://github.com/restic/restic/issues/1834 https://github.com/restic/restic/pull/1835 @@ -4417,81 +4255,77 @@ restic users. The changes are ordered by importance. * Bugfix #1608: Respect time stamp for new backup when reading from stdin - When reading backups from stdin (via `restic backup --stdin`), restic now uses - the time stamp for the new backup passed in `--time`. + When reading backups from stdin (via `restic backup --stdin`), restic now uses the time stamp + for the new backup passed in `--time`. https://github.com/restic/restic/issues/1608 https://github.com/restic/restic/pull/1703 * Bugfix #1652: Ignore/remove invalid lock files - This corrects a bug introduced recently: When an invalid lock file in the repo - is encountered (e.g. if the file is empty), the code used to ignore that, but - now returns the error. Now, invalid files are ignored for the normal lock check, - and removed when `restic unlock --remove-all` is run. + This corrects a bug introduced recently: When an invalid lock file in the repo is encountered + (e.g. if the file is empty), the code used to ignore that, but now returns the error. Now, invalid + files are ignored for the normal lock check, and removed when `restic unlock --remove-all` is + run. https://github.com/restic/restic/issues/1652 https://github.com/restic/restic/pull/1653 * Bugfix #1684: Fix backend tests for rest-server - The REST server for restic now requires an explicit parameter (`--no-auth`) if - no authentication should be allowed. This is fixed in the tests. + The REST server for restic now requires an explicit parameter (`--no-auth`) if no + authentication should be allowed. This is fixed in the tests. https://github.com/restic/restic/pull/1684 * Bugfix #1730: Ignore sockets for restore - We've received a report and correct the behavior in which the restore code - aborted restoring a directory when a socket was encountered. Unix domain socket - files cannot be restored (they are created on the fly once a process starts - listening). The error handling was corrected, and in addition we're now ignoring - sockets during restore. + We've received a report and correct the behavior in which the restore code aborted restoring a + directory when a socket was encountered. Unix domain socket files cannot be restored (they are + created on the fly once a process starts listening). The error handling was corrected, and in + addition we're now ignoring sockets during restore. https://github.com/restic/restic/issues/1730 https://github.com/restic/restic/pull/1731 * Bugfix #1745: Correctly parse the argument to --tls-client-cert - Previously, the --tls-client-cert method attempt to read ARGV[1] (hardcoded) - instead of the argument that was passed to it. This has been corrected. + Previously, the --tls-client-cert method attempt to read ARGV[1] (hardcoded) instead of the + argument that was passed to it. This has been corrected. https://github.com/restic/restic/issues/1745 https://github.com/restic/restic/pull/1746 * Enhancement #549: Rework archiver code - The core archiver code and the complementary code for the `backup` command was - rewritten completely. This resolves very annoying issues such as 549. The first - backup with this release of restic will likely result in all files being re-read - locally, so it will take a lot longer. The next backup after that will be fast - again. - - Basically, with the old code, restic took the last path component of each - to-be-saved file or directory as the top-level file/directory within the - snapshot. 
This meant that when called as `restic backup /home/user/foo`, the - snapshot would contain the files in the directory `/home/user/foo` as `/foo`. - - This is not the case any more with the new archiver code. Now, restic works very - similar to what `tar` does: When restic is called with an absolute path to save, - then it'll preserve the directory structure within the snapshot. For the example - above, the snapshot would contain the files in the directory within - `/home/user/foo` in the snapshot. For relative directories, it only preserves - the relative path components. So `restic backup user/foo` will save the files as - `/user/foo` in the snapshot. - - While we were at it, the status display and notification system was completely - rewritten. By default, restic now shows which files are currently read (unless - `--quiet` is specified) in a multi-line status display. - - The `backup` command also gained a new option: `--verbose`. It can be specified - once (which prints a bit more detail what restic is doing) or twice (which - prints a line for each file/directory restic encountered, together with some - statistics). - - Another issue that was resolved is the new code only reads two files at most. - The old code would read way too many files in parallel, thereby slowing down the - backup process on spinning discs a lot. + The core archiver code and the complementary code for the `backup` command was rewritten + completely. This resolves very annoying issues such as 549. The first backup with this release + of restic will likely result in all files being re-read locally, so it will take a lot longer. The + next backup after that will be fast again. + + Basically, with the old code, restic took the last path component of each to-be-saved file or + directory as the top-level file/directory within the snapshot. This meant that when called as + `restic backup /home/user/foo`, the snapshot would contain the files in the directory + `/home/user/foo` as `/foo`. + + This is not the case any more with the new archiver code. Now, restic works very similar to what + `tar` does: When restic is called with an absolute path to save, then it'll preserve the + directory structure within the snapshot. For the example above, the snapshot would contain + the files in the directory within `/home/user/foo` in the snapshot. For relative + directories, it only preserves the relative path components. So `restic backup user/foo` + will save the files as `/user/foo` in the snapshot. + + While we were at it, the status display and notification system was completely rewritten. By + default, restic now shows which files are currently read (unless `--quiet` is specified) in a + multi-line status display. + + The `backup` command also gained a new option: `--verbose`. It can be specified once (which + prints a bit more detail what restic is doing) or twice (which prints a line for each + file/directory restic encountered, together with some statistics). + + Another issue that was resolved is the new code only reads two files at most. The old code would + read way too many files in parallel, thereby slowing down the backup process on spinning discs a + lot. https://github.com/restic/restic/issues/549 https://github.com/restic/restic/issues/1286 @@ -4513,11 +4347,11 @@ restic users. The changes are ordered by importance. 
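The path handling described for the reworked archiver above can be illustrated with a small sketch (not restic's code): absolute targets keep their full directory structure inside the snapshot, while relative targets keep only their relative components, rooted at `/`.

```go
// Rough illustration (not restic's code) of the snapshot path behaviour
// described above for absolute and relative backup targets.
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

// snapshotPath sketches where a backup target would show up inside a snapshot.
func snapshotPath(target string) string {
	clean := filepath.ToSlash(filepath.Clean(target))
	if path.IsAbs(clean) {
		return clean // absolute targets keep their directory structure
	}
	return "/" + clean // relative targets keep only their relative components
}

func main() {
	for _, target := range []string{"/home/user/foo", "user/foo"} {
		fmt.Printf("%-16s -> %s\n", target, snapshotPath(target))
	}
}
```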
* Enhancement #1433: Support UTF-16 encoding and process Byte Order Mark - On Windows, text editors commonly leave a Byte Order Mark at the beginning of - the file to define which encoding is used (oftentimes UTF-16). We've added code - to support processing the BOMs in text files, like the exclude files, the - password file and the file passed via `--files-from`. This does not apply to any - file being saved in a backup, those are not touched and archived as they are. + On Windows, text editors commonly leave a Byte Order Mark at the beginning of the file to define + which encoding is used (oftentimes UTF-16). We've added code to support processing the BOMs in + text files, like the exclude files, the password file and the file passed via `--files-from`. + This does not apply to any file being saved in a backup, those are not touched and archived as they + are. https://github.com/restic/restic/issues/1433 https://github.com/restic/restic/issues/1738 @@ -4525,9 +4359,9 @@ restic users. The changes are ordered by importance. * Enhancement #1477: Accept AWS_SESSION_TOKEN for the s3 backend - Before, it was not possible to use s3 backend with AWS temporary security - credentials(with AWS_SESSION_TOKEN). This change gives higher priority to - credentials.EnvAWS credentials provider. + Before, it was not possible to use s3 backend with AWS temporary security credentials(with + AWS_SESSION_TOKEN). This change gives higher priority to credentials.EnvAWS credentials + provider. https://github.com/restic/restic/issues/1477 https://github.com/restic/restic/pull/1479 @@ -4535,24 +4369,23 @@ restic users. The changes are ordered by importance. * Enhancement #1552: Use Google Application Default credentials - Google provide libraries to generate appropriate credentials with various - fallback sources. This change uses the library to generate our GCS client, which - allows us to make use of these extra methods. + Google provide libraries to generate appropriate credentials with various fallback + sources. This change uses the library to generate our GCS client, which allows us to make use of + these extra methods. - This should be backward compatible with previous restic behaviour while adding - the additional capabilities to auth from Google's internal metadata endpoints. - For users running restic in GCP this can make authentication far easier than it - was before. + This should be backward compatible with previous restic behaviour while adding the + additional capabilities to auth from Google's internal metadata endpoints. For users + running restic in GCP this can make authentication far easier than it was before. https://github.com/restic/restic/pull/1552 https://developers.google.com/identity/protocols/application-default-credentials * Enhancement #1561: Allow using rclone to access other services - We've added the ability to use rclone to store backup data on all backends that - it supports. This was done in collaboration with Nick, the author of rclone. You - can now use it to first configure a service, then restic manages the rest - (starting and stopping rclone). For details, please see the manual. + We've added the ability to use rclone to store backup data on all backends that it supports. This + was done in collaboration with Nick, the author of rclone. You can now use it to first configure a + service, then restic manages the rest (starting and stopping rclone). For details, please see + the manual. 
https://github.com/restic/restic/issues/1561 https://github.com/restic/restic/pull/1657 @@ -4560,9 +4393,9 @@ restic users. The changes are ordered by importance. * Enhancement #1648: Ignore AWS permission denied error when creating a repository - It's not possible to use s3 backend scoped to a subdirectory(with specific - permissions). Restic doesn't try to create repository in a subdirectory, when - 'bucket exists' of parent directory check fails due to permission issues. + It's not possible to use s3 backend scoped to a subdirectory(with specific permissions). + Restic doesn't try to create repository in a subdirectory, when 'bucket exists' of parent + directory check fails due to permission issues. https://github.com/restic/restic/pull/1648 @@ -4572,27 +4405,25 @@ restic users. The changes are ordered by importance. * Enhancement #1665: Improve cache handling for `restic check` - For safety reasons, restic does not use a local metadata cache for the `restic - check` command, so that data is loaded from the repository and restic can check - it's in good condition. When the cache is disabled, restic will fetch each tiny - blob needed for checking the integrity using a separate backend request. For - non-local backends, that will take a long time, and depending on the backend - (e.g. B2) may also be much more expensive. + For safety reasons, restic does not use a local metadata cache for the `restic check` command, + so that data is loaded from the repository and restic can check it's in good condition. When the + cache is disabled, restic will fetch each tiny blob needed for checking the integrity using a + separate backend request. For non-local backends, that will take a long time, and depending on + the backend (e.g. B2) may also be much more expensive. This PR adds a few commits which will change the behavior as follows: - * When `restic check` is called without any additional parameters, it will build - a new cache in a temporary directory, which is removed at the end of the check. - This way, we'll get readahead for metadata files (so restic will fetch the whole - file when the first blob from the file is requested), but all data is freshly - fetched from the storage backend. This is the default behavior and will work for - almost all users. + * When `restic check` is called without any additional parameters, it will build a new cache in a + temporary directory, which is removed at the end of the check. This way, we'll get readahead for + metadata files (so restic will fetch the whole file when the first blob from the file is + requested), but all data is freshly fetched from the storage backend. This is the default + behavior and will work for almost all users. - * When `restic check` is called with `--with-cache`, the default on-disc cache - is used. This behavior hasn't changed since the cache was introduced. + * When `restic check` is called with `--with-cache`, the default on-disc cache is used. This + behavior hasn't changed since the cache was introduced. - * When `--no-cache` is specified, restic falls back to the old behavior, and - read all tiny blobs in separate requests. + * When `--no-cache` is specified, restic falls back to the old behavior, and read all tiny blobs + in separate requests. https://github.com/restic/restic/issues/1665 https://github.com/restic/restic/issues/1694 @@ -4600,45 +4431,44 @@ restic users. The changes are ordered by importance. 
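A minimal sketch of the "temporary cache, removed at the end of the check" behaviour described for #1665 above; the directory prefix and the `runCheck` stand-in are purely illustrative assumptions.

```go
// Minimal sketch of a throw-away cache directory that lives only for the
// duration of one check run; runCheck is a purely illustrative stand-in.
package main

import (
	"fmt"
	"os"
)

func main() {
	cacheDir, err := os.MkdirTemp("", "restic-check-cache-")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Remove the cache again once the check is done, so nothing stale remains.
	defer os.RemoveAll(cacheDir)

	runCheck(cacheDir)
}

// runCheck stands in for the real integrity check, which would read metadata
// through the temporary cache and fetch data blobs directly from the backend.
func runCheck(cacheDir string) {
	fmt.Println("checking repository using temporary cache at", cacheDir)
}
```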
* Enhancement #1709: Improve messages `restic check` prints - Some messages `restic check` prints are not really errors, so from now on restic - does not treat them as errors any more and exits cleanly. + Some messages `restic check` prints are not really errors, so from now on restic does not treat + them as errors any more and exits cleanly. https://github.com/restic/restic/pull/1709 https://forum.restic.net/t/what-is-the-standard-procedure-to-follow-if-a-backup-or-restore-is-interrupted/571/2 * Enhancement #1721: Add `cache` command to list cache dirs - The command `cache` was added, it allows listing restic's cache directoriers - together with the last usage. It also allows removing old cache dirs without - having to access a repo, via `restic cache --cleanup` + The command `cache` was added, it allows listing restic's cache directoriers together with + the last usage. It also allows removing old cache dirs without having to access a repo, via + `restic cache --cleanup` https://github.com/restic/restic/issues/1721 https://github.com/restic/restic/pull/1749 * Enhancement #1735: Allow keeping a time range of snaphots - We've added the `--keep-within` option to the `forget` command. It instructs - restic to keep all snapshots within the given duration since the newest - snapshot. For example, running `restic forget --keep-within 5m7d` will keep all - snapshots which have been made in the five months and seven days since the - latest snapshot. + We've added the `--keep-within` option to the `forget` command. It instructs restic to keep + all snapshots within the given duration since the newest snapshot. For example, running + `restic forget --keep-within 5m7d` will keep all snapshots which have been made in the five + months and seven days since the latest snapshot. https://github.com/restic/restic/pull/1735 * Enhancement #1758: Allow saving OneDrive folders in Windows - Restic now contains a bugfix to two libraries, which allows saving OneDrive - folders in Windows. In order to use the newer versions of the libraries, the - minimal version required to compile restic is now Go 1.9. + Restic now contains a bugfix to two libraries, which allows saving OneDrive folders in + Windows. In order to use the newer versions of the libraries, the minimal version required to + compile restic is now Go 1.9. https://github.com/restic/restic/issues/1758 https://github.com/restic/restic/pull/1765 * Enhancement #1782: Use default AWS credentials chain for S3 backend - Adds support for file credentials to the S3 backend (e.g. ~/.aws/credentials), - and reorders the credentials chain for the S3 backend to match AWS's standard, - which is static credentials, env vars, credentials file, and finally remote. + Adds support for file credentials to the S3 backend (e.g. ~/.aws/credentials), and reorders + the credentials chain for the S3 backend to match AWS's standard, which is static credentials, + env vars, credentials file, and finally remote. https://github.com/restic/restic/pull/1782 @@ -4661,34 +4491,32 @@ restic users. The changes are ordered by importance. * Bugfix #1633: Fixed unexpected 'pack file cannot be listed' error - Due to a regression introduced in 0.8.2, the `rebuild-index` and `prune` - commands failed to read pack files with size of 587, 588, 589 or 590 bytes. + Due to a regression introduced in 0.8.2, the `rebuild-index` and `prune` commands failed to + read pack files with size of 587, 588, 589 or 590 bytes. 
https://github.com/restic/restic/issues/1633 https://github.com/restic/restic/pull/1635 * Bugfix #1638: Handle errors listing files in the backend - A user reported in the forum that restic completes a backup although a - concurrent `prune` operation was running. A few error messages were printed, but - the backup was attempted and completed successfully. No error code was returned. + A user reported in the forum that restic completes a backup although a concurrent `prune` + operation was running. A few error messages were printed, but the backup was attempted and + completed successfully. No error code was returned. - This should not happen: The repository is exclusively locked during `prune`, so - when `restic backup` is run in parallel, it should abort and return an error - code instead. + This should not happen: The repository is exclusively locked during `prune`, so when `restic + backup` is run in parallel, it should abort and return an error code instead. - It was found that the bug was in the code introduced only recently, which - retries a List() operation on the backend should that fail. It is now corrected. + It was found that the bug was in the code introduced only recently, which retries a List() + operation on the backend should that fail. It is now corrected. https://github.com/restic/restic/pull/1638 https://forum.restic.net/t/restic-backup-returns-0-exit-code-when-already-locked/484 * Bugfix #1641: Ignore files with invalid names in the repo - The release 0.8.2 introduced a bug: when restic encounters files in the repo - which do not have a valid name, it tries to load a file with a name of lots of - zeroes instead of ignoring it. This is now resolved, invalid file names are just - ignored. + The release 0.8.2 introduced a bug: when restic encounters files in the repo which do not have a + valid name, it tries to load a file with a name of lots of zeroes instead of ignoring it. This is now + resolved, invalid file names are just ignored. https://github.com/restic/restic/issues/1641 https://github.com/restic/restic/pull/1643 @@ -4696,9 +4524,8 @@ restic users. The changes are ordered by importance. * Enhancement #1497: Add --read-data-subset flag to check command - This change introduces ability to check integrity of a subset of repository data - packs. This can be used to spread integrity check of larger repositories over a - period of time. + This change introduces ability to check integrity of a subset of repository data packs. This + can be used to spread integrity check of larger repositories over a period of time. https://github.com/restic/restic/issues/1497 https://github.com/restic/restic/pull/1556 @@ -4711,22 +4538,21 @@ restic users. The changes are ordered by importance. * Enhancement #1623: Don't check for presence of files in the backend before writing - Before, all backend implementations were required to return an error if the file - that is to be written already exists in the backend. For most backends, that - means making a request (e.g. via HTTP) and returning an error when the file - already exists. + Before, all backend implementations were required to return an error if the file that is to be + written already exists in the backend. For most backends, that means making a request (e.g. via + HTTP) and returning an error when the file already exists. 
- This is not accurate, the file could have been created between the HTTP request - testing for it, and when writing starts, so we've relaxed this requeriment, - which saves one additional HTTP request per newly added file. + This is not accurate, the file could have been created between the HTTP request testing for it, + and when writing starts, so we've relaxed this requeriment, which saves one additional HTTP + request per newly added file. https://github.com/restic/restic/pull/1623 * Enhancement #1634: Upgrade B2 client library, reduce HTTP requests - We've upgraded the B2 client library restic uses to access BackBlaze B2. This - reduces the number of HTTP requests needed to upload a new file from two to one, - which should improve throughput to B2. + We've upgraded the B2 client library restic uses to access BackBlaze B2. This reduces the + number of HTTP requests needed to upload a new file from two to one, which should improve + throughput to B2. https://github.com/restic/restic/pull/1634 @@ -4764,10 +4590,10 @@ restic users. The changes are ordered by importance. * Bugfix #1512: Restore directory permissions as the last step - This change allows restoring into directories that were not writable during - backup. Before, restic created the directory, set the read-only mode and then - failed to create files in the directory. This change now restores the directory - (with its permissions) as the very last step. + This change allows restoring into directories that were not writable during backup. Before, + restic created the directory, set the read-only mode and then failed to create files in the + directory. This change now restores the directory (with its permissions) as the very last + step. https://github.com/restic/restic/issues/1512 https://github.com/restic/restic/pull/1536 @@ -4779,47 +4605,43 @@ restic users. The changes are ordered by importance. * Bugfix #1589: Complete intermediate index upload - After a user posted a comprehensive report of what he observed, we were able to - find a bug and correct it: During backup, restic uploads so-called - "intermediate" index files. When the backup finishes during a transfer of such - an intermediate index, the upload is cancelled, but the backup is finished - without an error. This leads to an inconsistent state, where the snapshot - references data that is contained in the repo, but is not referenced in any - index. + After a user posted a comprehensive report of what he observed, we were able to find a bug and + correct it: During backup, restic uploads so-called "intermediate" index files. When the + backup finishes during a transfer of such an intermediate index, the upload is cancelled, but + the backup is finished without an error. This leads to an inconsistent state, where the + snapshot references data that is contained in the repo, but is not referenced in any index. - The situation can be resolved by building a new index with `rebuild-index`, but - looks very confusing at first. Since all the data got uploaded to the repo - successfully, there was no risk of data loss, just minor inconvenience for our - users. + The situation can be resolved by building a new index with `rebuild-index`, but looks very + confusing at first. Since all the data got uploaded to the repo successfully, there was no risk + of data loss, just minor inconvenience for our users. 
https://github.com/restic/restic/pull/1589 https://forum.restic.net/t/error-loading-tree-check-prune-and-forget-gives-error-b2-backend/406 * Bugfix #1590: Strip spaces for lines read via --files-from - Leading and trailing spaces in lines read via `--files-from` are now stripped, - so it behaves the same as with lines read via `--exclude-file`. + Leading and trailing spaces in lines read via `--files-from` are now stripped, so it behaves + the same as with lines read via `--exclude-file`. https://github.com/restic/restic/issues/1590 https://github.com/restic/restic/pull/1613 * Bugfix #1594: Google Cloud Storage: Use generic HTTP transport - It was discovered that the Google Cloud Storage backend did not use the generic - HTTP transport, so things such as bandwidth limiting with `--limit-upload` did - not work. This is resolved now. + It was discovered that the Google Cloud Storage backend did not use the generic HTTP transport, + so things such as bandwidth limiting with `--limit-upload` did not work. This is resolved now. https://github.com/restic/restic/pull/1594 * Bugfix #1595: Backup: Remove bandwidth display - This commit removes the bandwidth displayed during backup process. It is - misleading and seldomly correct, because it's neither the "read bandwidth" (only - for the very first backup) nor the "upload bandwidth". Many users are confused - about (and rightly so), c.f. #1581, #1033, #1591 + This commit removes the bandwidth displayed during backup process. It is misleading and + seldomly correct, because it's neither the "read bandwidth" (only for the very first backup) + nor the "upload bandwidth". Many users are confused about (and rightly so), c.f. #1581, #1033, + #1591 - We'll eventually replace this display with something more relevant when the new - archiver code is ready. + We'll eventually replace this display with something more relevant when the new archiver code + is ready. https://github.com/restic/restic/pull/1595 @@ -4829,61 +4651,59 @@ restic users. The changes are ordered by importance. * Enhancement #1522: Add support for TLS client certificate authentication - Support has been added for using a TLS client certificate for authentication to - HTTP based backend. A file containing the PEM encoded private key and - certificate can be set using the `--tls-client-cert` option. + Support has been added for using a TLS client certificate for authentication to HTTP based + backend. A file containing the PEM encoded private key and certificate can be set using the + `--tls-client-cert` option. https://github.com/restic/restic/issues/1522 https://github.com/restic/restic/pull/1524 * Enhancement #1538: Reduce memory allocations for querying the index - This change reduces the internal memory allocations when the index data - structures in memory are queried if a blob (part of a file) already exists in - the repo. It should speed up backup a bit, and maybe even reduce RAM usage. + This change reduces the internal memory allocations when the index data structures in memory + are queried if a blob (part of a file) already exists in the repo. It should speed up backup a bit, + and maybe even reduce RAM usage. https://github.com/restic/restic/pull/1538 * Enhancement #1541: Reduce number of remote requests during repository check - This change eliminates redundant remote repository calls and significantly - improves repository check time. + This change eliminates redundant remote repository calls and significantly improves + repository check time. 
https://github.com/restic/restic/issues/1541 https://github.com/restic/restic/pull/1548 * Enhancement #1549: Speed up querying across indices and scanning existing files - This change increases the whenever a blob (part of a file) is searched for in a - restic repository. This will reduce cpu usage some when backing up files already - backed up by restic. Cpu usage is further decreased when scanning files. + This change increases the whenever a blob (part of a file) is searched for in a restic + repository. This will reduce cpu usage some when backing up files already backed up by restic. + Cpu usage is further decreased when scanning files. https://github.com/restic/restic/pull/1549 * Enhancement #1554: Fuse/mount: Correctly handle EOF, add template option - We've added the `--snapshot-template` string, which can be used to specify a - template for a snapshot directory. In addition, accessing data after the end of - a file via the fuse mount is now handled correctly. + We've added the `--snapshot-template` string, which can be used to specify a template for a + snapshot directory. In addition, accessing data after the end of a file via the fuse mount is now + handled correctly. https://github.com/restic/restic/pull/1554 * Enhancement #1564: Don't terminate ssh on SIGINT - We've reworked the code which runs the `ssh` login for the sftp backend so that - it can prompt for a password (if needed) but does not exit when the user presses - CTRL+C (SIGINT) e.g. during backup. This allows restic to properly shut down - when it receives SIGINT and remove the lock file from the repo, afterwards - exiting the `ssh` process. + We've reworked the code which runs the `ssh` login for the sftp backend so that it can prompt for a + password (if needed) but does not exit when the user presses CTRL+C (SIGINT) e.g. during + backup. This allows restic to properly shut down when it receives SIGINT and remove the lock + file from the repo, afterwards exiting the `ssh` process. https://github.com/restic/restic/pull/1564 https://github.com/restic/restic/pull/1588 * Enhancement #1567: Reduce number of backend requests for rebuild-index and prune - We've found a way to reduce then number of backend requests for the - `rebuild-index` and `prune` operations. This significantly speeds up the - operations for high-latency backends. + We've found a way to reduce then number of backend requests for the `rebuild-index` and `prune` + operations. This significantly speeds up the operations for high-latency backends. https://github.com/restic/restic/issues/1567 https://github.com/restic/restic/pull/1574 @@ -4895,11 +4715,10 @@ restic users. The changes are ordered by importance. * Enhancement #1584: Limit index file size - Before, restic would create a single new index file on `prune` or - `rebuild-index`, this may lead to memory problems when this huge index is - created and loaded again. We're now limiting the size of the index file, and - split newly created index files into several smaller ones. This allows restic to - be more memory-efficient. + Before, restic would create a single new index file on `prune` or `rebuild-index`, this may + lead to memory problems when this huge index is created and loaded again. We're now limiting the + size of the index file, and split newly created index files into several smaller ones. This + allows restic to be more memory-efficient. https://github.com/restic/restic/issues/1412 https://github.com/restic/restic/issues/979 @@ -4925,8 +4744,8 @@ restic users. 
The changes are ordered by importance. * Bugfix #1454: Correct cache dir location for Windows and Darwin - The cache directory on Windows and Darwin was not correct, instead the directory - `.cache` was used. + The cache directory on Windows and Darwin was not correct, instead the directory `.cache` was + used. https://github.com/restic/restic/pull/1454 @@ -4937,9 +4756,9 @@ restic users. The changes are ordered by importance. * Bugfix #1459: Disable handling SIGPIPE - We've disabled handling SIGPIPE again. Turns out, writing to broken TCP - connections also raised SIGPIPE, so restic exits on the first write to a broken - connection. Instead, restic should retry the request. + We've disabled handling SIGPIPE again. Turns out, writing to broken TCP connections also + raised SIGPIPE, so restic exits on the first write to a broken connection. Instead, restic + should retry the request. https://github.com/restic/restic/issues/1457 https://github.com/restic/restic/issues/1466 @@ -4947,18 +4766,16 @@ restic users. The changes are ordered by importance. * Change #1452: Do not save atime by default - By default, the access time for files and dirs is not saved any more. It is not - possible to reliably disable updating the access time during a backup, so for - the next backup the access time is different again. This means a lot of metadata - is saved. If you want to save the access time anyway, pass `--with-atime` to the - `backup` command. + By default, the access time for files and dirs is not saved any more. It is not possible to + reliably disable updating the access time during a backup, so for the next backup the access + time is different again. This means a lot of metadata is saved. If you want to save the access time + anyway, pass `--with-atime` to the `backup` command. https://github.com/restic/restic/pull/1452 * Enhancement #11: Add the `diff` command - The command `diff` was added, it allows comparing two snapshots and listing all - differences. + The command `diff` was added, it allows comparing two snapshots and listing all differences. https://github.com/restic/restic/issues/11 https://github.com/restic/restic/issues/1460 @@ -4966,18 +4783,17 @@ restic users. The changes are ordered by importance. * Enhancement #1436: Add code to detect old cache directories - We've added code to detect old cache directories of repositories that haven't - been used in a long time, restic now prints a note when it detects that such - dirs exist. Also, the option `--cleanup-cache` was added to automatically remove - such directories. That's not a problem because the cache will be rebuild once a - repo is accessed again. + We've added code to detect old cache directories of repositories that haven't been used in a + long time, restic now prints a note when it detects that such dirs exist. Also, the option + `--cleanup-cache` was added to automatically remove such directories. That's not a problem + because the cache will be rebuild once a repo is accessed again. https://github.com/restic/restic/pull/1436 * Enhancement #1439: Improve cancellation logic - The cancellation logic was improved, restic can now shut down cleanly when - requested to do so (e.g. via ctrl+c). + The cancellation logic was improved, restic can now shut down cleanly when requested to do so + (e.g. via ctrl+c). https://github.com/restic/restic/pull/1439 @@ -5012,18 +4828,17 @@ restic users. The changes are ordered by importance. 
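As an illustrative aside to #1439 above: the cancellation improvement amounts to propagating an interrupt signal through a context so work can stop at a clean point. A minimal Go sketch of that pattern (not restic's actual implementation; assumes Go 1.16+ for signal.NotifyContext):

    package main

    import (
        "context"
        "fmt"
        "os"
        "os/signal"
        "time"
    )

    func main() {
        // Cancel the context when the user presses ctrl+c (SIGINT).
        ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
        defer stop()

        select {
        case <-time.After(10 * time.Second):
            fmt.Println("work finished")
        case <-ctx.Done():
            // Clean shutdown point: release locks, flush state, then exit.
            fmt.Println("interrupted, shutting down cleanly")
        }
    }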
* Security #1445: Prevent writing outside the target directory during restore - A vulnerability was found in the restic restorer, which allowed attackers in - special circumstances to restore files to a location outside of the target - directory. Due to the circumstances we estimate this to be a low-risk - vulnerability, but urge all users to upgrade to the latest version of restic. + A vulnerability was found in the restic restorer, which allowed attackers in special + circumstances to restore files to a location outside of the target directory. Due to the + circumstances we estimate this to be a low-risk vulnerability, but urge all users to upgrade to + the latest version of restic. - Exploiting the vulnerability requires a Linux/Unix system which saves backups - via restic and a Windows systems which restores files from the repo. In - addition, the attackers need to be able to create files with arbitrary names - which are then saved to the restic repo. For example, by creating a file named - "..\test.txt" (which is a perfectly legal filename on Linux) and restoring a - snapshot containing this file on Windows, it would be written to the parent of - the target directory. + Exploiting the vulnerability requires a Linux/Unix system which saves backups via restic and + a Windows systems which restores files from the repo. In addition, the attackers need to be able + to create files with arbitrary names which are then saved to the restic repo. For example, by + creating a file named "..\test.txt" (which is a perfectly legal filename on Linux) and + restoring a snapshot containing this file on Windows, it would be written to the parent of the + target directory. We'd like to thank Tyler Spivey for reporting this responsibly! @@ -5031,36 +4846,34 @@ restic users. The changes are ordered by importance. * Bugfix #1256: Re-enable workaround for S3 backend - We've re-enabled a workaround for `minio-go` (the library we're using to access - s3 backends), this reduces memory usage. + We've re-enabled a workaround for `minio-go` (the library we're using to access s3 backends), + this reduces memory usage. https://github.com/restic/restic/issues/1256 https://github.com/restic/restic/pull/1267 * Bugfix #1291: Reuse backend TCP connections to BackBlaze B2 - A bug was discovered in the library we're using to access Backblaze, it now - reuses already established TCP connections which should be a lot faster and not - cause network failures any more. + A bug was discovered in the library we're using to access Backblaze, it now reuses already + established TCP connections which should be a lot faster and not cause network failures any + more. https://github.com/restic/restic/issues/1291 https://github.com/restic/restic/pull/1301 * Bugfix #1317: Run prune when `forget --prune` is called with just snapshot IDs - A bug in the `forget` command caused `prune` not to be run when `--prune` was - specified without a policy, e.g. when only snapshot IDs that should be forgotten - are listed manually. + A bug in the `forget` command caused `prune` not to be run when `--prune` was specified without a + policy, e.g. when only snapshot IDs that should be forgotten are listed manually. https://github.com/restic/restic/pull/1317 * Bugfix #1437: Remove implicit path `/restic` for the s3 backend - The s3 backend used the subdir `restic` within a bucket if no explicit path - after the bucket name was specified. Since this version, restic does not use - this default path any more. 
If you created a repo on s3 in a bucket without - specifying a path within the bucket, you need to add `/restic` at the end of the - repository specification to access your repo: + The s3 backend used the subdir `restic` within a bucket if no explicit path after the bucket name + was specified. Since this version, restic does not use this default path any more. If you + created a repo on s3 in a bucket without specifying a path within the bucket, you need to add + `/restic` at the end of the repository specification to access your repo: `s3:s3.amazonaws.com/bucket/restic` https://github.com/restic/restic/issues/1292 @@ -5068,35 +4881,32 @@ restic users. The changes are ordered by importance. * Enhancement #448: Sftp backend prompts for password - The sftp backend now prompts for the password if a password is necessary for - login. + The sftp backend now prompts for the password if a password is necessary for login. https://github.com/restic/restic/issues/448 https://github.com/restic/restic/pull/1270 * Enhancement #510: Add `dump` command - We've added the `dump` command which prints a file from a snapshot to stdout. - This can e.g. be used to restore files read with `backup --stdin`. + We've added the `dump` command which prints a file from a snapshot to stdout. This can e.g. be + used to restore files read with `backup --stdin`. https://github.com/restic/restic/issues/510 https://github.com/restic/restic/pull/1346 * Enhancement #1040: Add local metadata cache - We've added a local cache for metadata so that restic doesn't need to load all - metadata (snapshots, indexes, ...) from the repo each time it starts. By default - the cache is active, but there's a new global option `--no-cache` that can be - used to disable the cache. By deafult, the cache a standard cache folder for the - OS, which can be overridden with `--cache-dir`. The cache will automatically - populate, indexes and snapshots are saved as they are loaded. Cache directories - for repos that haven't been used recently can automatically be removed by restic + We've added a local cache for metadata so that restic doesn't need to load all metadata + (snapshots, indexes, ...) from the repo each time it starts. By default the cache is active, but + there's a new global option `--no-cache` that can be used to disable the cache. By deafult, the + cache a standard cache folder for the OS, which can be overridden with `--cache-dir`. The cache + will automatically populate, indexes and snapshots are saved as they are loaded. Cache + directories for repos that haven't been used recently can automatically be removed by restic with the `--cleanup-cache` option. - A related change was to by default create pack files in the repo that contain - either data or metadata, not both mixed together. This allows easy caching of - only the metadata files. The next run of `restic prune` will untangle mixed - files automatically. + A related change was to by default create pack files in the repo that contain either data or + metadata, not both mixed together. This allows easy caching of only the metadata files. The + next run of `restic prune` will untangle mixed files automatically. https://github.com/restic/restic/issues/29 https://github.com/restic/restic/issues/738 @@ -5108,8 +4918,8 @@ restic users. The changes are ordered by importance. * Enhancement #1102: Add subdirectory `ids` to fuse mount - The fuse mount now has an `ids` subdirectory which contains the snapshots below - their (short) IDs. 
+ The fuse mount now has an `ids` subdirectory which contains the snapshots below their (short) + IDs. https://github.com/restic/restic/issues/1102 https://github.com/restic/restic/pull/1299 @@ -5117,17 +4927,17 @@ restic users. The changes are ordered by importance. * Enhancement #1114: Add `--cacert` to specify TLS certificates to check against - We've added the `--cacert` option which can be used to pass one (or more) CA - certificates to restic. These are used in addition to the system CA certificates - to verify HTTPS certificates (e.g. for the REST backend). + We've added the `--cacert` option which can be used to pass one (or more) CA certificates to + restic. These are used in addition to the system CA certificates to verify HTTPS certificates + (e.g. for the REST backend). https://github.com/restic/restic/issues/1114 https://github.com/restic/restic/pull/1276 * Enhancement #1216: Add upload/download limiting - We've added support for rate limiting through `--limit-upload` and - `--limit-download` flags. + We've added support for rate limiting through `--limit-upload` and `--limit-download` + flags. https://github.com/restic/restic/issues/1216 https://github.com/restic/restic/pull/1336 @@ -5135,15 +4945,15 @@ restic users. The changes are ordered by importance. * Enhancement #1249: Add `latest` symlink in fuse mount - The directory structure in the fuse mount now exposes a symlink `latest` which - points to the latest snapshot in that particular directory. + The directory structure in the fuse mount now exposes a symlink `latest` which points to the + latest snapshot in that particular directory. https://github.com/restic/restic/pull/1249 * Enhancement #1269: Add `--compact` to `forget` command - The option `--compact` was added to the `forget` command to provide the same - compact view as the `snapshots` command. + The option `--compact` was added to the `forget` command to provide the same compact view as the + `snapshots` command. https://github.com/restic/restic/pull/1269 @@ -5156,26 +4966,25 @@ restic users. The changes are ordered by importance. * Enhancement #1274: Add `generate` command, replaces `manpage` and `autocomplete` - The `generate` command has been added, which replaces the now removed commands - `manpage` and `autocomplete`. This release of restic contains the most recent - manpages in `doc/man` and the auto-completion files for bash and zsh in - `doc/bash-completion.sh` and `doc/zsh-completion.zsh` + The `generate` command has been added, which replaces the now removed commands `manpage` and + `autocomplete`. This release of restic contains the most recent manpages in `doc/man` and the + auto-completion files for bash and zsh in `doc/bash-completion.sh` and + `doc/zsh-completion.zsh` https://github.com/restic/restic/issues/1274 https://github.com/restic/restic/pull/1282 * Enhancement #1281: Google Cloud Storage backend needs less permissions - The Google Cloud Storage backend no longer requires the service account to have - the `storage.buckets.get` permission ("Storage Admin" role) in `restic init` if - the bucket already exists. + The Google Cloud Storage backend no longer requires the service account to have the + `storage.buckets.get` permission ("Storage Admin" role) in `restic init` if the bucket + already exists. https://github.com/restic/restic/pull/1281 * Enhancement #1319: Make `check` print `no errors found` explicitly - The `check` command now explicetly prints `No errors were found` when no errors - could be found. 
+ The `check` command now explicetly prints `No errors were found` when no errors could be found. https://github.com/restic/restic/issues/1303 https://github.com/restic/restic/pull/1319 @@ -5186,8 +4995,8 @@ restic users. The changes are ordered by importance. * Enhancement #1367: Allow comments in files read from via `--file-from` - When the list of files/dirs to be saved is read from a file with `--files-from`, - comment lines (starting with `#`) are now ignored. + When the list of files/dirs to be saved is read from a file with `--files-from`, comment lines + (starting with `#`) are now ignored. https://github.com/restic/restic/issues/1367 https://github.com/restic/restic/pull/1368 @@ -5205,10 +5014,9 @@ restic users. The changes are ordered by importance. * Bugfix #1246: List all files stored in Google Cloud Storage - For large backups stored in Google Cloud Storage, the `prune` command fails - because listing only returns the first 1000 files. This has been corrected, no - data is lost in the process. In addition, a plausibility check was added to - `prune`. + For large backups stored in Google Cloud Storage, the `prune` command fails because listing + only returns the first 1000 files. This has been corrected, no data is lost in the process. In + addition, a plausibility check was added to `prune`. https://github.com/restic/restic/issues/1246 https://github.com/restic/restic/pull/1247 @@ -5246,28 +5054,26 @@ restic users. The changes are ordered by importance. * Bugfix #1167: Do not create a local repo unless `init` is used - When a restic command other than `init` is used with a local repository and the - repository directory does not exist, restic creates the directory structure. - That's an error, only the `init` command should create the dir. + When a restic command other than `init` is used with a local repository and the repository + directory does not exist, restic creates the directory structure. That's an error, only the + `init` command should create the dir. https://github.com/restic/restic/issues/1167 https://github.com/restic/restic/pull/1182 * Bugfix #1191: Make sure to write profiling files on interrupt - Since a few releases restic had the ability to write profiling files for memory - and CPU usage when `debug` is enabled. It was discovered that when restic is - interrupted (ctrl+c is pressed), the proper shutdown hook is not run. This is - now corrected. + Since a few releases restic had the ability to write profiling files for memory and CPU usage + when `debug` is enabled. It was discovered that when restic is interrupted (ctrl+c is + pressed), the proper shutdown hook is not run. This is now corrected. https://github.com/restic/restic/pull/1191 * Enhancement #317: Add `--exclude-caches` and `--exclude-if-present` - A new option `--exclude-caches` was added that allows excluding cache - directories (that are tagged as such). This is a special case of a more generic - option `--exclude-if-present` which excludes a directory if a file with a - specific name (and contents) is present. + A new option `--exclude-caches` was added that allows excluding cache directories (that are + tagged as such). This is a special case of a more generic option `--exclude-if-present` which + excludes a directory if a file with a specific name (and contents) is present. https://github.com/restic/restic/issues/317 https://github.com/restic/restic/pull/1170 @@ -5288,17 +5094,16 @@ restic users. The changes are ordered by importance. 
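To make the idea behind #317's `--exclude-if-present` concrete, here is a standalone Go sketch of the check (not restic's code; CACHEDIR.TAG is used as the marker name only because it is the common cache-tagging convention):

    package main

    import (
        "fmt"
        "io/fs"
        "os"
        "path/filepath"
    )

    func main() {
        marker := "CACHEDIR.TAG" // marker file whose presence excludes a directory
        _ = filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
            if err != nil {
                return err
            }
            if d.IsDir() {
                if _, statErr := os.Stat(filepath.Join(path, marker)); statErr == nil {
                    // Directory contains the marker file: skip it and its contents.
                    return filepath.SkipDir
                }
            }
            fmt.Println("would back up:", path)
            return nil
        })
    }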
* Enhancement #1126: Use the standard Go git repository layout, use `dep` for vendoring - The git repository layout was changed to resemble the layout typically used in - Go projects, we're not using `gb` for building restic any more and vendoring the - dependencies is now taken care of by `dep`. + The git repository layout was changed to resemble the layout typically used in Go projects, + we're not using `gb` for building restic any more and vendoring the dependencies is now taken + care of by `dep`. https://github.com/restic/restic/pull/1126 * Enhancement #1132: Make `key` command always prompt for a password - The `key` command now prompts for a password even if the original password to - access a repo has been specified via the `RESTIC_PASSWORD` environment variable - or a password file. + The `key` command now prompts for a password even if the original password to access a repo has + been specified via the `RESTIC_PASSWORD` environment variable or a password file. https://github.com/restic/restic/issues/1132 https://github.com/restic/restic/pull/1133 @@ -5315,8 +5120,8 @@ restic users. The changes are ordered by importance. * Enhancement #1149: Add support for storing backups on Microsoft Azure Blob Storage - The library we're using to access the service requires Go 1.8, so restic now - needs at least Go 1.8. + The library we're using to access the service requires Go 1.8, so restic now needs at least Go + 1.8. https://github.com/restic/restic/issues/609 https://github.com/restic/restic/pull/1149 @@ -5342,8 +5147,8 @@ restic users. The changes are ordered by importance. * Enhancement #1218: Add `--compact` to `snapshots` command - The option `--compact` was added to the `snapshots` command to get a better - overview of the snapshots in a repo. It limits each snapshot to a single line. + The option `--compact` was added to the `snapshots` command to get a better overview of the + snapshots in a repo. It limits each snapshot to a single line. https://github.com/restic/restic/issues/1218 https://github.com/restic/restic/pull/1223 @@ -5367,19 +5172,18 @@ restic users. The changes are ordered by importance. * Bugfix #1115: Fix `prune`, only include existing files in indexes - A bug was found (and corrected) in the index rebuilding after prune, which led - to indexes which include blobs that were not present in the repo any more. There - were already checks in place which detected this situation and aborted with an - error message. A new run of either `prune` or `rebuild-index` corrected the - index files. This is now fixed and a test has been added to detect this. + A bug was found (and corrected) in the index rebuilding after prune, which led to indexes which + include blobs that were not present in the repo any more. There were already checks in place + which detected this situation and aborted with an error message. A new run of either `prune` or + `rebuild-index` corrected the index files. This is now fixed and a test has been added to detect + this. https://github.com/restic/restic/pull/1115 * Enhancement #1055: Create subdirs below `data/` for local/sftp backends - The local and sftp backends now create the subdirs below `data/` on open/init. - This way, restic makes sure that they always exist. This is connected to an - issue for the sftp server. + The local and sftp backends now create the subdirs below `data/` on open/init. This way, restic + makes sure that they always exist. This is connected to an issue for the sftp server. 
https://github.com/restic/restic/issues/1055 https://github.com/restic/rest-server/pull/11#issuecomment-309879710 @@ -5388,18 +5192,17 @@ restic users. The changes are ordered by importance. * Enhancement #1067: Allow loading credentials for s3 from IAM - When no S3 credentials are specified in the environment variables, restic now - tries to load credentials from an IAM instance profile when the s3 backend is - used. + When no S3 credentials are specified in the environment variables, restic now tries to load + credentials from an IAM instance profile when the s3 backend is used. https://github.com/restic/restic/issues/1067 https://github.com/restic/restic/pull/1086 * Enhancement #1073: Add `migrate` cmd to migrate from `s3legacy` to `default` layout - The `migrate` command for changing the `s3legacy` layout to the `default` layout - for s3 backends has been improved: It can now be restarted with `restic migrate - --force s3_layout` and automatically retries operations on error. + The `migrate` command for changing the `s3legacy` layout to the `default` layout for s3 + backends has been improved: It can now be restarted with `restic migrate --force s3_layout` + and automatically retries operations on error. https://github.com/restic/restic/issues/1073 https://github.com/restic/restic/pull/1075 @@ -5439,18 +5242,18 @@ restic users. The changes are ordered by importance. * Bugfix #965: Switch to `default` repo layout for the s3 backend - The default layout for the s3 backend is now `default` (instead of `s3legacy`). - Also, there's a new `migrate` command to convert an existing repo, it can be run - like this: `restic migrate s3_layout` + The default layout for the s3 backend is now `default` (instead of `s3legacy`). Also, there's a + new `migrate` command to convert an existing repo, it can be run like this: `restic migrate + s3_layout` https://github.com/restic/restic/issues/965 https://github.com/restic/restic/pull/1004 * Bugfix #1013: Switch back to using the high-level minio-go API for s3 - For the s3 backend we're back to using the high-level API the s3 client library - for uploading data, a few users reported dropped connections (which the library - will automatically retry now). + For the s3 backend we're back to using the high-level API the s3 client library for uploading + data, a few users reported dropped connections (which the library will automatically retry + now). https://github.com/restic/restic/issues/1013 https://github.com/restic/restic/issues/1023 @@ -5463,10 +5266,9 @@ restic users. The changes are ordered by importance. * Enhancement #636: Add dirs `tags` and `hosts` to fuse mount - The fuse mount now has two more directories: `tags` contains a subdir for each - tag, which in turn contains only the snapshots that have this tag. The subdir - `hosts` contains a subdir for each host that has a snapshot, and the subdir - contains the snapshots for that host. + The fuse mount now has two more directories: `tags` contains a subdir for each tag, which in turn + contains only the snapshots that have this tag. The subdir `hosts` contains a subdir for each + host that has a snapshot, and the subdir contains the snapshots for that host. https://github.com/restic/restic/issues/636 https://github.com/restic/restic/pull/1050 @@ -5478,9 +5280,8 @@ restic users. The changes are ordered by importance. 
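To picture the fuse layout from #636, a small Go sketch that lists the snapshots of the current host through such a mount (illustrative only; the mountpoint path is hypothetical and assumes `restic mount` is running there):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        mountpoint := "/mnt/restic" // hypothetical: where `restic mount` is active
        host, _ := os.Hostname()
        entries, err := os.ReadDir(filepath.Join(mountpoint, "hosts", host))
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        for _, e := range entries {
            fmt.Println("snapshot dir:", e.Name())
        }
    }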
* Enhancement #989: Improve performance of the `find` command - Improved performance for the `find` command: Restic recognizes paths it has - already checked for the files in question, so the number of backend requests is - reduced a lot. + Improved performance for the `find` command: Restic recognizes paths it has already checked + for the files in question, so the number of backend requests is reduced a lot. https://github.com/restic/restic/issues/989 https://github.com/restic/restic/pull/993 @@ -5493,17 +5294,16 @@ restic users. The changes are ordered by importance. * Enhancement #1021: Detect invalid backend name and print error - Restic now tries to detect when an invalid/unknown backend is used and returns - an error message. + Restic now tries to detect when an invalid/unknown backend is used and returns an error + message. https://github.com/restic/restic/issues/1021 https://github.com/restic/restic/pull/1070 * Enhancement #1029: Remove invalid pack files when `prune` is run - The `prune` command has been improved and will now remove invalid pack files, - for example files that have not been uploaded completely because a backup was - interrupted. + The `prune` command has been improved and will now remove invalid pack files, for example files + that have not been uploaded completely because a backup was interrupted. https://github.com/restic/restic/issues/1029 https://github.com/restic/restic/pull/1036 @@ -5523,24 +5323,24 @@ restic users. The changes are ordered by importance. * Enhancement #974: Remove regular status reports - Regular status report: We've removed the status report that was printed every 10 - seconds when restic is run non-interactively. You can still force reporting the - current status by sending a `USR1` signal to the process. + Regular status report: We've removed the status report that was printed every 10 seconds when + restic is run non-interactively. You can still force reporting the current status by sending a + `USR1` signal to the process. https://github.com/restic/restic/pull/974 * Enhancement #981: Remove temporary path from binary in `build.go` - The `build.go` now strips the temporary directory used for compilation from the - binary. This is the first step in enabling reproducible builds. + The `build.go` now strips the temporary directory used for compilation from the binary. This + is the first step in enabling reproducible builds. https://github.com/restic/restic/pull/981 * Enhancement #985: Allow multiple parallel idle HTTP connections - Backends based on HTTP now allow several idle connections in parallel. This is - especially important for the REST backend, which (when used with a local server) - may create a lot connections and exhaust available ports quickly. + Backends based on HTTP now allow several idle connections in parallel. This is especially + important for the REST backend, which (when used with a local server) may create a lot + connections and exhaust available ports quickly. https://github.com/restic/restic/issues/985 https://github.com/restic/restic/pull/986 @@ -5560,22 +5360,21 @@ restic users. The changes are ordered by importance. * Enhancement #957: Make `forget` consistent - The `forget` command was corrected to be more consistent in which snapshots are - to be forgotten. It is possible that the new code removes more snapshots than - before, so please review what would be deleted by using the `--dry-run` option. + The `forget` command was corrected to be more consistent in which snapshots are to be + forgotten. 
It is possible that the new code removes more snapshots than before, so please + review what would be deleted by using the `--dry-run` option. https://github.com/restic/restic/issues/953 https://github.com/restic/restic/pull/957 * Enhancement #962: Improve memory and runtime for the s3 backend - We've updated the library used for accessing s3, switched to using a lower level - API and added caching for some requests. This lead to a decrease in memory usage - and a great speedup. In addition, we added benchmark functions for all backends, - so we can track improvements over time. The Continuous Integration test service - we're using (Travis) now runs the s3 backend tests not only against a Minio - server, but also against the Amazon s3 live service, so we should be notified of - any regressions much sooner. + We've updated the library used for accessing s3, switched to using a lower level API and added + caching for some requests. This lead to a decrease in memory usage and a great speedup. In + addition, we added benchmark functions for all backends, so we can track improvements over + time. The Continuous Integration test service we're using (Travis) now runs the s3 backend + tests not only against a Minio server, but also against the Amazon s3 live service, so we should + be notified of any regressions much sooner. https://github.com/restic/restic/pull/962 https://github.com/restic/restic/pull/960 @@ -5585,12 +5384,11 @@ restic users. The changes are ordered by importance. * Enhancement #966: Unify repository layout for all backends - Up to now the s3 backend used a special repository layout. We've decided to - unify the repository layout and implemented the default layout also for the s3 - backend. For creating a new repository on s3 with the default layout, use - `restic -o s3.layout=default init`. For further commands the option is not - necessary any more, restic will automatically detect the correct layout to use. - A future version will switch to the default layout for new repositories. + Up to now the s3 backend used a special repository layout. We've decided to unify the repository + layout and implemented the default layout also for the s3 backend. For creating a new + repository on s3 with the default layout, use `restic -o s3.layout=default init`. For further + commands the option is not necessary any more, restic will automatically detect the correct + layout to use. A future version will switch to the default layout for new repositories. https://github.com/restic/restic/issues/965 https://github.com/restic/restic/pull/966 From fe9f142b5249f7db1a7f2bad1bedf9321c885e51 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Jul 2024 21:25:34 +0200 Subject: [PATCH 406/893] Add version for 0.16.5 --- VERSION | 2 +- cmd/restic/global.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index 5f2491c5adc..19270385eaf 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.16.4 +0.16.5 diff --git a/cmd/restic/global.go b/cmd/restic/global.go index c179e8d3309..9e2a8b26117 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -43,7 +43,7 @@ import ( "golang.org/x/term" ) -var version = "0.16.4-dev (compiled manually)" +var version = "0.16.5" // TimeFormat is the format used for all timestamps printed by restic. 
const TimeFormat = "2006-01-02 15:04:05" From 5820dccaef42a5ae4b6fe34e44b905fe73e2d3ec Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Jul 2024 21:25:34 +0200 Subject: [PATCH 407/893] Set development version for 0.16.5 --- cmd/restic/global.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 9e2a8b26117..cecd79ffb33 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -43,7 +43,7 @@ import ( "golang.org/x/term" ) -var version = "0.16.5" +var version = "0.16.5-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" From f02cb8eb62b95eabdf8fa8462ecca7616691f13a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 20:50:05 +0000 Subject: [PATCH 408/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.6.0 to 1.7.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.6.0...sdk/azcore/v1.7.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 098fe0613a1..673744cf28b 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.41.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.3.1-0.20240611061849-e4651fbc304d diff --git a/go.sum b/go.sum index bd18fa18e4a..6d92c4aad43 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,8 @@ cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RB cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 h1:1nGuui+4POelzDwI7RG56yfQJHCnKvwfMoU7VsEp+Zg= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 h1:H+U3Gk9zY56G3u872L82bk4thcsy2Gghb9ExT4Zvm1o= github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax62XQTd0N4aAqSE5E0DulSpVYK7vc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= From a60643de9c80d5c35c8296aa52c8d33e8c6100d9 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Wed, 3 Jul 2024 14:31:11 
+0900 Subject: [PATCH 409/893] docs: update 045_working_with_repos.rst colums -> columns --- doc/045_working_with_repos.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 9d6167895cd..6b96666935a 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -135,7 +135,7 @@ as separator. /home/user /home/user/work.txt -To show more details about the files in a snapshot, you can use the ``--long`` option. The colums include +To show more details about the files in a snapshot, you can use the ``--long`` option. The columns include file permissions, UID, GID, file size, modification time and file path. For scripting usage, the ``ls`` command supports the ``--json`` flag; the JSON output format is described at :ref:`ls json`. From ac002293864470cb826cb43f7ec5f4cde6460766 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Sz=C3=A9pe?= Date: Mon, 1 Jul 2024 22:45:59 +0000 Subject: [PATCH 410/893] Fix typos --- changelog/unreleased/issue-1786 | 6 +++--- cmd/restic/cmd_forget_integration_test.go | 2 +- cmd/restic/cmd_prune.go | 14 ++++++------- cmd/restic/cmd_prune_integration_test.go | 4 ++-- cmd/restic/cmd_snapshots.go | 10 +++++----- cmd/restic/lock.go | 4 ++-- doc/030_preparing_a_new_repo.rst | 4 ++-- doc/045_working_with_repos.rst | 2 +- doc/077_troubleshooting.rst | 4 ++-- internal/backend/cache/backend_test.go | 4 ++-- internal/bloblru/cache_test.go | 4 ++-- internal/fs/file_unix.go | 2 +- internal/fs/fs_local_vss.go | 2 +- .../repository/index/associated_data_test.go | 2 +- internal/repository/index/indexmap_test.go | 2 +- internal/repository/prune.go | 8 ++++---- internal/repository/prune_test.go | 6 +++--- internal/restic/node_windows.go | 2 +- internal/restic/node_windows_test.go | 4 ++-- internal/restorer/restorer.go | 2 +- internal/restorer/restorer_test.go | 2 +- internal/restorer/restorer_unix_test.go | 2 +- internal/restorer/restorer_windows_test.go | 20 +++++++++---------- 23 files changed, 56 insertions(+), 56 deletions(-) diff --git a/changelog/unreleased/issue-1786 b/changelog/unreleased/issue-1786 index bdd0d48c37d..87d4942c8b2 100644 --- a/changelog/unreleased/issue-1786 +++ b/changelog/unreleased/issue-1786 @@ -1,6 +1,6 @@ Enhancement: Support repositories with empty password -Restic refused to create or operate on repositories with an emtpy password. +Restic refused to create or operate on repositories with an empty password. Using the new option `--insecure-no-password` it is now possible to disable this check. Restic will not prompt for a password when using this option. For security reasons, the option must always be specified when operating on @@ -10,8 +10,8 @@ Specifying `--insecure-no-password` while also passing a password to restic via a CLI option or via environment variable results in an error. The `init` and `copy` command also support the option `--from-insecure-no-password` -which applies to the source repository. The `key add` and `key passwd` comands -include the `--new-insecure-no-password` option to add or set an emtpy password. +which applies to the source repository. The `key add` and `key passwd` commands +include the `--new-insecure-no-password` option to add or set an empty password. 
https://github.com/restic/restic/issues/1786 https://github.com/restic/restic/issues/4326 diff --git a/cmd/restic/cmd_forget_integration_test.go b/cmd/restic/cmd_forget_integration_test.go index 06487a84126..96dd7c63e29 100644 --- a/cmd/restic/cmd_forget_integration_test.go +++ b/cmd/restic/cmd_forget_integration_test.go @@ -54,7 +54,7 @@ func TestRunForgetSafetyNet(t *testing.T) { err = testRunForgetMayFail(env.gopts, ForgetOptions{}) rtest.Assert(t, strings.Contains(err.Error(), `no policy was specified, no snapshots will be removed`), "wrong error message got %v", err) - // `forget --host example --unsafe-allow-remmove-all` should work + // `forget --host example --unsafe-allow-remove-all` should work testRunForget(t, env.gopts, ForgetOptions{ UnsafeAllowRemoveAll: true, GroupBy: restic.SnapshotGroupByOptions{Host: true, Path: true}, diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 2637fedc806..f0bfa2b949a 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -51,9 +51,9 @@ type PruneOptions struct { MaxRepackSize string MaxRepackBytes uint64 - RepackCachableOnly bool - RepackSmall bool - RepackUncompressed bool + RepackCacheableOnly bool + RepackSmall bool + RepackUncompressed bool } var pruneOptions PruneOptions @@ -70,7 +70,7 @@ func addPruneOptions(c *cobra.Command, pruneOptions *PruneOptions) { f := c.Flags() f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')") f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "maximum `size` to repack (allowed suffixes: k/K, m/M, g/G, t/T)") - f.BoolVar(&pruneOptions.RepackCachableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable") + f.BoolVar(&pruneOptions.RepackCacheableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable") f.BoolVar(&pruneOptions.RepackSmall, "repack-small", false, "repack pack files below 80% of target pack size") f.BoolVar(&pruneOptions.RepackUncompressed, "repack-uncompressed", false, "repack all uncompressed data") } @@ -183,9 +183,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption MaxUnusedBytes: opts.maxUnusedBytes, MaxRepackBytes: opts.MaxRepackBytes, - RepackCachableOnly: opts.RepackCachableOnly, - RepackSmall: opts.RepackSmall, - RepackUncompressed: opts.RepackUncompressed, + RepackCacheableOnly: opts.RepackCacheableOnly, + RepackSmall: opts.RepackSmall, + RepackUncompressed: opts.RepackUncompressed, } plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error { diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index 19c430716fc..746eb5cc9e7 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -51,8 +51,8 @@ func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) { testPrune(t, opts, checkOpts) }) - t.Run("CachableOnly"+suffix, func(t *testing.T) { - opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery} + t.Run("CacheableOnly"+suffix, func(t *testing.T) { + opts := PruneOptions{MaxUnused: "5%", RepackCacheableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery} checkOpts := CheckOptions{ReadData: true} testPrune(t, opts, checkOpts) }) diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 
faa86d3a6bc..f689e426093 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -81,9 +81,9 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions if opts.Last { // This branch should be removed in the same time // that --last. - list = FilterLastestSnapshots(list, 1) + list = FilterLatestSnapshots(list, 1) } else if opts.Latest > 0 { - list = FilterLastestSnapshots(list, opts.Latest) + list = FilterLatestSnapshots(list, opts.Latest) } sort.Sort(sort.Reverse(list)) snapshotGroups[k] = list @@ -126,11 +126,11 @@ func newFilterLastSnapshotsKey(sn *restic.Snapshot) filterLastSnapshotsKey { return filterLastSnapshotsKey{sn.Hostname, strings.Join(paths, "|")} } -// FilterLastestSnapshots filters a list of snapshots to only return +// FilterLatestSnapshots filters a list of snapshots to only return // the limit last entries for each hostname and path. If the snapshot // contains multiple paths, they will be joined and treated as one // item. -func FilterLastestSnapshots(list restic.Snapshots, limit int) restic.Snapshots { +func FilterLatestSnapshots(list restic.Snapshots, limit int) restic.Snapshots { // Sort the snapshots so that the newer ones are listed first sort.SliceStable(list, func(i, j int) bool { return list[i].Time.After(list[j].Time) @@ -307,7 +307,7 @@ type Snapshot struct { ShortID string `json:"short_id"` } -// SnapshotGroup helps to print SnaphotGroups as JSON with their GroupReasons included. +// SnapshotGroup helps to print SnapshotGroups as JSON with their GroupReasons included. type SnapshotGroup struct { GroupKey restic.SnapshotGroupKey `json:"group_key"` Snapshots []Snapshot `json:"snapshots"` diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go index 99e199a6773..0e3dea6d54e 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -34,12 +34,12 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo } func openWithReadLock(ctx context.Context, gopts GlobalOptions, noLock bool) (context.Context, *repository.Repository, func(), error) { - // TODO enfore read-only operations once the locking code has moved to the repository + // TODO enforce read-only operations once the locking code has moved to the repository return internalOpenWithLocked(ctx, gopts, noLock, false) } func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) { - // TODO enfore non-exclusive operations once the locking code has moved to the repository + // TODO enforce non-exclusive operations once the locking code has moved to the repository return internalOpenWithLocked(ctx, gopts, dryRun, false) } diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 5f3f3ff157d..87975f9fad2 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -873,5 +873,5 @@ with an empty password, use the following command. The ``init`` and ``copy`` command also support the option ``--from-insecure-no-password`` -which applies to the source repository. The ``key add`` and ``key passwd`` comands -include the ``--new-insecure-no-password`` option to add or set and emtpy password. +which applies to the source repository. The ``key add`` and ``key passwd`` commands +include the ``--new-insecure-no-password`` option to add or set and empty password. 
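A hedged sketch of how a script might drive the empty-password workflow described above, assuming a restic binary on PATH that supports `--insecure-no-password` (the repository path and the runRestic helper are made up for this example):

    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    // runRestic is a tiny helper for this sketch; it is not part of restic itself.
    func runRestic(args ...string) error {
        cmd := exec.Command("restic", args...)
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr
        return cmd.Run()
    }

    func main() {
        repo := "/tmp/restic-empty-pw-repo" // hypothetical repository path
        // Every command on an empty-password repository must repeat the flag.
        if err := runRestic("init", "--repo", repo, "--insecure-no-password"); err != nil {
            fmt.Fprintln(os.Stderr, "init failed:", err)
            return
        }
        if err := runRestic("backup", "--repo", repo, "--insecure-no-password", "."); err != nil {
            fmt.Fprintln(os.Stderr, "backup failed:", err)
        }
    }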
diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 9d6167895cd..6b96666935a 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -135,7 +135,7 @@ as separator. /home/user /home/user/work.txt -To show more details about the files in a snapshot, you can use the ``--long`` option. The colums include +To show more details about the files in a snapshot, you can use the ``--long`` option. The columns include file permissions, UID, GID, file size, modification time and file path. For scripting usage, the ``ls`` command supports the ``--json`` flag; the JSON output format is described at :ref:`ls json`. diff --git a/doc/077_troubleshooting.rst b/doc/077_troubleshooting.rst index f80df29b8c6..33302e9e0d7 100644 --- a/doc/077_troubleshooting.rst +++ b/doc/077_troubleshooting.rst @@ -71,7 +71,7 @@ some blobs in the repository, then please ask for help in the forum or our IRC channel. These errors are often caused by hardware problems which **must** be investigated and fixed. Otherwise, the backup will be damaged again and again. -Similarly, if a repository is repeatedly damaged, please open an `issue on Github +Similarly, if a repository is repeatedly damaged, please open an `issue on GitHub `_ as this could indicate a bug somewhere. Please include the check output and additional information that might help locate the problem. @@ -102,7 +102,7 @@ remove data unexpectedly. Please take the time to understand what the commands described in the following do. If you are unsure, then ask for help in the forum or our IRC channel. Search whether your issue is already known and solved. Please take a look at the -`forum`_ and `Github issues `_. +`forum`_ and `GitHub issues `_. 3. Repair the index diff --git a/internal/backend/cache/backend_test.go b/internal/backend/cache/backend_test.go index 0de90471e5c..7addc275d5a 100644 --- a/internal/backend/cache/backend_test.go +++ b/internal/backend/cache/backend_test.go @@ -135,7 +135,7 @@ func TestOutOfBoundsAccess(t *testing.T) { // load out of bounds err := wbe.Load(context.TODO(), h, 100, 100, func(rd io.Reader) error { - t.Error("cache returned non-existant file section") + t.Error("cache returned non-existent file section") return errors.New("broken") }) test.Assert(t, strings.Contains(err.Error(), " is too short"), "expected too short error, got %v", err) @@ -147,7 +147,7 @@ func TestOutOfBoundsAccess(t *testing.T) { // start within bounds, but request too large chunk err = wbe.Load(context.TODO(), h, 100, 0, func(rd io.Reader) error { - t.Error("cache returned non-existant file section") + t.Error("cache returned non-existent file section") return errors.New("broken") }) test.Assert(t, strings.Contains(err.Error(), " is too short"), "expected too short error, got %v", err) diff --git a/internal/bloblru/cache_test.go b/internal/bloblru/cache_test.go index b2becd2560a..d25daf764ff 100644 --- a/internal/bloblru/cache_test.go +++ b/internal/bloblru/cache_test.go @@ -79,14 +79,14 @@ func TestCacheGetOrCompute(t *testing.T) { return data1, nil }) rtest.OK(t, err) - rtest.Equals(t, &data1[0], &blob[0], "wrong buffer returend") + rtest.Equals(t, &data1[0], &blob[0], "wrong buffer returned") // now the buffer should be returned without calling the compute function blob, err = c.GetOrCompute(id1, func() ([]byte, error) { return nil, e }) rtest.OK(t, err) - rtest.Equals(t, &data1[0], &blob[0], "wrong buffer returend") + rtest.Equals(t, &data1[0], &blob[0], "wrong buffer returned") // check concurrency wg, _ 
:= errgroup.WithContext(context.TODO()) diff --git a/internal/fs/file_unix.go b/internal/fs/file_unix.go index 65f10c844e3..b562d15b1db 100644 --- a/internal/fs/file_unix.go +++ b/internal/fs/file_unix.go @@ -29,7 +29,7 @@ func TempFile(dir, prefix string) (f *os.File, err error) { return f, nil } -// isNotSuported returns true if the error is caused by an unsupported file system feature. +// isNotSupported returns true if the error is caused by an unsupported file system feature. func isNotSupported(err error) bool { if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ENOTSUP { return true diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 48ab165f1d7..718dfc46d9f 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -72,7 +72,7 @@ type LocalVss struct { var _ FS = &LocalVss{} // parseMountPoints try to convert semicolon separated list of mount points -// to map of lowercased volume GUID pathes. Mountpoints already in volume +// to map of lowercased volume GUID paths. Mountpoints already in volume // GUID path format will be validated and normalized. func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]struct{}) { if list == "" { diff --git a/internal/repository/index/associated_data_test.go b/internal/repository/index/associated_data_test.go index 715e3391a1a..82dd9908d58 100644 --- a/internal/repository/index/associated_data_test.go +++ b/internal/repository/index/associated_data_test.go @@ -128,7 +128,7 @@ func TestAssociatedSetWithExtendedIndex(t *testing.T) { mi.StorePack(blob2.PackID, []restic.Blob{blob2.Blob}) test.OK(t, mi.SaveIndex(context.TODO(), &noopSaver{})) - // non-existant + // non-existent test.Equals(t, false, bs.Has(of)) // set bs.Set(of, 5) diff --git a/internal/repository/index/indexmap_test.go b/internal/repository/index/indexmap_test.go index e0db9cb404f..f34e6a1d37d 100644 --- a/internal/repository/index/indexmap_test.go +++ b/internal/repository/index/indexmap_test.go @@ -156,7 +156,7 @@ func TestIndexMapFirstIndex(t *testing.T) { for i := 1; i <= 400; i++ { r.Read(id[:]) - rtest.Equals(t, -1, m.firstIndex(id), "wrong firstIndex for nonexistant id") + rtest.Equals(t, -1, m.firstIndex(id), "wrong firstIndex for nonexistent id") m.add(id, 0, 0, 0, 0) idx := m.firstIndex(id) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index b8d3ba97bd0..d5fdbba0725 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -25,9 +25,9 @@ type PruneOptions struct { MaxUnusedBytes func(used uint64) (unused uint64) // calculates the number of unused bytes after repacking, according to MaxUnused MaxRepackBytes uint64 - RepackCachableOnly bool - RepackSmall bool - RepackUncompressed bool + RepackCacheableOnly bool + RepackSmall bool + RepackUncompressed bool } type PruneStats struct { @@ -381,7 +381,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo *Repository, stats.Blobs.Remove += p.unusedBlobs stats.Size.Remove += p.unusedSize - case opts.RepackCachableOnly && p.tpe == restic.DataBlob: + case opts.RepackCacheableOnly && p.tpe == restic.DataBlob: // if this is a data pack and --repack-cacheable-only is set => keep pack! 
stats.Packs.Keep++ diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index 17dfafa4e26..02eefc46321 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -80,9 +80,9 @@ func TestPrune(t *testing.T) { { name: "cachableonly", opts: repository.PruneOptions{ - MaxRepackBytes: math.MaxUint64, - MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 20 }, - RepackCachableOnly: true, + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 20 }, + RepackCacheableOnly: true, }, }, { diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 05dca7cd948..2785e041230 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -228,7 +228,7 @@ func (node Node) restoreGenericAttributes(path string, warn func(msg string)) (e return errors.CombineErrors(errs...) } -// genericAttributesToWindowsAttrs converts the generic attributes map to a WindowsAttributes and also returns a string of unkown attributes that it could not convert. +// genericAttributesToWindowsAttrs converts the generic attributes map to a WindowsAttributes and also returns a string of unknown attributes that it could not convert. func genericAttributesToWindowsAttrs(attrs map[GenericAttributeType]json.RawMessage) (windowsAttributes WindowsAttributes, unknownAttribs []GenericAttributeType, err error) { waValue := reflect.ValueOf(&windowsAttributes).Elem() unknownAttribs, err = genericAttributesToOSAttrs(attrs, reflect.TypeOf(windowsAttributes), &waValue, "windows") diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go index 29a42e9e246..4fd57bbb757 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/restic/node_windows_test.go @@ -70,7 +70,7 @@ func getNode(name string, fileType string, genericAttributes map[GenericAttribut func getWindowsAttr(t *testing.T, testPath string, node *Node) WindowsAttributes { windowsAttributes, unknownAttribs, err := genericAttributesToWindowsAttrs(node.GenericAttributes) test.OK(t, errors.Wrapf(err, "Error getting windows attr from generic attr: %s", testPath)) - test.Assert(t, len(unknownAttribs) == 0, "Unkown attribs found: %s for: %s", unknownAttribs, testPath) + test.Assert(t, len(unknownAttribs) == 0, "Unknown attribs found: %s for: %s", unknownAttribs, testPath) return windowsAttributes } @@ -263,7 +263,7 @@ func TestNewGenericAttributeType(t *testing.T) { _, ua, err := genericAttributesToWindowsAttrs(node.GenericAttributes) test.OK(t, err) // Since this GenericAttribute is unknown to this version of the software, it will not get set on the file. - test.Assert(t, len(ua) == 0, "Unkown attributes: %s found for path: %s", ua, testPath) + test.Assert(t, len(ua) == 0, "Unknown attributes: %s found for path: %s", ua, testPath) } } diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 85132c8b4f8..a80714fa78a 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -44,7 +44,7 @@ type OverwriteBehavior int const ( OverwriteAlways OverwriteBehavior = iota // OverwriteIfChanged is like OverwriteAlways except that it skips restoring the content - // of files with matching size&mtime. Metatdata is always restored. + // of files with matching size&mtime. Metadata is always restored. 
OverwriteIfChanged OverwriteIfNewer OverwriteNever diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 3becf7c7a0b..d70f1f162e2 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -1162,7 +1162,7 @@ func TestRestoreIfChanged(t *testing.T) { rtest.Equals(t, origData, string(data), "expected original file content") } else { // restore should not have noticed the changed file content - rtest.Equals(t, modData, string(data), "expeced modified file content") + rtest.Equals(t, modData, string(data), "expected modified file content") } } } diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index 9523440cfda..59048e9085d 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -18,7 +18,7 @@ import ( restoreui "github.com/restic/restic/internal/ui/restore" ) -func TestRestorerRestoreEmptyHardlinkedFileds(t *testing.T) { +func TestRestorerRestoreEmptyHardlinkedFields(t *testing.T) { repo := repository.TestRepository(t) sn, _ := saveSnapshot(t, repo, Snapshot{ diff --git a/internal/restorer/restorer_windows_test.go b/internal/restorer/restorer_windows_test.go index 90ece474d30..61d07506144 100644 --- a/internal/restorer/restorer_windows_test.go +++ b/internal/restorer/restorer_windows_test.go @@ -356,29 +356,29 @@ func verifyFileAttributes(t *testing.T, mainFilePath string, attr FileAttributes rtest.OK(t, err) //Test positive and negative scenarios if attr.ReadOnly { - rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY != 0, "Expected read only attibute.") + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY != 0, "Expected read only attribute.") } else { - rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY == 0, "Unexpected read only attibute.") + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY == 0, "Unexpected read only attribute.") } if attr.Hidden { - rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN != 0, "Expected hidden attibute.") + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN != 0, "Expected hidden attribute.") } else { - rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN == 0, "Unexpected hidden attibute.") + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN == 0, "Unexpected hidden attribute.") } if attr.System { - rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM != 0, "Expected system attibute.") + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM != 0, "Expected system attribute.") } else { - rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM == 0, "Unexpected system attibute.") + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM == 0, "Unexpected system attribute.") } if attr.Archive { - rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE != 0, "Expected archive attibute.") + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE != 0, "Expected archive attribute.") } else { - rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE == 0, "Unexpected archive attibute.") + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE == 0, "Unexpected archive attribute.") } if attr.Encrypted { - rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED != 0, "Expected encrypted attibute.") + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED != 0, "Expected encrypted attribute.") } else { - rtest.Assert(t, 
fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED == 0, "Unexpected encrypted attibute.") + rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED == 0, "Unexpected encrypted attribute.") } } From a44e4bdd40b1be8b117b0425c144ce48a2a8f932 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 3 Jul 2024 20:39:59 +0200 Subject: [PATCH 411/893] man: work around broken rendering of <snapshotID>:<subfolder> help text --- cmd/restic/cmd_diff.go | 2 +- cmd/restic/cmd_dump.go | 2 +- cmd/restic/cmd_restore.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 28c7426254c..fd54897f14f 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -33,7 +33,7 @@ Metadata comparison will likely not work if a backup was created using the '--ignore-inode' or '--ignore-ctime' option. To only compare files in specific subfolders, you can use the -"<snapshotID>:<subfolder>" syntax, where "subfolder" is a path within the +"snapshotID:subfolder" syntax, where "subfolder" is a path within the snapshot. EXIT STATUS diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 39e915b4091..2a98a8ae8e9 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -28,7 +28,7 @@ The special snapshotID "latest" can be used to use the latest snapshot in the repository. To include the folder content at the root of the archive, you can use the -"<snapshotID>:<subfolder>" syntax, where "subfolder" is a path within the +"snapshotID:subfolder" syntax, where "subfolder" is a path within the snapshot. EXIT STATUS diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index f39db6a7619..467a7535abe 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -25,7 +25,7 @@ a directory. The special snapshotID "latest" can be used to restore the latest snapshot in the repository. -To only restore a specific subfolder, you can use the "<snapshotID>:<subfolder>" +To only restore a specific subfolder, you can use the "snapshotID:subfolder" syntax, where "subfolder" is a path within the snapshot.
EXIT STATUS From b1250eead9bb6f69e618993509ef1964bd3e10ce Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 10:55:44 +0200 Subject: [PATCH 412/893] check: mark s3legacy layout and legacy indexes as errors --- cmd/restic/cmd_check.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index cccb1986e1e..1a27ed6ea91 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -264,7 +264,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args term.Print("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n") } if suggestLegacyIndexRebuild { - printer.E("Found indexes using the legacy format, you must run `restic repair index' to correct this.\n") + printer.E("error: Found indexes using the legacy format, you must run `restic repair index' to correct this.\n") } if mixedFound { term.Print("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n") @@ -288,7 +288,8 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args orphanedPacks++ printer.P("%v\n", err) } else if err == checker.ErrLegacyLayout { - printer.P("repository still uses the S3 legacy layout\nPlease run `restic migrate s3legacy` to correct this.\n") + errorsFound = true + printer.E("error: repository still uses the S3 legacy layout\nYou must run `restic migrate s3legacy` to correct this.\n") } else { errorsFound = true printer.E("%v\n", err) From 1671a3fe2e6a454bab916182ea40d0e61aa5f72c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 10:57:12 +0200 Subject: [PATCH 413/893] check: hide message about additional files if error in repo The message says "[...] additional files were found [...]. This is non-critical [...]". Unless users are highly experienced with restic, it's hard to correctly interpret what "This" refers to. Thus, just hide the message if there is a real problem. --- cmd/restic/cmd_check.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 1a27ed6ea91..2da877fade6 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -296,7 +296,8 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } } - if orphanedPacks > 0 { + if orphanedPacks > 0 && !errorsFound { + // hide notice if repository is damaged printer.P("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) } if ctx.Err() != nil { From da338d5aa85113716423c0e62ff6896cb1c4c9e0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 11:02:36 +0200 Subject: [PATCH 414/893] check: tweak wording of repair packs message The message is now also shown for truncated or otherwise damaged pack files, not just those with corrupted blobs. --- cmd/restic/cmd_check.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 2da877fade6..472cc742adf 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -367,13 +367,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args p.Done() if len(salvagePacks) > 0 { - printer.E("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands.
Please read the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html first.\n\n") + printer.E("\nThe repository contains damaged pack files. These damaged files must be removed to repair the repository. This can be done using the following commands. Please read the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html first.\n\n") var strIDs []string for _, id := range salvagePacks { strIDs = append(strIDs, id.String()) } printer.E("restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " ")) - printer.E("Corrupted blobs are either caused by hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") + printer.E("Damaged pack files can be caused by backend problem, hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") } } From c9a4a95848ec6d50dda861a2dad6c7329edc3686 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 11:06:49 +0200 Subject: [PATCH 415/893] check: suggest using `repair packs` to repair truncated pack files Previously, that help message was only shown for running `check --read-data`. --- cmd/restic/cmd_check.go | 40 ++++++++++++++++++++++--------------- internal/checker/checker.go | 16 +++++---------- 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 472cc742adf..89bb3086860 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -279,14 +279,24 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args orphanedPacks := 0 errChan := make(chan error) + salvagePacks := restic.NewIDSet() printer.P("check all packs\n") go chkr.Packs(ctx, errChan) for err := range errChan { - if checker.IsOrphanedPack(err) { - orphanedPacks++ - printer.P("%v\n", err) + var packErr *checker.PackError + if errors.As(err, &packErr) { + if packErr.Orphaned { + orphanedPacks++ + printer.P("%v\n", err) + } else { + if packErr.Truncated { + salvagePacks.Insert(packErr.ID) + } + errorsFound = true + printer.E("%v\n", err) + } } else if err == checker.ErrLegacyLayout { errorsFound = true printer.E("error: repository still uses the S3 legacy layout\nYou must run `restic migrate s3legacy` to correct this.\n") @@ -355,26 +365,14 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args go chkr.ReadPacks(ctx, packs, p, errChan) - var salvagePacks restic.IDs - for err := range errChan { errorsFound = true printer.E("%v\n", err) if err, ok := err.(*repository.ErrPackData); ok { - salvagePacks = append(salvagePacks, err.PackID) + salvagePacks.Insert(err.PackID) } } p.Done() - - if len(salvagePacks) > 0 { - printer.E("\nThe repository contains damaged pack files. These damaged files must be removed to repair the repository. This can be done using the following commands. Please read the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html first.\n\n") - var strIDs []string - for _, id := range salvagePacks { - strIDs = append(strIDs, id.String()) - } - printer.E("restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " ")) - printer.E("Damaged pack files can be caused by backend problem, hardware problems or bugs in restic. 
Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") - } } switch { @@ -418,6 +416,16 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args doReadData(packs) } + if len(salvagePacks) > 0 { + printer.E("\nThe repository contains damaged pack files. These damaged files must be removed to repair the repository. This can be done using the following commands. Please read the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html first.\n\n") + var strIDs []string + for id := range salvagePacks { + strIDs = append(strIDs, id.String()) + } + printer.E("restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " ")) + printer.E("Damaged pack files can be caused by backend problems, hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") + } + if ctx.Err() != nil { return ctx.Err() } diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 82348c7ea62..031e13807f0 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -183,22 +183,16 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e // PackError describes an error with a specific pack. type PackError struct { - ID restic.ID - Orphaned bool - Err error + ID restic.ID + Orphaned bool + Truncated bool + Err error } func (e *PackError) Error() string { return "pack " + e.ID.String() + ": " + e.Err.Error() } -// IsOrphanedPack returns true if the error describes a pack which is not -// contained in any index. -func IsOrphanedPack(err error) bool { - var e *PackError - return errors.As(err, &e) && e.Orphaned -} - func isS3Legacy(b backend.Backend) bool { be := backend.AsBackend[*s3.Backend](b) return be != nil && be.Layout.Name() == "s3legacy" @@ -250,7 +244,7 @@ func (c *Checker) Packs(ctx context.Context, errChan chan<- error) { select { case <-ctx.Done(): return - case errChan <- &PackError{ID: id, Err: errors.Errorf("unexpected file size: got %d, expected %d", reposize, size)}: + case errChan <- &PackError{ID: id, Truncated: true, Err: errors.Errorf("unexpected file size: got %d, expected %d", reposize, size)}: } } } From 6d9dfff1cbe18846cb079f4c76b20910af907f73 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 11:08:52 +0200 Subject: [PATCH 416/893] check: point users towards the troubleshooting guide if repo has errors --- cmd/restic/cmd_check.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 89bb3086860..ffb97ccdb68 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -431,6 +431,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } if errorsFound { + if len(salvagePacks) == 0 { + printer.E("\nThe repository is damaged and must be repaired. Please follow the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html .\n\n") + } return errors.Fatal("repository contains errors") } printer.P("no errors were found\n") From 879ba07a87d1819468e38f41d69dfa9544e30ffa Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 11:12:08 +0200 Subject: [PATCH 417/893] check: only show additional files if verbose output is enabled Additional files are nearly always caused by interrupted backup runs. This is unproblematic, thus don't pollute the check output with it. 
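For illustration, a hypothetical run on an otherwise healthy repository with two leftover pack files would now look roughly like this (output sketched by hand, not captured from restic; pack IDs and the per-pack messages are placeholders):

    $ restic check
    ...
    2 additional files were found in the repo, which likely contain duplicate data.
    This is non-critical, you can run `restic prune` to correct this.
    no errors were found

    $ restic check --verbose
    ...
    pack 1234abcd[...]: [per-pack message about the unreferenced file]
    pack 5678ef01[...]: [per-pack message about the unreferenced file]
    2 additional files were found in the repo, which likely contain duplicate data.
    This is non-critical, you can run `restic prune` to correct this.
    no errors were found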
--- cmd/restic/cmd_check.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index ffb97ccdb68..4cc9c666e0a 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -289,7 +289,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args if errors.As(err, &packErr) { if packErr.Orphaned { orphanedPacks++ - printer.P("%v\n", err) + printer.V("%v\n", err) } else { if packErr.Truncated { salvagePacks.Insert(packErr.ID) From a03e00373ce9a701bf9a65c6a8a3053654ca8cf6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 11:18:53 +0200 Subject: [PATCH 418/893] update repair packs changelog --- changelog/unreleased/issue-828 | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/changelog/unreleased/issue-828 b/changelog/unreleased/issue-828 index 6e13542258c..2e8bcd0b023 100644 --- a/changelog/unreleased/issue-828 +++ b/changelog/unreleased/issue-828 @@ -1,11 +1,12 @@ Enhancement: Improve `repair packs` command The `repair packs` command has been improved to also be able to process -truncated pack files. The `check --read-data` command will provide instructions -on using the command if necessary to repair a repository. See the guide at -https://restic.readthedocs.io/en/stable/077_troubleshooting.html for further -instructions. +truncated pack files. The `check` and `check --read-data` command will provide +instructions on using the command if necessary to repair a repository. See the +guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html for +further instructions. https://github.com/restic/restic/issues/828 https://github.com/restic/restic/pull/4644 https://github.com/restic/restic/pull/4655 +https://github.com/restic/restic/pull/4882 From fdc7349aa4cad31587f84ea99dbe4000b88d13cb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 11:42:23 +0200 Subject: [PATCH 419/893] check: improve error on damaged index Always return the `repository contains errors` message if a repository is damaged and must be repaired. Also provide specific instructions how to repair the index. --- cmd/restic/cmd_check.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 4cc9c666e0a..f416c9269ed 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -274,7 +274,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args for _, err := range errs { printer.E("error: %v\n", err) } - return errors.Fatal("LoadIndex returned errors") + + printer.E("\nThe repository index is damaged and must be repaired. You must run `restic repair index' to correct this.\n\n") + return errors.Fatal("repository contains errors") } orphanedPacks := 0 From 283038056e1c1769abbed77c1a48891a56538526 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 11:45:29 +0200 Subject: [PATCH 420/893] doc: suggest to follow troubleshooting steps if check reports error --- doc/045_working_with_repos.rst | 18 +++++++++++++++--- doc/077_troubleshooting.rst | 2 ++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 6b96666935a..8dba8439f1d 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -368,10 +368,22 @@ detect this and yield the same error as when you tried to restore: $ restic -r /srv/restic-repo check ... 
load indexes - error: error loading index de30f323: load : invalid data returned - Fatal: LoadIndex returned errors + error: error loading index de30f3231ca2e6a59af4aa84216dfe2ef7339c549dc11b09b84000997b139628: LoadRaw(): invalid data returned -If the repository structure is intact, restic will show that no errors were found: + The repository index is damaged and must be repaired. You must run `restic repair index' to correct this. + + Fatal: repository contains errors + +.. warning:: + + If ``check`` reports an error in the repository, then you must repair the repository. + As long as a repository is damaged, restoring some files or directories will fail. New + snapshots are not guaranteed to be restorable either. + + For instructions how to repair a damaged repository, see the :ref:`troubleshooting` + section or follow the instructions provided by the ``check`` command. + +If the repository structure is intact, restic will show that ``no errors were found``: .. code-block:: console diff --git a/doc/077_troubleshooting.rst b/doc/077_troubleshooting.rst index 33302e9e0d7..36c9d63ecc4 100644 --- a/doc/077_troubleshooting.rst +++ b/doc/077_troubleshooting.rst @@ -10,6 +10,8 @@ ^ for subsubsections " for paragraphs +.. _troubleshooting: + ######################### Troubleshooting ######################### From c47bf338841d6d3f296d574f7a9c103866042182 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 20:38:51 +0200 Subject: [PATCH 421/893] restore: print processed files at maximum verbosity --- cmd/restic/cmd_restore.go | 4 +- internal/restorer/filerestorer.go | 4 +- internal/restorer/restorer.go | 19 +++--- internal/restorer/restorer_unix_test.go | 2 + internal/ui/restore/json.go | 43 ++++++++++++- internal/ui/restore/json_test.go | 38 ++++++++--- internal/ui/restore/progress.go | 25 +++++++- internal/ui/restore/progress_test.go | 85 ++++++++++++++++++------- internal/ui/restore/text.go | 34 +++++++++- internal/ui/restore/text_test.go | 38 ++++++++--- 10 files changed, 228 insertions(+), 64 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 467a7535abe..b3f55d9596c 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -133,9 +133,9 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, msg := ui.NewMessage(term, gopts.verbosity) var printer restoreui.ProgressPrinter if gopts.JSON { - printer = restoreui.NewJSONProgress(term) + printer = restoreui.NewJSONProgress(term, gopts.verbosity) } else { - printer = restoreui.NewTextProgress(term) + printer = restoreui.NewTextProgress(term, gopts.verbosity) } progress := restoreui.NewProgress(printer, calculateProgressInterval(!gopts.Quiet, gopts.JSON)) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index f57d5859818..4f9ed930e56 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -215,7 +215,7 @@ func (r *fileRestorer) restoreEmptyFileAt(location string) error { return err } - r.progress.AddProgress(location, 0, 0) + r.progress.AddProgress(location, false, true, 0, 0) return nil } @@ -337,7 +337,7 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID, createSize = file.size } writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse) - r.progress.AddProgress(file.location, uint64(len(blobData)), uint64(file.size)) + r.progress.AddProgress(file.location, false, file.state == nil, uint64(len(blobData)), 
uint64(file.size)) return writeErr } err := r.sanitizeError(file, writeToFile()) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index a80714fa78a..07356433b3f 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -231,7 +231,7 @@ func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, targe return err } - res.opts.Progress.AddProgress(location, 0, 0) + res.opts.Progress.AddProgress(location, false, true, 0, 0) return res.restoreNodeMetadataTo(node, target, location) } @@ -253,8 +253,7 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location return errors.WithStack(err) } - res.opts.Progress.AddProgress(location, 0, 0) - + res.opts.Progress.AddProgress(location, false, true, 0, 0) // TODO investigate if hardlinks have separate metadata on any supported system return res.restoreNodeMetadataTo(node, path, location) } @@ -324,9 +323,9 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { idx.Add(node.Inode, node.DeviceID, location) } - buf, err = res.withOverwriteCheck(node, target, false, buf, func(updateMetadataOnly bool, matches *fileState) error { + buf, err = res.withOverwriteCheck(node, target, location, false, buf, func(updateMetadataOnly bool, matches *fileState) error { if updateMetadataOnly { - res.opts.Progress.AddSkippedFile(node.Size) + res.opts.Progress.AddSkippedFile(location, node.Size) } else { res.opts.Progress.AddFile(node.Size) filerestorer.addFile(location, node.Content, int64(node.Size), matches) @@ -353,14 +352,14 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { visitNode: func(node *restic.Node, target, location string) error { debug.Log("second pass, visitNode: restore node %q", location) if node.Type != "file" { - _, err := res.withOverwriteCheck(node, target, false, nil, func(_ bool, _ *fileState) error { + _, err := res.withOverwriteCheck(node, target, location, false, nil, func(_ bool, _ *fileState) error { return res.restoreNodeTo(ctx, node, target, location) }) return err } if idx.Has(node.Inode, node.DeviceID) && idx.Value(node.Inode, node.DeviceID) != location { - _, err := res.withOverwriteCheck(node, target, true, nil, func(_ bool, _ *fileState) error { + _, err := res.withOverwriteCheck(node, target, location, true, nil, func(_ bool, _ *fileState) error { return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location) }) return err @@ -375,7 +374,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { leaveDir: func(node *restic.Node, target, location string) error { err := res.restoreNodeMetadataTo(node, target, location) if err == nil { - res.opts.Progress.AddProgress(location, 0, 0) + res.opts.Progress.AddProgress(location, false, true, 0, 0) } return err }, @@ -392,7 +391,7 @@ func (res *Restorer) hasRestoredFile(location string) (metadataOnly bool, ok boo return metadataOnly, ok } -func (res *Restorer) withOverwriteCheck(node *restic.Node, target string, isHardlink bool, buf []byte, cb func(updateMetadataOnly bool, matches *fileState) error) ([]byte, error) { +func (res *Restorer) withOverwriteCheck(node *restic.Node, target, location string, isHardlink bool, buf []byte, cb func(updateMetadataOnly bool, matches *fileState) error) ([]byte, error) { overwrite, err := shouldOverwrite(res.opts.Overwrite, node, target) if err != nil { return buf, err @@ -401,7 +400,7 @@ func (res *Restorer) withOverwriteCheck(node *restic.Node, target 
string, isHard if isHardlink { size = 0 } - res.opts.Progress.AddSkippedFile(size) + res.opts.Progress.AddSkippedFile(location, size) return buf, nil } diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index 59048e9085d..034935c24b3 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -76,6 +76,8 @@ type printerMock struct { func (p *printerMock) Update(_ restoreui.State, _ time.Duration) { } +func (p *printerMock) CompleteItem(action restoreui.ItemAction, item string, size uint64) { +} func (p *printerMock) Finish(s restoreui.State, _ time.Duration) { p.s = s } diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go index 512640a7a95..ebc21717671 100644 --- a/internal/ui/restore/json.go +++ b/internal/ui/restore/json.go @@ -7,12 +7,14 @@ import ( ) type jsonPrinter struct { - terminal term + terminal term + verbosity uint } -func NewJSONProgress(terminal term) ProgressPrinter { +func NewJSONProgress(terminal term, verbosity uint) ProgressPrinter { return &jsonPrinter{ - terminal: terminal, + terminal: terminal, + verbosity: verbosity, } } @@ -39,6 +41,34 @@ func (t *jsonPrinter) Update(p State, duration time.Duration) { t.print(status) } +func (t *jsonPrinter) CompleteItem(messageType ItemAction, item string, size uint64) { + if t.verbosity < 3 { + return + } + + var action string + switch messageType { + case ActionDirRestored: + action = "restored" + case ActionFileRestored: + action = "restored" + case ActionFileUpdated: + action = "updated" + case ActionFileUnchanged: + action = "unchanged" + default: + panic("unknown message type") + } + + status := verboseUpdate{ + MessageType: "verbose_status", + Action: action, + Item: item, + Size: size, + } + t.print(status) +} + func (t *jsonPrinter) Finish(p State, duration time.Duration) { status := summaryOutput{ MessageType: "summary", @@ -65,6 +95,13 @@ type statusUpdate struct { BytesSkipped uint64 `json:"bytes_skipped,omitempty"` } +type verboseUpdate struct { + MessageType string `json:"message_type"` // "verbose_status" + Action string `json:"action"` + Item string `json:"item"` + Size uint64 `json:"size"` +} + type summaryOutput struct { MessageType string `json:"message_type"` // "summary" SecondsElapsed uint64 `json:"seconds_elapsed,omitempty"` diff --git a/internal/ui/restore/json_test.go b/internal/ui/restore/json_test.go index 37983f7d7eb..1a749b9334d 100644 --- a/internal/ui/restore/json_test.go +++ b/internal/ui/restore/json_test.go @@ -7,37 +7,55 @@ import ( "github.com/restic/restic/internal/test" ) -func TestJSONPrintUpdate(t *testing.T) { +func createJSONProgress() (*mockTerm, ProgressPrinter) { term := &mockTerm{} - printer := NewJSONProgress(term) + printer := NewJSONProgress(term, 3) + return term, printer +} + +func TestJSONPrintUpdate(t *testing.T) { + term, printer := createJSONProgress() printer.Update(State{3, 11, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output) } func TestJSONPrintUpdateWithSkipped(t *testing.T) { - term := &mockTerm{} - printer := NewJSONProgress(term) + term, printer := createJSONProgress() printer.Update(State{3, 11, 2, 29, 47, 59}, 5*time.Second) test.Equals(t, 
[]string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":29,\"bytes_skipped\":59}\n"}, term.output) } func TestJSONPrintSummaryOnSuccess(t *testing.T) { - term := &mockTerm{} - printer := NewJSONProgress(term) + term, printer := createJSONProgress() printer.Finish(State{11, 11, 0, 47, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.output) } func TestJSONPrintSummaryOnErrors(t *testing.T) { - term := &mockTerm{} - printer := NewJSONProgress(term) + term, printer := createJSONProgress() printer.Finish(State{3, 11, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output) } func TestJSONPrintSummaryOnSuccessWithSkipped(t *testing.T) { - term := &mockTerm{} - printer := NewJSONProgress(term) + term, printer := createJSONProgress() printer.Finish(State{11, 11, 2, 47, 47, 59}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":47,\"bytes_skipped\":59}\n"}, term.output) } + +func TestJSONPrintCompleteItem(t *testing.T) { + for _, data := range []struct { + action ItemAction + size uint64 + expected string + }{ + {ActionDirRestored, 0, "{\"message_type\":\"verbose_status\",\"action\":\"restored\",\"item\":\"test\",\"size\":0}\n"}, + {ActionFileRestored, 123, "{\"message_type\":\"verbose_status\",\"action\":\"restored\",\"item\":\"test\",\"size\":123}\n"}, + {ActionFileUpdated, 123, "{\"message_type\":\"verbose_status\",\"action\":\"updated\",\"item\":\"test\",\"size\":123}\n"}, + {ActionFileUnchanged, 123, "{\"message_type\":\"verbose_status\",\"action\":\"unchanged\",\"item\":\"test\",\"size\":123}\n"}, + } { + term, printer := createJSONProgress() + printer.CompleteItem(data.action, "test", data.size) + test.Equals(t, []string{data.expected}, term.output) + } +} diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index 7e8bcfd256e..7b12deb8d6e 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -39,9 +39,20 @@ type term interface { type ProgressPrinter interface { Update(progress State, duration time.Duration) + CompleteItem(action ItemAction, item string, size uint64) Finish(progress State, duration time.Duration) } +type ItemAction string + +// Constants for the different CompleteItem actions. 
+const ( + ActionDirRestored ItemAction = "dir restored" + ActionFileRestored ItemAction = "file restored" + ActionFileUpdated ItemAction = "file updated" + ActionFileUnchanged ItemAction = "file unchanged" +) + func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress { p := &Progress{ progressInfoMap: make(map[string]progressInfoEntry), @@ -77,7 +88,7 @@ func (p *Progress) AddFile(size uint64) { } // AddProgress accumulates the number of bytes written for a file -func (p *Progress) AddProgress(name string, bytesWrittenPortion uint64, bytesTotal uint64) { +func (p *Progress) AddProgress(name string, isDir bool, isNew bool, bytesWrittenPortion uint64, bytesTotal uint64) { if p == nil { return } @@ -96,10 +107,18 @@ func (p *Progress) AddProgress(name string, bytesWrittenPortion uint64, bytesTot if entry.bytesWritten == entry.bytesTotal { delete(p.progressInfoMap, name) p.s.FilesFinished++ + + action := ActionFileUpdated + if isDir { + action = ActionDirRestored + } else if isNew { + action = ActionFileRestored + } + p.printer.CompleteItem(action, name, bytesTotal) } } -func (p *Progress) AddSkippedFile(size uint64) { +func (p *Progress) AddSkippedFile(name string, size uint64) { if p == nil { return } @@ -109,6 +128,8 @@ func (p *Progress) AddSkippedFile(size uint64) { p.s.FilesSkipped++ p.s.AllBytesSkipped += size + + p.printer.CompleteItem(ActionFileUnchanged, name, size) } func (p *Progress) Finish() { diff --git a/internal/ui/restore/progress_test.go b/internal/ui/restore/progress_test.go index 56f5f62ce64..9d795a31301 100644 --- a/internal/ui/restore/progress_test.go +++ b/internal/ui/restore/progress_test.go @@ -16,8 +16,16 @@ type printerTraceEntry struct { type printerTrace []printerTraceEntry +type itemTraceEntry struct { + action ItemAction + item string + size uint64 +} + +type itemTrace []itemTraceEntry type mockPrinter struct { trace printerTrace + items itemTrace } const mockFinishDuration = 42 * time.Second @@ -25,95 +33,109 @@ const mockFinishDuration = 42 * time.Second func (p *mockPrinter) Update(progress State, duration time.Duration) { p.trace = append(p.trace, printerTraceEntry{progress, duration, false}) } +func (p *mockPrinter) CompleteItem(action ItemAction, item string, size uint64) { + p.items = append(p.items, itemTraceEntry{action, item, size}) +} func (p *mockPrinter) Finish(progress State, _ time.Duration) { p.trace = append(p.trace, printerTraceEntry{progress, mockFinishDuration, true}) } -func testProgress(fn func(progress *Progress) bool) printerTrace { +func testProgress(fn func(progress *Progress) bool) (printerTrace, itemTrace) { printer := &mockPrinter{} progress := NewProgress(printer, 0) final := fn(progress) progress.update(0, final) trace := append(printerTrace{}, printer.trace...) + items := append(itemTrace{}, printer.items...) 
// cleanup to avoid goroutine leak, but copy trace first progress.Finish() - return trace + return trace, items } func TestNew(t *testing.T) { - result := testProgress(func(progress *Progress) bool { + result, items := testProgress(func(progress *Progress) bool { return false }) test.Equals(t, printerTrace{ printerTraceEntry{State{0, 0, 0, 0, 0, 0}, 0, false}, }, result) + test.Equals(t, itemTrace{}, items) } func TestAddFile(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, items := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) return false }) test.Equals(t, printerTrace{ printerTraceEntry{State{0, 1, 0, 0, fileSize, 0}, 0, false}, }, result) + test.Equals(t, itemTrace{}, items) } func TestFirstProgressOnAFile(t *testing.T) { expectedBytesWritten := uint64(5) expectedBytesTotal := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, items := testProgress(func(progress *Progress) bool { progress.AddFile(expectedBytesTotal) - progress.AddProgress("test", expectedBytesWritten, expectedBytesTotal) + progress.AddProgress("test", false, false, expectedBytesWritten, expectedBytesTotal) return false }) test.Equals(t, printerTrace{ printerTraceEntry{State{0, 1, 0, expectedBytesWritten, expectedBytesTotal, 0}, 0, false}, }, result) + test.Equals(t, itemTrace{}, items) } func TestLastProgressOnAFile(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, items := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) - progress.AddProgress("test", 30, fileSize) - progress.AddProgress("test", 35, fileSize) - progress.AddProgress("test", 35, fileSize) + progress.AddProgress("test", false, false, 30, fileSize) + progress.AddProgress("test", false, false, 35, fileSize) + progress.AddProgress("test", false, false, 35, fileSize) return false }) test.Equals(t, printerTrace{ printerTraceEntry{State{1, 1, 0, fileSize, fileSize, 0}, 0, false}, }, result) + test.Equals(t, itemTrace{ + itemTraceEntry{action: ActionFileUpdated, item: "test", size: fileSize}, + }, items) } func TestLastProgressOnLastFile(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, items := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) - progress.AddProgress("test1", 50, 50) - progress.AddProgress("test2", 50, fileSize) - progress.AddProgress("test2", 50, fileSize) + progress.AddProgress("test1", false, false, 50, 50) + progress.AddProgress("test2", false, false, 50, fileSize) + progress.AddProgress("test2", false, false, 50, fileSize) return false }) test.Equals(t, printerTrace{ printerTraceEntry{State{2, 2, 0, 50 + fileSize, 50 + fileSize, 0}, 0, false}, }, result) + test.Equals(t, itemTrace{ + itemTraceEntry{action: ActionFileUpdated, item: "test1", size: 50}, + itemTraceEntry{action: ActionFileUpdated, item: "test2", size: fileSize}, + }, items) } func TestSummaryOnSuccess(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) - progress.AddProgress("test1", 50, 50) - progress.AddProgress("test2", fileSize, fileSize) + progress.AddProgress("test1", false, false, 50, 50) + progress.AddProgress("test2", false, false, fileSize, fileSize) return true }) test.Equals(t, printerTrace{ @@ -124,11 +146,11 @@ func 
TestSummaryOnSuccess(t *testing.T) { func TestSummaryOnErrors(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { + result, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) - progress.AddProgress("test1", 50, 50) - progress.AddProgress("test2", fileSize/2, fileSize) + progress.AddProgress("test1", false, false, 50, 50) + progress.AddProgress("test2", false, false, fileSize/2, fileSize) return true }) test.Equals(t, printerTrace{ @@ -139,11 +161,30 @@ func TestSummaryOnErrors(t *testing.T) { func TestSkipFile(t *testing.T) { fileSize := uint64(100) - result := testProgress(func(progress *Progress) bool { - progress.AddSkippedFile(fileSize) + result, items := testProgress(func(progress *Progress) bool { + progress.AddSkippedFile("test", fileSize) return true }) test.Equals(t, printerTrace{ printerTraceEntry{State{0, 0, 1, 0, 0, fileSize}, mockFinishDuration, true}, }, result) + test.Equals(t, itemTrace{ + itemTraceEntry{ActionFileUnchanged, "test", fileSize}, + }, items) +} + +func TestProgressTypes(t *testing.T) { + fileSize := uint64(100) + + _, items := testProgress(func(progress *Progress) bool { + progress.AddFile(fileSize) + progress.AddFile(0) + progress.AddProgress("dir", true, false, fileSize, fileSize) + progress.AddProgress("new", false, true, 0, 0) + return true + }) + test.Equals(t, itemTrace{ + itemTraceEntry{ActionDirRestored, "dir", fileSize}, + itemTraceEntry{ActionFileRestored, "new", 0}, + }, items) } diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go index 28a6eb96560..77c2f2d158e 100644 --- a/internal/ui/restore/text.go +++ b/internal/ui/restore/text.go @@ -8,12 +8,14 @@ import ( ) type textPrinter struct { - terminal term + terminal term + verbosity uint } -func NewTextProgress(terminal term) ProgressPrinter { +func NewTextProgress(terminal term, verbosity uint) ProgressPrinter { return &textPrinter{ - terminal: terminal, + terminal: terminal, + verbosity: verbosity, } } @@ -31,6 +33,32 @@ func (t *textPrinter) Update(p State, duration time.Duration) { t.terminal.SetStatus([]string{progress}) } +func (t *textPrinter) CompleteItem(messageType ItemAction, item string, size uint64) { + if t.verbosity < 3 { + return + } + + var action string + switch messageType { + case ActionDirRestored: + action = "restored" + case ActionFileRestored: + action = "restored" + case ActionFileUpdated: + action = "updated" + case ActionFileUnchanged: + action = "unchanged" + default: + panic("unknown message type") + } + + if messageType == ActionDirRestored { + t.terminal.Print(fmt.Sprintf("restored %v", item)) + } else { + t.terminal.Print(fmt.Sprintf("%-9v %v with size %v", action, item, ui.FormatBytes(size))) + } +} + func (t *textPrinter) Finish(p State, duration time.Duration) { t.terminal.SetStatus([]string{}) diff --git a/internal/ui/restore/text_test.go b/internal/ui/restore/text_test.go index 3b776a7dff4..c7d173422ca 100644 --- a/internal/ui/restore/text_test.go +++ b/internal/ui/restore/text_test.go @@ -19,37 +19,55 @@ func (m *mockTerm) SetStatus(lines []string) { m.output = append([]string{}, lines...) 
} -func TestPrintUpdate(t *testing.T) { +func createTextProgress() (*mockTerm, ProgressPrinter) { term := &mockTerm{} - printer := NewTextProgress(term) + printer := NewTextProgress(term, 3) + return term, printer +} + +func TestPrintUpdate(t *testing.T) { + term, printer := createTextProgress() printer.Update(State{3, 11, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B"}, term.output) } func TestPrintUpdateWithSkipped(t *testing.T) { - term := &mockTerm{} - printer := NewTextProgress(term) + term, printer := createTextProgress() printer.Update(State{3, 11, 2, 29, 47, 59}, 5*time.Second) test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B, skipped 2 files/dirs 59 B"}, term.output) } func TestPrintSummaryOnSuccess(t *testing.T) { - term := &mockTerm{} - printer := NewTextProgress(term) + term, printer := createTextProgress() printer.Finish(State{11, 11, 0, 47, 47, 0}, 5*time.Second) test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05"}, term.output) } func TestPrintSummaryOnErrors(t *testing.T) { - term := &mockTerm{} - printer := NewTextProgress(term) + term, printer := createTextProgress() printer.Finish(State{3, 11, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"Summary: Restored 3 / 11 files/dirs (29 B / 47 B) in 0:05"}, term.output) } func TestPrintSummaryOnSuccessWithSkipped(t *testing.T) { - term := &mockTerm{} - printer := NewTextProgress(term) + term, printer := createTextProgress() printer.Finish(State{11, 11, 2, 47, 47, 59}, 5*time.Second) test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05, skipped 2 files/dirs 59 B"}, term.output) } + +func TestPrintCompleteItem(t *testing.T) { + for _, data := range []struct { + action ItemAction + size uint64 + expected string + }{ + {ActionDirRestored, 0, "restored test"}, + {ActionFileRestored, 123, "restored test with size 123 B"}, + {ActionFileUpdated, 123, "updated test with size 123 B"}, + {ActionFileUnchanged, 123, "unchanged test with size 123 B"}, + } { + term, printer := createTextProgress() + printer.CompleteItem(data.action, "test", data.size) + test.Equals(t, []string{data.expected}, term.output) + } +} From 83351f42e39666d67eb3a95c949143d1faf02a00 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 20:57:28 +0200 Subject: [PATCH 422/893] restore: add dry-run support --- cmd/restic/cmd_restore.go | 6 +++ internal/restorer/restorer.go | 55 +++++++++++++++++-------- internal/restorer/restorer_test.go | 30 ++++++++++++++ internal/restorer/restorer_unix_test.go | 10 ++++- 4 files changed, 82 insertions(+), 19 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index b3f55d9596c..aea6457bd5c 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -47,6 +47,7 @@ type RestoreOptions struct { includePatternOptions Target string restic.SnapshotFilter + DryRun bool Sparse bool Verify bool Overwrite restorer.OverwriteBehavior @@ -64,6 +65,7 @@ func init() { initIncludePatternOptions(flags, &restoreOptions.includePatternOptions) initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter) + flags.BoolVar(&restoreOptions.DryRun, "dry-run", false, "do not write any data, just show what would be done") flags.BoolVar(&restoreOptions.Sparse, "sparse", false, "restore files as sparse") flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content") flags.Var(&restoreOptions.Overwrite, "overwrite", "overwrite 
behavior, one of (always|if-changed|if-newer|never) (default: always)") @@ -99,6 +101,9 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, if hasExcludes && hasIncludes { return errors.Fatal("exclude and include patterns are mutually exclusive") } + if opts.DryRun && opts.Verify { + return errors.Fatal("--dry-run and --verify are mutually exclusive") + } snapshotIDString := args[0] @@ -140,6 +145,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, progress := restoreui.NewProgress(printer, calculateProgressInterval(!gopts.Quiet, gopts.JSON)) res := restorer.NewRestorer(repo, sn, restorer.Options{ + DryRun: opts.DryRun, Sparse: opts.Sparse, Progress: progress, Overwrite: opts.Overwrite, diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 07356433b3f..4ecd762b4f4 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -33,6 +33,7 @@ type Restorer struct { var restorerAbortOnAllErrors = func(_ string, err error) error { return err } type Options struct { + DryRun bool Sparse bool Progress *restoreui.Progress Overwrite OverwriteBehavior @@ -220,15 +221,17 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, } func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, target, location string) error { - debug.Log("restoreNode %v %v %v", node.Name, target, location) - if err := fs.Remove(target); err != nil && !errors.Is(err, os.ErrNotExist) { - return errors.Wrap(err, "RemoveNode") - } + if !res.opts.DryRun { + debug.Log("restoreNode %v %v %v", node.Name, target, location) + if err := fs.Remove(target); err != nil && !errors.Is(err, os.ErrNotExist) { + return errors.Wrap(err, "RemoveNode") + } - err := node.CreateAt(ctx, target, res.repo) - if err != nil { - debug.Log("node.CreateAt(%s) error %v", target, err) - return err + err := node.CreateAt(ctx, target, res.repo) + if err != nil { + debug.Log("node.CreateAt(%s) error %v", target, err) + return err + } } res.opts.Progress.AddProgress(location, false, true, 0, 0) @@ -236,6 +239,9 @@ func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, targe } func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location string) error { + if res.opts.DryRun { + return nil + } debug.Log("restoreNodeMetadata %v %v %v", node.Name, target, location) err := node.RestoreMetadata(target, res.Warn) if err != nil { @@ -245,12 +251,14 @@ func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location s } func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location string) error { - if err := fs.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) { - return errors.Wrap(err, "RemoveCreateHardlink") - } - err := fs.Link(target, path) - if err != nil { - return errors.WithStack(err) + if !res.opts.DryRun { + if err := fs.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) { + return errors.Wrap(err, "RemoveCreateHardlink") + } + err := fs.Link(target, path) + if err != nil { + return errors.WithStack(err) + } } res.opts.Progress.AddProgress(location, false, true, 0, 0) @@ -259,6 +267,10 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location } func (res *Restorer) ensureDir(target string) error { + if res.opts.DryRun { + return nil + } + fi, err := fs.Lstat(target) if err != nil && !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("failed to check for directory: %w", err) @@ -328,7 +340,12 @@ 
func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { res.opts.Progress.AddSkippedFile(location, node.Size) } else { res.opts.Progress.AddFile(node.Size) - filerestorer.addFile(location, node.Content, int64(node.Size), matches) + if !res.opts.DryRun { + filerestorer.addFile(location, node.Content, int64(node.Size), matches) + } else { + // immediately mark as completed + res.opts.Progress.AddProgress(location, false, matches == nil, node.Size, node.Size) + } } res.trackFile(location, updateMetadataOnly) return nil @@ -340,9 +357,11 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { return err } - err = filerestorer.restoreFiles(ctx) - if err != nil { - return err + if !res.opts.DryRun { + err = filerestorer.restoreFiles(ctx) + if err != nil { + return err + } } debug.Log("second pass for %q", dst) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index d70f1f162e2..5eca779c675 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -15,6 +15,7 @@ import ( "time" "github.com/restic/restic/internal/archiver" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -1166,3 +1167,32 @@ func TestRestoreIfChanged(t *testing.T) { } } } + +func TestRestoreDryRun(t *testing.T) { + snapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n", Links: 2, Inode: 42}, + "foo2": File{Data: "content: foo\n", Links: 2, Inode: 42}, + "dirtest": Dir{ + Nodes: map[string]Node{ + "file": File{Data: "content: file\n"}, + }, + }, + "link": Symlink{Target: "foo"}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + t.Logf("snapshot saved as %v", id.Str()) + + res := NewRestorer(repo, sn, Options{DryRun: true}) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + + _, err := os.Stat(tempdir) + rtest.Assert(t, errors.Is(err, os.ErrNotExist), "expected no file to be created, got %v", err) +} diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index 034935c24b3..2ad28a0f6df 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -83,6 +83,14 @@ func (p *printerMock) Finish(s restoreui.State, _ time.Duration) { } func TestRestorerProgressBar(t *testing.T) { + testRestorerProgressBar(t, false) +} + +func TestRestorerProgressBarDryRun(t *testing.T) { + testRestorerProgressBar(t, true) +} + +func testRestorerProgressBar(t *testing.T, dryRun bool) { repo := repository.TestRepository(t) sn, _ := saveSnapshot(t, repo, Snapshot{ @@ -99,7 +107,7 @@ func TestRestorerProgressBar(t *testing.T) { mock := &printerMock{} progress := restoreui.NewProgress(mock, 0) - res := NewRestorer(repo, sn, Options{Progress: progress}) + res := NewRestorer(repo, sn, Options{Progress: progress, DryRun: dryRun}) res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { return true, true } From 798256ec5218d334bfd8dddae5b70060e62b2f05 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 May 2024 21:12:13 +0200 Subject: [PATCH 423/893] restore: add dry-run docs --- changelog/unreleased/pull-4839 | 7 +++++++ doc/050_restore.rst | 22 
++++++++++++++++++++++ doc/075_scripting.rst | 16 ++++++++++++++++ 3 files changed, 45 insertions(+) create mode 100644 changelog/unreleased/pull-4839 diff --git a/changelog/unreleased/pull-4839 b/changelog/unreleased/pull-4839 new file mode 100644 index 00000000000..8d644407b14 --- /dev/null +++ b/changelog/unreleased/pull-4839 @@ -0,0 +1,7 @@ +Enhancement: Add dry-run support to `restore` command + +The `restore` command now supports the `--dry-run` option to perform +a dry run. Pass the `--verbose=2` option to see which files would +remain unchanged, which would be updated, and which would be freshly restored. + +https://github.com/restic/restic/pull/4839 diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 9c24f09de1f..57251f13502 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -111,6 +111,28 @@ values are supported: newer modification time (mtime). * ``--overwrite never``: never overwrite existing files. +Dry run +------- + +As restore operations can take a long time, it can be useful to perform a dry run to +see what would be restored without running the full restore operation. The +restore command supports the ``--dry-run`` option and prints information about the +restored files when ``--verbose=2`` is specified. + +.. code-block:: console + + $ restic restore --target /tmp/restore-work --dry-run --verbose=2 latest + + unchanged /restic/internal/walker/walker.go with size 2.812 KiB + updated /restic/internal/walker/walker_test.go with size 11.143 KiB + restored /restic/restic with size 35.318 MiB + restored /restic + [...] + Summary: Restored 9072 files/dirs (153.597 MiB) in 0:00 + +Files whose content is already up to date are reported as ``unchanged``. Files whose content +was modified are reported as ``updated``, and new files are shown as ``restored``. Directories +and other file types such as symlinks are always reported as ``restored``. Restore using mount =================== diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index d40f7c97608..b83fe5eb55f 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -511,6 +511,22 @@ Status |``bytes_skipped`` | Total size of skipped files | +----------------------+------------------------------------------------------------+ +Verbose Status +^^^^^^^^^^^^^^ + +Verbose status provides details about the progress, including details about each restored file. +It is only printed if ``--verbose=2`` is specified.
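+A single verbose status message might look like the following example (the values shown
+here are illustrative; the fields are described in the table below):
+
+.. code-block:: json
+
+    {"message_type":"verbose_status","action":"restored","item":"/home/user/file.txt","size":1024}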
+ ++----------------------+-----------------------------------------------------------+ +| ``message_type`` | Always "verbose_status" | ++----------------------+-----------------------------------------------------------+ +| ``action`` | Either "restored", "updated" or "unchanged" | ++----------------------+-----------------------------------------------------------+ +| ``item`` | The item in question | ++----------------------+-----------------------------------------------------------+ +| ``size`` | Size of the item in bytes | ++----------------------+-----------------------------------------------------------+ + Summary ^^^^^^^ From ae978d60ccd8935e8aafb96310ba610ea19b03d4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 21:24:45 +0200 Subject: [PATCH 424/893] restore: pass action enum to restore progress --- internal/restorer/filerestorer.go | 8 ++++++-- internal/restorer/restorer.go | 12 ++++++++---- internal/ui/restore/progress.go | 8 +------- internal/ui/restore/progress_test.go | 26 +++++++++++++------------- 4 files changed, 28 insertions(+), 26 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 4f9ed930e56..fd5b3c5dbf9 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -215,7 +215,7 @@ func (r *fileRestorer) restoreEmptyFileAt(location string) error { return err } - r.progress.AddProgress(location, false, true, 0, 0) + r.progress.AddProgress(location, restore.ActionFileRestored, 0, 0) return nil } @@ -337,7 +337,11 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID, createSize = file.size } writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse) - r.progress.AddProgress(file.location, false, file.state == nil, uint64(len(blobData)), uint64(file.size)) + action := restore.ActionFileUpdated + if file.state == nil { + action = restore.ActionFileRestored + } + r.progress.AddProgress(file.location, action, uint64(len(blobData)), uint64(file.size)) return writeErr } err := r.sanitizeError(file, writeToFile()) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 4ecd762b4f4..650ad07316d 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -234,7 +234,7 @@ func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, targe } } - res.opts.Progress.AddProgress(location, false, true, 0, 0) + res.opts.Progress.AddProgress(location, restoreui.ActionFileRestored, 0, 0) return res.restoreNodeMetadataTo(node, target, location) } @@ -261,7 +261,7 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location } } - res.opts.Progress.AddProgress(location, false, true, 0, 0) + res.opts.Progress.AddProgress(location, restoreui.ActionFileRestored, 0, 0) // TODO investigate if hardlinks have separate metadata on any supported system return res.restoreNodeMetadataTo(node, path, location) } @@ -343,8 +343,12 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { if !res.opts.DryRun { filerestorer.addFile(location, node.Content, int64(node.Size), matches) } else { + action := restoreui.ActionFileUpdated + if matches == nil { + action = restoreui.ActionFileRestored + } // immediately mark as completed - res.opts.Progress.AddProgress(location, false, matches == nil, node.Size, node.Size) + res.opts.Progress.AddProgress(location, action, node.Size, node.Size) } } res.trackFile(location, updateMetadataOnly) @@ 
-393,7 +397,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { leaveDir: func(node *restic.Node, target, location string) error { err := res.restoreNodeMetadataTo(node, target, location) if err == nil { - res.opts.Progress.AddProgress(location, false, true, 0, 0) + res.opts.Progress.AddProgress(location, restoreui.ActionDirRestored, 0, 0) } return err }, diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index 7b12deb8d6e..04274b7ea54 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -88,7 +88,7 @@ func (p *Progress) AddFile(size uint64) { } // AddProgress accumulates the number of bytes written for a file -func (p *Progress) AddProgress(name string, isDir bool, isNew bool, bytesWrittenPortion uint64, bytesTotal uint64) { +func (p *Progress) AddProgress(name string, action ItemAction, bytesWrittenPortion uint64, bytesTotal uint64) { if p == nil { return } @@ -108,12 +108,6 @@ func (p *Progress) AddProgress(name string, isDir bool, isNew bool, bytesWritten delete(p.progressInfoMap, name) p.s.FilesFinished++ - action := ActionFileUpdated - if isDir { - action = ActionDirRestored - } else if isNew { - action = ActionFileRestored - } p.printer.CompleteItem(action, name, bytesTotal) } } diff --git a/internal/ui/restore/progress_test.go b/internal/ui/restore/progress_test.go index 9d795a31301..eda1b05c0c8 100644 --- a/internal/ui/restore/progress_test.go +++ b/internal/ui/restore/progress_test.go @@ -81,7 +81,7 @@ func TestFirstProgressOnAFile(t *testing.T) { result, items := testProgress(func(progress *Progress) bool { progress.AddFile(expectedBytesTotal) - progress.AddProgress("test", false, false, expectedBytesWritten, expectedBytesTotal) + progress.AddProgress("test", ActionFileUpdated, expectedBytesWritten, expectedBytesTotal) return false }) test.Equals(t, printerTrace{ @@ -95,9 +95,9 @@ func TestLastProgressOnAFile(t *testing.T) { result, items := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) - progress.AddProgress("test", false, false, 30, fileSize) - progress.AddProgress("test", false, false, 35, fileSize) - progress.AddProgress("test", false, false, 35, fileSize) + progress.AddProgress("test", ActionFileUpdated, 30, fileSize) + progress.AddProgress("test", ActionFileUpdated, 35, fileSize) + progress.AddProgress("test", ActionFileUpdated, 35, fileSize) return false }) test.Equals(t, printerTrace{ @@ -114,9 +114,9 @@ func TestLastProgressOnLastFile(t *testing.T) { result, items := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) - progress.AddProgress("test1", false, false, 50, 50) - progress.AddProgress("test2", false, false, 50, fileSize) - progress.AddProgress("test2", false, false, 50, fileSize) + progress.AddProgress("test1", ActionFileUpdated, 50, 50) + progress.AddProgress("test2", ActionFileUpdated, 50, fileSize) + progress.AddProgress("test2", ActionFileUpdated, 50, fileSize) return false }) test.Equals(t, printerTrace{ @@ -134,8 +134,8 @@ func TestSummaryOnSuccess(t *testing.T) { result, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) - progress.AddProgress("test1", false, false, 50, 50) - progress.AddProgress("test2", false, false, fileSize, fileSize) + progress.AddProgress("test1", ActionFileUpdated, 50, 50) + progress.AddProgress("test2", ActionFileUpdated, fileSize, fileSize) return true }) test.Equals(t, printerTrace{ @@ -149,8 +149,8 @@ func TestSummaryOnErrors(t 
*testing.T) { result, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) - progress.AddProgress("test1", false, false, 50, 50) - progress.AddProgress("test2", false, false, fileSize/2, fileSize) + progress.AddProgress("test1", ActionFileUpdated, 50, 50) + progress.AddProgress("test2", ActionFileUpdated, fileSize/2, fileSize) return true }) test.Equals(t, printerTrace{ @@ -179,8 +179,8 @@ func TestProgressTypes(t *testing.T) { _, items := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(0) - progress.AddProgress("dir", true, false, fileSize, fileSize) - progress.AddProgress("new", false, true, 0, 0) + progress.AddProgress("dir", ActionDirRestored, fileSize, fileSize) + progress.AddProgress("new", ActionFileRestored, 0, 0) return true }) test.Equals(t, itemTrace{ From a90665028fa9ad0107911a6f330528eaba5549ea Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 5 Jul 2024 20:12:41 +0200 Subject: [PATCH 425/893] doc: fix syntax error --- doc/040_backup.rst | 2 +- doc/050_restore.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index a07413ad9c5..2aeb063d912 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -242,7 +242,7 @@ case, specify the ``--skip-if-unchanged`` option. Note that when using absolute paths to specify the backup target, then also changes to the parent folders result in a changed snapshot. For example, a backup of ``/home/user/work`` will create a new snapshot if the metadata of either - ``/``, ``/home`` or ``/home/user`` change. To avoid this problem run restic from +``/``, ``/home`` or ``/home/user`` change. To avoid this problem run restic from the corresponding folder and use relative paths. .. code-block:: console diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 57251f13502..497488241e4 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -69,7 +69,7 @@ There are case insensitive variants of ``--exclude`` and ``--include`` called ignore the casing of paths. There are also ``--include-file``, ``--exclude-file``, ``--iinclude-file`` and - ``--iexclude-file`` flags that read the include and exclude patterns from a file. +``--iexclude-file`` flags that read the include and exclude patterns from a file. Restoring symbolic links on windows is only possible when the user has ``SeCreateSymbolicLinkPrivilege`` privilege or is running as admin. 
This is a From 1369658a32a62053530c82df2deae3f733bcf7ce Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 17:15:29 +0200 Subject: [PATCH 426/893] archiver: extract Readdirnames to fs package --- internal/archiver/archiver.go | 25 ++----------------------- internal/archiver/scanner.go | 2 +- internal/archiver/tree.go | 2 +- internal/fs/file.go | 22 ++++++++++++++++++++++ 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 9a31911b972..19ad12ab8c2 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -304,7 +304,7 @@ func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi return FutureNode{}, err } - names, err := readdirnames(arch.FS, dir, fs.O_NOFOLLOW) + names, err := fs.Readdirnames(arch.FS, dir, fs.O_NOFOLLOW) if err != nil { return FutureNode{}, err } @@ -707,27 +707,6 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, return fn, len(nodes), nil } -// flags are passed to fs.OpenFile. O_RDONLY is implied. -func readdirnames(filesystem fs.FS, dir string, flags int) ([]string, error) { - f, err := filesystem.OpenFile(dir, fs.O_RDONLY|flags, 0) - if err != nil { - return nil, errors.WithStack(err) - } - - entries, err := f.Readdirnames(-1) - if err != nil { - _ = f.Close() - return nil, errors.Wrapf(err, "Readdirnames %v failed", dir) - } - - err = f.Close() - if err != nil { - return nil, err - } - - return entries, nil -} - // resolveRelativeTargets replaces targets that only contain relative // directories ("." or "../../") with the contents of the directory. Each // element of target is processed with fs.Clean(). @@ -743,7 +722,7 @@ func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) { } debug.Log("replacing %q with readdir(%q)", target, target) - entries, err := readdirnames(filesys, target, fs.O_NOFOLLOW) + entries, err := fs.Readdirnames(filesys, target, fs.O_NOFOLLOW) if err != nil { return nil, err } diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go index cc419b19eb9..d61e5ce4785 100644 --- a/internal/archiver/scanner.go +++ b/internal/archiver/scanner.go @@ -124,7 +124,7 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (Sca stats.Files++ stats.Bytes += uint64(fi.Size()) case fi.Mode().IsDir(): - names, err := readdirnames(s.FS, target, fs.O_NOFOLLOW) + names, err := fs.Readdirnames(s.FS, target, fs.O_NOFOLLOW) if err != nil { return stats, s.Error(target, err) } diff --git a/internal/archiver/tree.go b/internal/archiver/tree.go index 16a78ee70ac..cd03ba52172 100644 --- a/internal/archiver/tree.go +++ b/internal/archiver/tree.go @@ -233,7 +233,7 @@ func unrollTree(f fs.FS, t *Tree) error { // nodes, add the contents of Path to the nodes. if t.Path != "" && len(t.Nodes) > 0 { debug.Log("resolve path %v", t.Path) - entries, err := readdirnames(f, t.Path, 0) + entries, err := fs.Readdirnames(f, t.Path, 0) if err != nil { return err } diff --git a/internal/fs/file.go b/internal/fs/file.go index 4a236ea090a..929195f1c97 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -1,6 +1,7 @@ package fs import ( + "fmt" "os" "path/filepath" "time" @@ -138,3 +139,24 @@ func ResetPermissions(path string) error { } return nil } + +// Readdirnames returns a list of file in a directory. Flags are passed to fs.OpenFile. O_RDONLY is implied. 
+func Readdirnames(filesystem FS, dir string, flags int) ([]string, error) { + f, err := filesystem.OpenFile(dir, O_RDONLY|flags, 0) + if err != nil { + return nil, fmt.Errorf("openfile for readdirnames failed: %w", err) + } + + entries, err := f.Readdirnames(-1) + if err != nil { + _ = f.Close() + return nil, fmt.Errorf("readdirnames %v failed: %w", dir, err) + } + + err = f.Close() + if err != nil { + return nil, err + } + + return entries, nil +} From a9a60f77ced8c8ecbca2d09795c7574c71715b13 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 17:24:47 +0200 Subject: [PATCH 427/893] restore: optimize memory usage --- internal/restorer/restorer.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 650ad07316d..3817414ebe7 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -120,7 +120,9 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, return hasRestored, res.Error(location, err) } - for _, node := range tree.Nodes { + for i, node := range tree.Nodes { + // allow GC of tree node + tree.Nodes[i] = nil // ensure that the node name does not contain anything that refers to a // top-level directory. From d762f4ee64ff22889b97f361aa5010c11f9ec87e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 17:53:47 +0200 Subject: [PATCH 428/893] restore: simplfy selectFilter arguments --- cmd/restic/cmd_restore.go | 8 ++++---- internal/restorer/restorer.go | 6 +++--- internal/restorer/restorer_test.go | 22 +++++++++++----------- internal/restorer/restorer_unix_test.go | 8 -------- 4 files changed, 18 insertions(+), 26 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index aea6457bd5c..d10558c6a7c 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -161,7 +161,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, msg.E("Warning: %s\n", message) } - selectExcludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + selectExcludeFilter := func(item string, _ string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { matched := false for _, rejectFn := range excludePatternFns { matched = matched || rejectFn(item) @@ -178,12 +178,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, // therefore childMayMatch does not matter, but we should not go down // unless the dir is selected for restore selectedForRestore = !matched - childMayBeSelected = selectedForRestore && node.Type == "dir" + childMayBeSelected = selectedForRestore && isDir return selectedForRestore, childMayBeSelected } - selectIncludeFilter := func(item string, _ string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + selectIncludeFilter := func(item string, _ string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { selectedForRestore = false childMayBeSelected = false for _, includeFn := range includePatternFns { @@ -195,7 +195,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, break } } - childMayBeSelected = childMayBeSelected && node.Type == "dir" + childMayBeSelected = childMayBeSelected && isDir return selectedForRestore, childMayBeSelected } diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 3817414ebe7..d13c3462cf9 100644 --- a/internal/restorer/restorer.go +++ 
b/internal/restorer/restorer.go @@ -27,7 +27,7 @@ type Restorer struct { Error func(location string, err error) error Warn func(message string) - SelectFilter func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) + SelectFilter func(item string, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) } var restorerAbortOnAllErrors = func(_ string, err error) error { return err } @@ -97,7 +97,7 @@ func NewRestorer(repo restic.Repository, sn *restic.Snapshot, opts Options) *Res opts: opts, fileList: make(map[string]bool), Error: restorerAbortOnAllErrors, - SelectFilter: func(string, string, *restic.Node) (bool, bool) { return true, true }, + SelectFilter: func(string, string, bool) (bool, bool) { return true, true }, sn: sn, } @@ -154,7 +154,7 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, continue } - selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, nodeTarget, node) + selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, nodeTarget, node.Type == "dir") debug.Log("SelectFilter returned %v %v for %q", selectedForRestore, childMayBeSelected, nodeLocation) if selectedForRestore { diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 5eca779c675..720b913682a 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -192,7 +192,7 @@ func TestRestorer(t *testing.T) { Files map[string]string ErrorsMust map[string]map[string]struct{} ErrorsMay map[string]map[string]struct{} - Select func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) + Select func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) }{ // valid test cases { @@ -284,7 +284,7 @@ func TestRestorer(t *testing.T) { Files: map[string]string{ "dir/file": "content: file\n", }, - Select: func(item, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + Select: func(item, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { switch item { case filepath.FromSlash("/dir"): childMayBeSelected = true @@ -370,7 +370,7 @@ func TestRestorer(t *testing.T) { // make sure we're creating a new subdir of the tempdir tempdir = filepath.Join(tempdir, "target") - res.SelectFilter = func(item, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + res.SelectFilter = func(item, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { t.Logf("restore %v to %v", item, dstpath) if !fs.HasPathPrefix(tempdir, dstpath) { t.Errorf("would restore %v to %v, which is not within the target dir %v", @@ -379,7 +379,7 @@ func TestRestorer(t *testing.T) { } if test.Select != nil { - return test.Select(item, dstpath, node) + return test.Select(item, dstpath, isDir) } return true, true @@ -570,7 +570,7 @@ func checkVisitOrder(list []TreeVisit) TraverseTreeCheck { func TestRestorerTraverseTree(t *testing.T) { var tests = []struct { Snapshot - Select func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) + Select func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) Visitor TraverseTreeCheck }{ { @@ -586,7 +586,7 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, node *restic.Node) 
(selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { return true, true }, Visitor: checkVisitOrder([]TreeVisit{ @@ -613,7 +613,7 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { if item == "/foo" { return true, false } @@ -635,7 +635,7 @@ func TestRestorerTraverseTree(t *testing.T) { }}, }, }, - Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { if item == "/aaa" { return true, false } @@ -659,7 +659,7 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { if strings.HasPrefix(item, "/dir") { return true, true } @@ -688,7 +688,7 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { switch item { case "/dir": return false, true @@ -788,7 +788,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) { res := NewRestorer(repo, sn, Options{}) - res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + res.SelectFilter = func(item string, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { switch filepath.ToSlash(item) { case "/dir": childMayBeSelected = true diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index 2ad28a0f6df..febd43acea2 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -13,7 +13,6 @@ import ( "time" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" restoreui "github.com/restic/restic/internal/ui/restore" ) @@ -34,10 +33,6 @@ func TestRestorerRestoreEmptyHardlinkedFields(t *testing.T) { res := NewRestorer(repo, sn, Options{}) - res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - return true, true - } - tempdir := rtest.TempDir(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -108,9 +103,6 @@ func testRestorerProgressBar(t *testing.T, dryRun bool) { mock := &printerMock{} progress := restoreui.NewProgress(mock, 0) res := NewRestorer(repo, sn, Options{Progress: progress, DryRun: dryRun}) - res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { - return true, true - } tempdir := rtest.TempDir(t) ctx, cancel := context.WithCancel(context.Background()) From 144e2a451fb19753ec37f2543cc5ffb1b929a591 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 18:58:17 +0200 
Subject: [PATCH 429/893] restore: track expected filenames in a folder --- internal/restorer/restorer.go | 87 +++++++++++++++++++++++------- internal/restorer/restorer_test.go | 73 ++++++++++++++++--------- 2 files changed, 117 insertions(+), 43 deletions(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index d13c3462cf9..62c486f0224 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -37,6 +37,7 @@ type Options struct { Sparse bool Progress *restoreui.Progress Overwrite OverwriteBehavior + Delete bool } type OverwriteBehavior int @@ -107,22 +108,61 @@ func NewRestorer(repo restic.Repository, sn *restic.Snapshot, opts Options) *Res type treeVisitor struct { enterDir func(node *restic.Node, target, location string) error visitNode func(node *restic.Node, target, location string) error - leaveDir func(node *restic.Node, target, location string) error + // 'entries' contains all files the snapshot contains for this node. This also includes files + // ignored by the SelectFilter. + leaveDir func(node *restic.Node, target, location string, entries []string) error } // traverseTree traverses a tree from the repo and calls treeVisitor. // target is the path in the file system, location within the snapshot. -func (res *Restorer) traverseTree(ctx context.Context, target, location string, treeID restic.ID, visitor treeVisitor) (hasRestored bool, err error) { +func (res *Restorer) traverseTree(ctx context.Context, target string, treeID restic.ID, visitor treeVisitor) error { + location := string(filepath.Separator) + sanitizeError := func(err error) error { + switch err { + case nil, context.Canceled, context.DeadlineExceeded: + // Context errors are permanent. + return err + default: + return res.Error(location, err) + } + } + + if visitor.enterDir != nil { + err := sanitizeError(visitor.enterDir(nil, target, location)) + if err != nil { + return err + } + } + childFilenames, hasRestored, err := res.traverseTreeInner(ctx, target, location, treeID, visitor) + if err != nil { + return err + } + if hasRestored && visitor.leaveDir != nil { + err = sanitizeError(visitor.leaveDir(nil, target, location, childFilenames)) + } + + return err +} + +func (res *Restorer) traverseTreeInner(ctx context.Context, target, location string, treeID restic.ID, visitor treeVisitor) (filenames []string, hasRestored bool, err error) { debug.Log("%v %v %v", target, location, treeID) tree, err := restic.LoadTree(ctx, res.repo, treeID) if err != nil { debug.Log("error loading tree %v: %v", treeID, err) - return hasRestored, res.Error(location, err) + return nil, hasRestored, res.Error(location, err) } + if res.opts.Delete { + filenames = make([]string, 0, len(tree.Nodes)) + } for i, node := range tree.Nodes { // allow GC of tree node tree.Nodes[i] = nil + if res.opts.Delete { + // just track all files included in the tree node to simplify the control flow. + // tracking too many files does not matter except for a slightly elevated memory usage + filenames = append(filenames, node.Name) + } // ensure that the node name does not contain anything that refers to a // top-level directory. 
@@ -131,8 +171,10 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, debug.Log("node %q has invalid name %q", node.Name, nodeName) err := res.Error(location, errors.Errorf("invalid child node name %s", node.Name)) if err != nil { - return hasRestored, err + return nil, hasRestored, err } + // force disable deletion to prevent unexpected behavior + res.opts.Delete = false continue } @@ -144,8 +186,10 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, debug.Log("node %q has invalid target path %q", node.Name, nodeTarget) err := res.Error(nodeLocation, errors.New("node has invalid path")) if err != nil { - return hasRestored, err + return nil, hasRestored, err } + // force disable deletion to prevent unexpected behavior + res.opts.Delete = false continue } @@ -173,25 +217,26 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, if node.Type == "dir" { if node.Subtree == nil { - return hasRestored, errors.Errorf("Dir without subtree in tree %v", treeID.Str()) + return nil, hasRestored, errors.Errorf("Dir without subtree in tree %v", treeID.Str()) } if selectedForRestore && visitor.enterDir != nil { err = sanitizeError(visitor.enterDir(node, nodeTarget, nodeLocation)) if err != nil { - return hasRestored, err + return nil, hasRestored, err } } // keep track of restored child status // so metadata of the current directory are restored on leaveDir childHasRestored := false + var childFilenames []string if childMayBeSelected { - childHasRestored, err = res.traverseTree(ctx, nodeTarget, nodeLocation, *node.Subtree, visitor) + childFilenames, childHasRestored, err = res.traverseTreeInner(ctx, nodeTarget, nodeLocation, *node.Subtree, visitor) err = sanitizeError(err) if err != nil { - return hasRestored, err + return nil, hasRestored, err } // inform the parent directory to restore parent metadata on leaveDir if needed if childHasRestored { @@ -202,9 +247,9 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, // metadata need to be restore when leaving the directory in both cases // selected for restore or any child of any subtree have been restored if (selectedForRestore || childHasRestored) && visitor.leaveDir != nil { - err = sanitizeError(visitor.leaveDir(node, nodeTarget, nodeLocation)) + err = sanitizeError(visitor.leaveDir(node, nodeTarget, nodeLocation, childFilenames)) if err != nil { - return hasRestored, err + return nil, hasRestored, err } } @@ -214,12 +259,12 @@ func (res *Restorer) traverseTree(ctx context.Context, target, location string, if selectedForRestore { err = sanitizeError(visitor.visitNode(node, nodeTarget, nodeLocation)) if err != nil { - return hasRestored, err + return nil, hasRestored, err } } } - return hasRestored, nil + return filenames, hasRestored, nil } func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, target, location string) error { @@ -310,10 +355,12 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { var buf []byte // first tree pass: create directories and collect all files to restore - _, err = res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ + err = res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{ enterDir: func(_ *restic.Node, target, location string) error { debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", location) - res.opts.Progress.AddFile(0) + if location != "/" { + res.opts.Progress.AddFile(0) + } return 
res.ensureDir(target) }, @@ -373,7 +420,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { debug.Log("second pass for %q", dst) // second tree pass: restore special files and filesystem metadata - _, err = res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ + err = res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{ visitNode: func(node *restic.Node, target, location string) error { debug.Log("second pass, visitNode: restore node %q", location) if node.Type != "file" { @@ -396,7 +443,11 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { // don't touch skipped files return nil }, - leaveDir: func(node *restic.Node, target, location string) error { + leaveDir: func(node *restic.Node, target, location string, expectedFilenames []string) error { + if node == nil { + return nil + } + err := res.restoreNodeMetadataTo(node, target, location) if err == nil { res.opts.Progress.AddProgress(location, restoreui.ActionDirRestored, 0, 0) @@ -493,7 +544,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { g.Go(func() error { defer close(work) - _, err := res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ + err := res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{ visitNode: func(node *restic.Node, target, location string) error { if node.Type != "file" { return nil diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 720b913682a..d483872e0da 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -8,6 +8,7 @@ import ( "math" "os" "path/filepath" + "reflect" "runtime" "strings" "syscall" @@ -527,16 +528,17 @@ func TestRestorerRelative(t *testing.T) { type TraverseTreeCheck func(testing.TB) treeVisitor type TreeVisit struct { - funcName string // name of the function - location string // location passed to the function + funcName string // name of the function + location string // location passed to the function + files []string // file list passed to the function } func checkVisitOrder(list []TreeVisit) TraverseTreeCheck { var pos int return func(t testing.TB) treeVisitor { - check := func(funcName string) func(*restic.Node, string, string) error { - return func(node *restic.Node, target, location string) error { + check := func(funcName string) func(*restic.Node, string, string, []string) error { + return func(node *restic.Node, target, location string, expectedFilenames []string) error { if pos >= len(list) { t.Errorf("step %v, %v(%v): expected no more than %d function calls", pos, funcName, location, len(list)) pos++ @@ -554,14 +556,24 @@ func checkVisitOrder(list []TreeVisit) TraverseTreeCheck { t.Errorf("step %v: want location %v, got %v", pos, list[pos].location, location) } + if !reflect.DeepEqual(expectedFilenames, v.files) { + t.Errorf("step %v: want files %v, got %v", pos, list[pos].files, expectedFilenames) + } + pos++ return nil } } + checkNoFilename := func(funcName string) func(*restic.Node, string, string) error { + f := check(funcName) + return func(node *restic.Node, target, location string) error { + return f(node, target, location, nil) + } + } return treeVisitor{ - enterDir: check("enterDir"), - visitNode: check("visitNode"), + enterDir: checkNoFilename("enterDir"), + visitNode: checkNoFilename("visitNode"), leaveDir: check("leaveDir"), } } @@ -590,13 +602,15 @@ func TestRestorerTraverseTree(t *testing.T) { return true, true }, Visitor: checkVisitOrder([]TreeVisit{ - 
{"enterDir", "/dir"}, - {"visitNode", "/dir/otherfile"}, - {"enterDir", "/dir/subdir"}, - {"visitNode", "/dir/subdir/file"}, - {"leaveDir", "/dir/subdir"}, - {"leaveDir", "/dir"}, - {"visitNode", "/foo"}, + {"enterDir", "/", nil}, + {"enterDir", "/dir", nil}, + {"visitNode", "/dir/otherfile", nil}, + {"enterDir", "/dir/subdir", nil}, + {"visitNode", "/dir/subdir/file", nil}, + {"leaveDir", "/dir/subdir", []string{"file"}}, + {"leaveDir", "/dir", []string{"otherfile", "subdir"}}, + {"visitNode", "/foo", nil}, + {"leaveDir", "/", []string{"dir", "foo"}}, }), }, @@ -620,7 +634,9 @@ func TestRestorerTraverseTree(t *testing.T) { return false, false }, Visitor: checkVisitOrder([]TreeVisit{ - {"visitNode", "/foo"}, + {"enterDir", "/", nil}, + {"visitNode", "/foo", nil}, + {"leaveDir", "/", []string{"dir", "foo"}}, }), }, { @@ -642,7 +658,9 @@ func TestRestorerTraverseTree(t *testing.T) { return false, false }, Visitor: checkVisitOrder([]TreeVisit{ - {"visitNode", "/aaa"}, + {"enterDir", "/", nil}, + {"visitNode", "/aaa", nil}, + {"leaveDir", "/", []string{"aaa", "dir"}}, }), }, @@ -666,12 +684,14 @@ func TestRestorerTraverseTree(t *testing.T) { return false, false }, Visitor: checkVisitOrder([]TreeVisit{ - {"enterDir", "/dir"}, - {"visitNode", "/dir/otherfile"}, - {"enterDir", "/dir/subdir"}, - {"visitNode", "/dir/subdir/file"}, - {"leaveDir", "/dir/subdir"}, - {"leaveDir", "/dir"}, + {"enterDir", "/", nil}, + {"enterDir", "/dir", nil}, + {"visitNode", "/dir/otherfile", nil}, + {"enterDir", "/dir/subdir", nil}, + {"visitNode", "/dir/subdir/file", nil}, + {"leaveDir", "/dir/subdir", []string{"file"}}, + {"leaveDir", "/dir", []string{"otherfile", "subdir"}}, + {"leaveDir", "/", []string{"dir", "foo"}}, }), }, @@ -699,8 +719,10 @@ func TestRestorerTraverseTree(t *testing.T) { } }, Visitor: checkVisitOrder([]TreeVisit{ - {"visitNode", "/dir/otherfile"}, - {"leaveDir", "/dir"}, + {"enterDir", "/", nil}, + {"visitNode", "/dir/otherfile", nil}, + {"leaveDir", "/dir", []string{"otherfile", "subdir"}}, + {"leaveDir", "/", []string{"dir", "foo"}}, }), }, } @@ -710,7 +732,8 @@ func TestRestorerTraverseTree(t *testing.T) { repo := repository.TestRepository(t) sn, _ := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes) - res := NewRestorer(repo, sn, Options{}) + // set Delete option to enable tracking filenames in a directory + res := NewRestorer(repo, sn, Options{Delete: true}) res.SelectFilter = test.Select @@ -721,7 +744,7 @@ func TestRestorerTraverseTree(t *testing.T) { // make sure we're creating a new subdir of the tempdir target := filepath.Join(tempdir, "target") - _, err := res.traverseTree(ctx, target, string(filepath.Separator), *sn.Tree, test.Visitor(t)) + err := res.traverseTree(ctx, target, *sn.Tree, test.Visitor(t)) if err != nil { t.Fatal(err) } From ac44bdf6dd2805c6f5f30f761a7893bc912e57c1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 19:02:57 +0200 Subject: [PATCH 430/893] restore: add --delete option to remove files that are not in snapshot --- cmd/restic/cmd_restore.go | 3 + internal/restorer/restorer.go | 56 ++++++++++++++- internal/restorer/restorer_test.go | 112 +++++++++++++++++++++++++++++ 3 files changed, 169 insertions(+), 2 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index d10558c6a7c..07290a05b05 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -51,6 +51,7 @@ type RestoreOptions struct { Sparse bool Verify bool Overwrite restorer.OverwriteBehavior + Delete bool } var restoreOptions 
RestoreOptions @@ -69,6 +70,7 @@ func init() { flags.BoolVar(&restoreOptions.Sparse, "sparse", false, "restore files as sparse") flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content") flags.Var(&restoreOptions.Overwrite, "overwrite", "overwrite behavior, one of (always|if-changed|if-newer|never) (default: always)") + flags.BoolVar(&restoreOptions.Delete, "delete", false, "delete files from target directory if they do not exist in snapshot. Use '--dry-run -vv' to check what would be deleted") } func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, @@ -149,6 +151,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, Sparse: opts.Sparse, Progress: progress, Overwrite: opts.Overwrite, + Delete: opts.Delete, }) totalErrors := 0 diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 62c486f0224..5fd06098fc0 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -25,8 +25,10 @@ type Restorer struct { fileList map[string]bool - Error func(location string, err error) error - Warn func(message string) + Error func(location string, err error) error + Warn func(message string) + // SelectFilter determines whether the item is selectedForRestore or whether a childMayBeSelected. + // selectedForRestore must not depend on isDir as `removeUnexpectedFiles` always passes false to isDir. SelectFilter func(item string, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) } @@ -444,6 +446,12 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { return nil }, leaveDir: func(node *restic.Node, target, location string, expectedFilenames []string) error { + if res.opts.Delete { + if err := res.removeUnexpectedFiles(target, location, expectedFilenames); err != nil { + return err + } + } + if node == nil { return nil } @@ -458,6 +466,50 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { return err } +func (res *Restorer) removeUnexpectedFiles(target, location string, expectedFilenames []string) error { + if !res.opts.Delete { + panic("internal error") + } + + entries, err := fs.Readdirnames(fs.Local{}, target, fs.O_NOFOLLOW) + if errors.Is(err, os.ErrNotExist) { + return nil + } else if err != nil { + return err + } + + keep := map[string]struct{}{} + for _, name := range expectedFilenames { + keep[name] = struct{}{} + } + + for _, entry := range entries { + if _, ok := keep[entry]; ok { + continue + } + + nodeTarget := filepath.Join(target, entry) + nodeLocation := filepath.Join(location, entry) + + if target == nodeTarget || !fs.HasPathPrefix(target, nodeTarget) { + return fmt.Errorf("skipping deletion due to invalid filename: %v", entry) + } + + // TODO pass a proper value to the isDir parameter once this becomes relevant for the filters + selectedForRestore, _ := res.SelectFilter(nodeLocation, nodeTarget, false) + // only delete files that were selected for restore + if selectedForRestore { + if !res.opts.DryRun { + if err := fs.RemoveAll(nodeTarget); err != nil { + return err + } + } + } + } + + return nil +} + func (res *Restorer) trackFile(location string, metadataOnly bool) { res.fileList[location] = metadataOnly } diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index d483872e0da..0dc2961fadb 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -1219,3 +1219,115 @@ func TestRestoreDryRun(t *testing.T) { _, err := 
os.Stat(tempdir) rtest.Assert(t, errors.Is(err, os.ErrNotExist), "expected no file to be created, got %v", err) } + +func TestRestoreDelete(t *testing.T) { + repo := repository.TestRepository(t) + tempdir := rtest.TempDir(t) + + sn, _ := saveSnapshot(t, repo, Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{ + Mode: normalizeFileMode(0755 | os.ModeDir), + Nodes: map[string]Node{ + "file1": File{Data: "content: file\n"}, + "anotherfile": File{Data: "content: file\n"}, + }, + }, + "dir2": Dir{ + Mode: normalizeFileMode(0755 | os.ModeDir), + Nodes: map[string]Node{ + "anotherfile": File{Data: "content: file\n"}, + }, + }, + "anotherfile": File{Data: "content: file\n"}, + }, + }, noopGetGenericAttributes) + + // should delete files that no longer exist in the snapshot + deleteSn, _ := saveSnapshot(t, repo, Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{ + Mode: normalizeFileMode(0755 | os.ModeDir), + Nodes: map[string]Node{ + "file1": File{Data: "content: file\n"}, + }, + }, + }, + }, noopGetGenericAttributes) + + tests := []struct { + selectFilter func(item string, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) + fileState map[string]bool + }{ + { + selectFilter: nil, + fileState: map[string]bool{ + "dir": true, + filepath.Join("dir", "anotherfile"): false, + filepath.Join("dir", "file1"): true, + "dir2": false, + filepath.Join("dir2", "anotherfile"): false, + "anotherfile": false, + }, + }, + { + selectFilter: func(item, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + return false, false + }, + fileState: map[string]bool{ + "dir": true, + filepath.Join("dir", "anotherfile"): true, + filepath.Join("dir", "file1"): true, + "dir2": true, + filepath.Join("dir2", "anotherfile"): true, + "anotherfile": true, + }, + }, + { + selectFilter: func(item, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + switch item { + case filepath.FromSlash("/dir"): + selectedForRestore = true + case filepath.FromSlash("/dir2"): + selectedForRestore = true + } + return + }, + fileState: map[string]bool{ + "dir": true, + filepath.Join("dir", "anotherfile"): true, + filepath.Join("dir", "file1"): true, + "dir2": false, + filepath.Join("dir2", "anotherfile"): false, + "anotherfile": true, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + res := NewRestorer(repo, sn, Options{}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + res = NewRestorer(repo, deleteSn, Options{Delete: true}) + if test.selectFilter != nil { + res.SelectFilter = test.selectFilter + } + err = res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + for fn, shouldExist := range test.fileState { + _, err := os.Stat(filepath.Join(tempdir, fn)) + if shouldExist { + rtest.OK(t, err) + } else { + rtest.Assert(t, errors.Is(err, os.ErrNotExist), "file %v: unexpected error got %v, expected ErrNotExist", fn, err) + } + } + }) + } +} From 013a6156bd2eba4d557548636b7db6e7cd3aee5d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 19:23:09 +0200 Subject: [PATCH 431/893] restore: remove unused parameter from SelectFilter --- cmd/restic/cmd_restore.go | 4 ++-- internal/restorer/restorer.go | 8 +++---- internal/restorer/restorer_test.go | 36 +++++++++++++----------------- 3 files changed, 21 insertions(+), 27 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 07290a05b05..1835219ad91 100644 --- 
a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -164,7 +164,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, msg.E("Warning: %s\n", message) } - selectExcludeFilter := func(item string, _ string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + selectExcludeFilter := func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { matched := false for _, rejectFn := range excludePatternFns { matched = matched || rejectFn(item) @@ -186,7 +186,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, return selectedForRestore, childMayBeSelected } - selectIncludeFilter := func(item string, _ string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + selectIncludeFilter := func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { selectedForRestore = false childMayBeSelected = false for _, includeFn := range includePatternFns { diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 5fd06098fc0..52d34c5ede8 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -29,7 +29,7 @@ type Restorer struct { Warn func(message string) // SelectFilter determines whether the item is selectedForRestore or whether a childMayBeSelected. // selectedForRestore must not depend on isDir as `removeUnexpectedFiles` always passes false to isDir. - SelectFilter func(item string, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) + SelectFilter func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) } var restorerAbortOnAllErrors = func(_ string, err error) error { return err } @@ -100,7 +100,7 @@ func NewRestorer(repo restic.Repository, sn *restic.Snapshot, opts Options) *Res opts: opts, fileList: make(map[string]bool), Error: restorerAbortOnAllErrors, - SelectFilter: func(string, string, bool) (bool, bool) { return true, true }, + SelectFilter: func(string, bool) (bool, bool) { return true, true }, sn: sn, } @@ -200,7 +200,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str continue } - selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, nodeTarget, node.Type == "dir") + selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, node.Type == "dir") debug.Log("SelectFilter returned %v %v for %q", selectedForRestore, childMayBeSelected, nodeLocation) if selectedForRestore { @@ -496,7 +496,7 @@ func (res *Restorer) removeUnexpectedFiles(target, location string, expectedFile } // TODO pass a proper value to the isDir parameter once this becomes relevant for the filters - selectedForRestore, _ := res.SelectFilter(nodeLocation, nodeTarget, false) + selectedForRestore, _ := res.SelectFilter(nodeLocation, false) // only delete files that were selected for restore if selectedForRestore { if !res.opts.DryRun { diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 0dc2961fadb..8a8f81ce043 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -193,7 +193,7 @@ func TestRestorer(t *testing.T) { Files map[string]string ErrorsMust map[string]map[string]struct{} ErrorsMay map[string]map[string]struct{} - Select func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) + Select func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) }{ // valid test cases { @@ -285,7 +285,7 @@ 
func TestRestorer(t *testing.T) { Files: map[string]string{ "dir/file": "content: file\n", }, - Select: func(item, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { switch item { case filepath.FromSlash("/dir"): childMayBeSelected = true @@ -371,16 +371,10 @@ func TestRestorer(t *testing.T) { // make sure we're creating a new subdir of the tempdir tempdir = filepath.Join(tempdir, "target") - res.SelectFilter = func(item, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { - t.Logf("restore %v to %v", item, dstpath) - if !fs.HasPathPrefix(tempdir, dstpath) { - t.Errorf("would restore %v to %v, which is not within the target dir %v", - item, dstpath, tempdir) - return false, false - } - + res.SelectFilter = func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + t.Logf("restore %v", item) if test.Select != nil { - return test.Select(item, dstpath, isDir) + return test.Select(item, isDir) } return true, true @@ -582,7 +576,7 @@ func checkVisitOrder(list []TreeVisit) TraverseTreeCheck { func TestRestorerTraverseTree(t *testing.T) { var tests = []struct { Snapshot - Select func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) + Select func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) Visitor TraverseTreeCheck }{ { @@ -598,7 +592,7 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { return true, true }, Visitor: checkVisitOrder([]TreeVisit{ @@ -627,7 +621,7 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { if item == "/foo" { return true, false } @@ -651,7 +645,7 @@ func TestRestorerTraverseTree(t *testing.T) { }}, }, }, - Select: func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { if item == "/aaa" { return true, false } @@ -677,7 +671,7 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { if strings.HasPrefix(item, "/dir") { return true, true } @@ -708,7 +702,7 @@ func TestRestorerTraverseTree(t *testing.T) { "foo": File{Data: "content: foo\n"}, }, }, - Select: func(item string, dstpath string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { + Select: func(item string, isDir bool) (selectForRestore bool, childMayBeSelected bool) { switch item { case "/dir": return false, true @@ -811,7 +805,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) { res := NewRestorer(repo, sn, Options{}) - res.SelectFilter = func(item string, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + res.SelectFilter = func(item string, isDir bool) (selectedForRestore bool, 
childMayBeSelected bool) { switch filepath.ToSlash(item) { case "/dir": childMayBeSelected = true @@ -1256,7 +1250,7 @@ func TestRestoreDelete(t *testing.T) { }, noopGetGenericAttributes) tests := []struct { - selectFilter func(item string, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) + selectFilter func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) fileState map[string]bool }{ { @@ -1271,7 +1265,7 @@ func TestRestoreDelete(t *testing.T) { }, }, { - selectFilter: func(item, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + selectFilter: func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { return false, false }, fileState: map[string]bool{ @@ -1284,7 +1278,7 @@ func TestRestoreDelete(t *testing.T) { }, }, { - selectFilter: func(item, dstpath string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { + selectFilter: func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { switch item { case filepath.FromSlash("/dir"): selectedForRestore = true From 168fc09d5f25b34d72a25de5aaafb704e97035d4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 19:54:12 +0200 Subject: [PATCH 432/893] restore: use case insensitive file name comparison on windows --- internal/restorer/restorer.go | 4 +-- internal/restorer/restorer_unix.go | 10 +++++++ internal/restorer/restorer_windows.go | 13 +++++++++ internal/restorer/restorer_windows_test.go | 34 ++++++++++++++++++++++ 4 files changed, 59 insertions(+), 2 deletions(-) create mode 100644 internal/restorer/restorer_unix.go create mode 100644 internal/restorer/restorer_windows.go diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 52d34c5ede8..6e81812c2ce 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -480,11 +480,11 @@ func (res *Restorer) removeUnexpectedFiles(target, location string, expectedFile keep := map[string]struct{}{} for _, name := range expectedFilenames { - keep[name] = struct{}{} + keep[toComparableFilename(name)] = struct{}{} } for _, entry := range entries { - if _, ok := keep[entry]; ok { + if _, ok := keep[toComparableFilename(entry)]; ok { continue } diff --git a/internal/restorer/restorer_unix.go b/internal/restorer/restorer_unix.go new file mode 100644 index 00000000000..7316f7b5dd1 --- /dev/null +++ b/internal/restorer/restorer_unix.go @@ -0,0 +1,10 @@ +//go:build !windows +// +build !windows + +package restorer + +// toComparableFilename returns a filename suitable for equality checks. On Windows, it returns the +// uppercase version of the string. On all other systems, it returns the unmodified filename. +func toComparableFilename(path string) string { + return path +} diff --git a/internal/restorer/restorer_windows.go b/internal/restorer/restorer_windows.go new file mode 100644 index 00000000000..72337d8ae80 --- /dev/null +++ b/internal/restorer/restorer_windows.go @@ -0,0 +1,13 @@ +//go:build windows +// +build windows + +package restorer + +import "strings" + +// toComparableFilename returns a filename suitable for equality checks. On Windows, it returns the +// uppercase version of the string. On all other systems, it returns the unmodified filename. 
+func toComparableFilename(path string) string { + // apparently NTFS internally uppercases filenames for comparision + return strings.ToUpper(path) +} diff --git a/internal/restorer/restorer_windows_test.go b/internal/restorer/restorer_windows_test.go index 61d07506144..3f6c8472ba0 100644 --- a/internal/restorer/restorer_windows_test.go +++ b/internal/restorer/restorer_windows_test.go @@ -9,6 +9,7 @@ import ( "math" "os" "path" + "path/filepath" "syscall" "testing" "time" @@ -539,3 +540,36 @@ func TestDirAttributeCombinationsOverwrite(t *testing.T) { } } } + +func TestRestoreDeleteCaseInsensitive(t *testing.T) { + repo := repository.TestRepository(t) + tempdir := rtest.TempDir(t) + + sn, _ := saveSnapshot(t, repo, Snapshot{ + Nodes: map[string]Node{ + "anotherfile": File{Data: "content: file\n"}, + }, + }, noopGetGenericAttributes) + + // should delete files that no longer exist in the snapshot + deleteSn, _ := saveSnapshot(t, repo, Snapshot{ + Nodes: map[string]Node{ + "AnotherfilE": File{Data: "content: file\n"}, + }, + }, noopGetGenericAttributes) + + res := NewRestorer(repo, sn, Options{}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + res = NewRestorer(repo, deleteSn, Options{Delete: true}) + err = res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + // anotherfile must still exist + _, err = os.Stat(filepath.Join(tempdir, "anotherfile")) + rtest.OK(t, err) +} From f4b15fdd96389e704423b8f6a2dd67f7e6ac442f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 20:23:28 +0200 Subject: [PATCH 433/893] restore: allow deleting a directory to replace it with a file When the `--delete` option is specified, recursively delete directories that should be replaced with a file. 
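The core of the change is in `createFile`: if the target path is occupied by something other than the expected regular file, it is removed first — recursively only when `--delete` is in effect — and then recreated with `O_EXCL`. A minimal, self-contained sketch of that remove-then-recreate step, using only the standard library rather than restic's internal `fs` wrappers (the helper name and signature below are illustrative, not restic's actual API):

package sketch

import "os"

// replaceWithFile removes whatever currently occupies path (recursively only
// when allowed) and recreates the path as a fresh, empty regular file.
func replaceWithFile(path string, allowRecursiveDelete bool) (*os.File, error) {
    if fi, err := os.Lstat(path); err == nil && !fi.Mode().IsRegular() {
        var rmErr error
        if allowRecursiveDelete {
            // a possibly non-empty directory is in the way: delete it with its content
            rmErr = os.RemoveAll(path)
        } else {
            // without --delete only empty directories, symlinks etc. can be removed
            rmErr = os.Remove(path)
        }
        if rmErr != nil {
            return nil, rmErr
        }
    }
    // O_EXCL guarantees the file at path is one we created ourselves
    return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0o600)
}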
--- internal/restorer/filerestorer.go | 24 ++++++++------ internal/restorer/filerestorer_test.go | 6 ++-- internal/restorer/fileswriter.go | 22 ++++++++----- internal/restorer/fileswriter_test.go | 43 ++++++++++++++++++++++++-- internal/restorer/restorer.go | 2 +- internal/restorer/restorer_test.go | 21 +++++++++++++ 6 files changed, 95 insertions(+), 23 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index fd5b3c5dbf9..56059cb16f7 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -53,6 +53,8 @@ type fileRestorer struct { sparse bool progress *restore.Progress + allowRecursiveDelete bool + dst string files []*fileInfo Error func(string, error) error @@ -63,21 +65,23 @@ func newFileRestorer(dst string, idx func(restic.BlobType, restic.ID) []restic.PackedBlob, connections uint, sparse bool, + allowRecursiveDelete bool, progress *restore.Progress) *fileRestorer { // as packs are streamed the concurrency is limited by IO workerCount := int(connections) return &fileRestorer{ - idx: idx, - blobsLoader: blobsLoader, - filesWriter: newFilesWriter(workerCount), - zeroChunk: repository.ZeroChunk(), - sparse: sparse, - progress: progress, - workerCount: workerCount, - dst: dst, - Error: restorerAbortOnAllErrors, + idx: idx, + blobsLoader: blobsLoader, + filesWriter: newFilesWriter(workerCount, allowRecursiveDelete), + zeroChunk: repository.ZeroChunk(), + sparse: sparse, + progress: progress, + allowRecursiveDelete: allowRecursiveDelete, + workerCount: workerCount, + dst: dst, + Error: restorerAbortOnAllErrors, } } @@ -207,7 +211,7 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { } func (r *fileRestorer) restoreEmptyFileAt(location string) error { - f, err := createFile(r.targetPath(location), 0, false) + f, err := createFile(r.targetPath(location), 0, false, r.allowRecursiveDelete) if err != nil { return err } diff --git a/internal/restorer/filerestorer_test.go b/internal/restorer/filerestorer_test.go index d29c0dcead0..f594760e4af 100644 --- a/internal/restorer/filerestorer_test.go +++ b/internal/restorer/filerestorer_test.go @@ -144,7 +144,7 @@ func restoreAndVerify(t *testing.T, tempdir string, content []TestFile, files ma t.Helper() repo := newTestRepo(content) - r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, sparse, nil) + r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, sparse, false, nil) if files == nil { r.files = repo.files @@ -285,7 +285,7 @@ func TestErrorRestoreFiles(t *testing.T) { return loadError } - r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, nil) + r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, false, nil) r.files = repo.files err := r.restoreFiles(context.TODO()) @@ -326,7 +326,7 @@ func TestFatalDownloadError(t *testing.T) { }) } - r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, nil) + r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, false, nil) r.files = repo.files var errors []string diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go index 034ed27256a..962f666197d 100644 --- a/internal/restorer/fileswriter.go +++ b/internal/restorer/fileswriter.go @@ -19,7 +19,8 @@ import ( // TODO I am not 100% convinced this is necessary, i.e. 
it may be okay // to use multiple os.File to write to the same target file type filesWriter struct { - buckets []filesWriterBucket + buckets []filesWriterBucket + allowRecursiveDelete bool } type filesWriterBucket struct { @@ -33,13 +34,14 @@ type partialFile struct { sparse bool } -func newFilesWriter(count int) *filesWriter { +func newFilesWriter(count int, allowRecursiveDelete bool) *filesWriter { buckets := make([]filesWriterBucket, count) for b := 0; b < count; b++ { buckets[b].files = make(map[string]*partialFile) } return &filesWriter{ - buckets: buckets, + buckets: buckets, + allowRecursiveDelete: allowRecursiveDelete, } } @@ -60,7 +62,7 @@ func openFile(path string) (*os.File, error) { return f, nil } -func createFile(path string, createSize int64, sparse bool) (*os.File, error) { +func createFile(path string, createSize int64, sparse bool, allowRecursiveDelete bool) (*os.File, error) { f, err := fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_NOFOLLOW, 0600) if err != nil && fs.IsAccessDenied(err) { // If file is readonly, clear the readonly flag by resetting the @@ -109,8 +111,14 @@ func createFile(path string, createSize int64, sparse bool) (*os.File, error) { } // not what we expected, try to get rid of it - if err := fs.Remove(path); err != nil { - return nil, err + if allowRecursiveDelete { + if err := fs.RemoveAll(path); err != nil { + return nil, err + } + } else { + if err := fs.Remove(path); err != nil { + return nil, err + } } // create a new file, pass O_EXCL to make sure there are no surprises f, err = fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_EXCL|fs.O_NOFOLLOW, 0600) @@ -169,7 +177,7 @@ func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, create var f *os.File var err error if createSize >= 0 { - f, err = createFile(path, createSize, sparse) + f, err = createFile(path, createSize, sparse, w.allowRecursiveDelete) if err != nil { return nil, err } diff --git a/internal/restorer/fileswriter_test.go b/internal/restorer/fileswriter_test.go index 383a9e0d743..c6984792703 100644 --- a/internal/restorer/fileswriter_test.go +++ b/internal/restorer/fileswriter_test.go @@ -13,7 +13,7 @@ import ( func TestFilesWriterBasic(t *testing.T) { dir := rtest.TempDir(t) - w := newFilesWriter(1) + w := newFilesWriter(1, false) f1 := dir + "/f1" f2 := dir + "/f2" @@ -39,6 +39,29 @@ func TestFilesWriterBasic(t *testing.T) { rtest.Equals(t, []byte{2, 2}, buf) } +func TestFilesWriterRecursiveOverwrite(t *testing.T) { + path := filepath.Join(t.TempDir(), "test") + + // create filled directory + rtest.OK(t, os.Mkdir(path, 0o700)) + rtest.OK(t, os.WriteFile(filepath.Join(path, "file"), []byte("data"), 0o400)) + + // must error if recursive delete is not allowed + w := newFilesWriter(1, false) + err := w.writeToFile(path, []byte{1}, 0, 2, false) + rtest.Assert(t, errors.Is(err, notEmptyDirError()), "unexepected error got %v", err) + rtest.Equals(t, 0, len(w.buckets[0].files)) + + // must replace directory + w = newFilesWriter(1, true) + rtest.OK(t, w.writeToFile(path, []byte{1, 1}, 0, 2, false)) + rtest.Equals(t, 0, len(w.buckets[0].files)) + + buf, err := os.ReadFile(path) + rtest.OK(t, err) + rtest.Equals(t, []byte{1, 1}, buf) +} + func TestCreateFile(t *testing.T) { basepath := filepath.Join(t.TempDir(), "test") @@ -110,7 +133,7 @@ func TestCreateFile(t *testing.T) { for j, test := range tests { path := basepath + fmt.Sprintf("%v%v", i, j) sc.create(t, path) - f, err := createFile(path, test.size, test.isSparse) + f, err := createFile(path, test.size, test.isSparse, 
false) if sc.err == nil { rtest.OK(t, err) fi, err := f.Stat() @@ -129,3 +152,19 @@ func TestCreateFile(t *testing.T) { }) } } + +func TestCreateFileRecursiveDelete(t *testing.T) { + path := filepath.Join(t.TempDir(), "test") + + // create filled directory + rtest.OK(t, os.Mkdir(path, 0o700)) + rtest.OK(t, os.WriteFile(filepath.Join(path, "file"), []byte("data"), 0o400)) + + // replace it + f, err := createFile(path, 42, false, true) + rtest.OK(t, err) + fi, err := f.Stat() + rtest.OK(t, err) + rtest.Assert(t, fi.Mode().IsRegular(), "wrong filetype %v", fi.Mode()) + rtest.OK(t, f.Close()) +} diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 6e81812c2ce..9efaa64df2a 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -349,7 +349,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { idx := NewHardlinkIndex[string]() filerestorer := newFileRestorer(dst, res.repo.LoadBlobsFromPack, res.repo.LookupBlob, - res.repo.Connections(), res.opts.Sparse, res.opts.Progress) + res.repo.Connections(), res.opts.Sparse, res.opts.Delete, res.opts.Progress) filerestorer.Error = res.Error debug.Log("first pass for %q", dst) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 8a8f81ce043..3d2323d0fd2 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -1214,6 +1214,27 @@ func TestRestoreDryRun(t *testing.T) { rtest.Assert(t, errors.Is(err, os.ErrNotExist), "expected no file to be created, got %v", err) } +func TestRestoreOverwriteDirectory(t *testing.T) { + saveSnapshotsAndOverwrite(t, + Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{ + Mode: normalizeFileMode(0755 | os.ModeDir), + Nodes: map[string]Node{ + "anotherfile": File{Data: "content: file\n"}, + }, + }, + }, + }, + Snapshot{ + Nodes: map[string]Node{ + "dir": File{Data: "content: file\n"}, + }, + }, + Options{Delete: true}, + ) +} + func TestRestoreDelete(t *testing.T) { repo := repository.TestRepository(t) tempdir := rtest.TempDir(t) From aa8e18cf32528435350d3a8e99b7d7f0cb49edba Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 21:29:42 +0200 Subject: [PATCH 434/893] restore: add deletions to progress output --- doc/075_scripting.rst | 2 +- internal/restorer/restorer.go | 1 + internal/ui/restore/json.go | 2 ++ internal/ui/restore/json_test.go | 1 + internal/ui/restore/progress.go | 12 ++++++++++++ internal/ui/restore/progress_test.go | 2 ++ internal/ui/restore/text.go | 6 ++++-- internal/ui/restore/text_test.go | 1 + 8 files changed, 24 insertions(+), 3 deletions(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index b83fe5eb55f..e11f280dbb4 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -520,7 +520,7 @@ Only printed if `--verbose=2` is specified. 
+----------------------+-----------------------------------------------------------+ | ``message_type`` | Always "verbose_status" | +----------------------+-----------------------------------------------------------+ -| ``action`` | Either "restored", "updated" or "unchanged" | +| ``action`` | Either "restored", "updated", "unchanged" or "deleted" | +----------------------+-----------------------------------------------------------+ | ``item`` | The item in question | +----------------------+-----------------------------------------------------------+ diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 9efaa64df2a..37072d9a99c 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -499,6 +499,7 @@ func (res *Restorer) removeUnexpectedFiles(target, location string, expectedFile selectedForRestore, _ := res.SelectFilter(nodeLocation, false) // only delete files that were selected for restore if selectedForRestore { + res.opts.Progress.ReportDeletedFile(nodeLocation) if !res.opts.DryRun { if err := fs.RemoveAll(nodeTarget); err != nil { return err diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go index ebc21717671..7db2e21a34b 100644 --- a/internal/ui/restore/json.go +++ b/internal/ui/restore/json.go @@ -56,6 +56,8 @@ func (t *jsonPrinter) CompleteItem(messageType ItemAction, item string, size uin action = "updated" case ActionFileUnchanged: action = "unchanged" + case ActionDeleted: + action = "deleted" default: panic("unknown message type") } diff --git a/internal/ui/restore/json_test.go b/internal/ui/restore/json_test.go index 1a749b9334d..06a70d5dcd4 100644 --- a/internal/ui/restore/json_test.go +++ b/internal/ui/restore/json_test.go @@ -53,6 +53,7 @@ func TestJSONPrintCompleteItem(t *testing.T) { {ActionFileRestored, 123, "{\"message_type\":\"verbose_status\",\"action\":\"restored\",\"item\":\"test\",\"size\":123}\n"}, {ActionFileUpdated, 123, "{\"message_type\":\"verbose_status\",\"action\":\"updated\",\"item\":\"test\",\"size\":123}\n"}, {ActionFileUnchanged, 123, "{\"message_type\":\"verbose_status\",\"action\":\"unchanged\",\"item\":\"test\",\"size\":123}\n"}, + {ActionDeleted, 0, "{\"message_type\":\"verbose_status\",\"action\":\"deleted\",\"item\":\"test\",\"size\":0}\n"}, } { term, printer := createJSONProgress() printer.CompleteItem(data.action, "test", data.size) diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index 04274b7ea54..71a46e9ddae 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -51,6 +51,7 @@ const ( ActionFileRestored ItemAction = "file restored" ActionFileUpdated ItemAction = "file updated" ActionFileUnchanged ItemAction = "file unchanged" + ActionDeleted ItemAction = "deleted" ) func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress { @@ -126,6 +127,17 @@ func (p *Progress) AddSkippedFile(name string, size uint64) { p.printer.CompleteItem(ActionFileUnchanged, name, size) } +func (p *Progress) ReportDeletedFile(name string) { + if p == nil { + return + } + + p.m.Lock() + defer p.m.Unlock() + + p.printer.CompleteItem(ActionDeleted, name, 0) +} + func (p *Progress) Finish() { p.updater.Done() } diff --git a/internal/ui/restore/progress_test.go b/internal/ui/restore/progress_test.go index eda1b05c0c8..4a630474159 100644 --- a/internal/ui/restore/progress_test.go +++ b/internal/ui/restore/progress_test.go @@ -181,10 +181,12 @@ func TestProgressTypes(t *testing.T) { progress.AddFile(0) 
progress.AddProgress("dir", ActionDirRestored, fileSize, fileSize) progress.AddProgress("new", ActionFileRestored, 0, 0) + progress.ReportDeletedFile("del") return true }) test.Equals(t, itemTrace{ itemTraceEntry{ActionDirRestored, "dir", fileSize}, itemTraceEntry{ActionFileRestored, "new", 0}, + itemTraceEntry{ActionDeleted, "del", 0}, }, items) } diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go index 77c2f2d158e..2f0b3c01f1b 100644 --- a/internal/ui/restore/text.go +++ b/internal/ui/restore/text.go @@ -48,12 +48,14 @@ func (t *textPrinter) CompleteItem(messageType ItemAction, item string, size uin action = "updated" case ActionFileUnchanged: action = "unchanged" + case ActionDeleted: + action = "deleted" default: panic("unknown message type") } - if messageType == ActionDirRestored { - t.terminal.Print(fmt.Sprintf("restored %v", item)) + if messageType == ActionDirRestored || messageType == ActionDeleted { + t.terminal.Print(fmt.Sprintf("%-9v %v", action, item)) } else { t.terminal.Print(fmt.Sprintf("%-9v %v with size %v", action, item, ui.FormatBytes(size))) } diff --git a/internal/ui/restore/text_test.go b/internal/ui/restore/text_test.go index c7d173422ca..eddc0d1cad1 100644 --- a/internal/ui/restore/text_test.go +++ b/internal/ui/restore/text_test.go @@ -65,6 +65,7 @@ func TestPrintCompleteItem(t *testing.T) { {ActionFileRestored, 123, "restored test with size 123 B"}, {ActionFileUpdated, 123, "updated test with size 123 B"}, {ActionFileUnchanged, 123, "unchanged test with size 123 B"}, + {ActionDeleted, 0, "deleted test"}, } { term, printer := createTextProgress() printer.CompleteItem(data.action, "test", data.size) From 868219aad1873dc703163855bf9687782898a622 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 21:53:07 +0200 Subject: [PATCH 435/893] restore: test --dry-run plus --delete --- internal/restorer/restorer_test.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 3d2323d0fd2..a343bda2cb9 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -1214,6 +1214,32 @@ func TestRestoreDryRun(t *testing.T) { rtest.Assert(t, errors.Is(err, os.ErrNotExist), "expected no file to be created, got %v", err) } +func TestRestoreDryRunDelete(t *testing.T) { + snapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n"}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + tempfile := filepath.Join(tempdir, "existing") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rtest.OK(t, os.Mkdir(tempdir, 0o755)) + f, err := os.Create(tempfile) + rtest.OK(t, err) + rtest.OK(t, f.Close()) + + sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + res := NewRestorer(repo, sn, Options{DryRun: true, Delete: true}) + rtest.OK(t, res.RestoreTo(ctx, tempdir)) + + _, err = os.Stat(tempfile) + rtest.Assert(t, err == nil, "expected file to still exist, got error %v", err) +} + func TestRestoreOverwriteDirectory(t *testing.T) { saveSnapshotsAndOverwrite(t, Snapshot{ From 569f111cb176459931fd5b83dd1c34c9dba1f8d0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 22:21:27 +0200 Subject: [PATCH 436/893] restore: document --delete option --- doc/050_restore.rst | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 
497488241e4..5f351ae1df3 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -111,6 +111,27 @@ values are supported: newer modification time (mtime). * ``--overwrite never``: never overwrite existing files. +Delete files not in snapshot +---------------------------- + +When restoring into a directory that already contains files, it can be useful to remove all +files that do not exist in the snapshot. For this, pass the ``--delete`` option to the ``restore`` +command. The command will then **delete all files** from the target directory that do not +exist in the snapshot. + +The ``--delete`` option also allows overwriting a non-empty directory if the snapshot contains a +file with the same name. + +.. warning:: + + Always use the ``--dry-run -vv`` option to verify what would be deleted before running the actual + command. + +When specifying ``--include`` or ``--exclude`` options, only files or directories matched by those +options will be deleted. For example, the command +``restic -r /srv/restic-repo restore 79766175:/work --target /tmp/restore-work --include /foo --delete`` +would only delete files within ``/tmp/restore-work/foo``. + Dry run ------- From 54316978cd1fe42bb0ba9251d24595f68c38dc99 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 29 Jun 2024 22:31:19 +0200 Subject: [PATCH 437/893] add restore --delete changelog --- changelog/unreleased/issue-2348 | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 changelog/unreleased/issue-2348 diff --git a/changelog/unreleased/issue-2348 b/changelog/unreleased/issue-2348 new file mode 100644 index 00000000000..a8a0849fec6 --- /dev/null +++ b/changelog/unreleased/issue-2348 @@ -0,0 +1,10 @@ +Enhancement: Add `--delete` option to `restore` command + +The `restore` command now supports a `--delete` option that allows removing files and directories +from the target directory that do not exist in the snapshot. This option also allows files in the +snapshot to replace non-empty directories. + +To check that only the expected files are deleted add the `--dry-run --verbose=2` options. + +https://github.com/restic/restic/issues/2348 +https://github.com/restic/restic/pull/4881 From aedead2823c500655a25cf04637374853ed81a78 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Jul 2024 11:27:35 +0200 Subject: [PATCH 438/893] ui/termstatus: fix clearing status lines To clear the status lines, they should be set to an empty array to prevent future updates of those lines. Setting the status lines to an array containing an empty string is wrong as this causes the output to continuously add that empty status line after each message. 
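In other words, `SetStatus(nil)` (or an empty slice) means "remove the status lines and stop updating them", whereas `SetStatus([]string{""})` keeps one empty status line alive that is re-emitted after every later message. A small sketch of the usage pattern callers converge on after this fix (a hedged example, not restic's exact code; `term` is a `*termstatus.Terminal` and `status` a pre-formatted progress line):

package sketch

import "github.com/restic/restic/internal/ui/termstatus"

// updateProgress shows the intended SetStatus semantics: clear with nil once
// the progress is final, otherwise replace the currently displayed line.
func updateProgress(term *termstatus.Terminal, status string, final bool) {
    if final {
        term.SetStatus(nil) // clear the status area; later output prints normally
        term.Print(status)  // emit the final summary as a regular message
    } else {
        term.SetStatus([]string{status}) // replace the shown status line
    }
}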
--- cmd/restic/progress.go | 2 +- internal/ui/backup/text.go | 2 +- internal/ui/restore/text.go | 2 +- internal/ui/termstatus/status.go | 5 +---- internal/ui/termstatus/status_test.go | 9 +++++++++ 5 files changed, 13 insertions(+), 7 deletions(-) diff --git a/cmd/restic/progress.go b/cmd/restic/progress.go index 48aa209a6a3..d9ff634ce6d 100644 --- a/cmd/restic/progress.go +++ b/cmd/restic/progress.go @@ -53,7 +53,7 @@ func newGenericProgressMax(show bool, max uint64, description string, print func func newTerminalProgressMax(show bool, max uint64, description string, term *termstatus.Terminal) *progress.Counter { return newGenericProgressMax(show, max, description, func(status string, final bool) { if final { - term.SetStatus([]string{}) + term.SetStatus(nil) term.Print(status) } else { term.SetStatus([]string{status}) diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go index 43e963b82d5..f96746739e3 100644 --- a/internal/ui/backup/text.go +++ b/internal/ui/backup/text.go @@ -121,7 +121,7 @@ func (b *TextProgress) ReportTotal(start time.Time, s archiver.ScanStats) { // Reset status func (b *TextProgress) Reset() { if b.term.CanUpdateStatus() { - b.term.SetStatus([]string{""}) + b.term.SetStatus(nil) } } diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go index 2f0b3c01f1b..235e7f08557 100644 --- a/internal/ui/restore/text.go +++ b/internal/ui/restore/text.go @@ -62,7 +62,7 @@ func (t *textPrinter) CompleteItem(messageType ItemAction, item string, size uin } func (t *textPrinter) Finish(p State, duration time.Duration) { - t.terminal.SetStatus([]string{}) + t.terminal.SetStatus(nil) timeLeft := ui.FormatDuration(duration) formattedAllBytesTotal := ui.FormatBytes(p.AllBytesTotal) diff --git a/internal/ui/termstatus/status.go b/internal/ui/termstatus/status.go index 4a73ce5b3fb..39654cc8cdc 100644 --- a/internal/ui/termstatus/status.go +++ b/internal/ui/termstatus/status.go @@ -315,11 +315,8 @@ func sanitizeLines(lines []string, width int) []string { // SetStatus updates the status lines. // The lines should not contain newlines; this method adds them. +// Pass nil or an empty array to remove the status lines. func (t *Terminal) SetStatus(lines []string) { - if len(lines) == 0 { - return - } - // only truncate interactive status output var width int if t.canUpdateStatus { diff --git a/internal/ui/termstatus/status_test.go b/internal/ui/termstatus/status_test.go index 997a2d7b1cb..2a17a905a42 100644 --- a/internal/ui/termstatus/status_test.go +++ b/internal/ui/termstatus/status_test.go @@ -32,6 +32,15 @@ func TestSetStatus(t *testing.T) { term.SetStatus([]string{"first"}) exp := home + clear + "first" + home + term.SetStatus([]string{""}) + exp += home + clear + "" + home + + term.SetStatus([]string{}) + exp += home + clear + "" + home + + // already empty status + term.SetStatus([]string{}) + term.SetStatus([]string{"foo", "bar", "baz"}) exp += home + clear + "foo\n" + home + clear + "bar\n" + home + clear + "baz" + home + up + up From 33de00bba083eeefaae931d7d4d6200c1a415baa Mon Sep 17 00:00:00 2001 From: Joram Berger Date: Sat, 6 Jul 2024 16:47:04 +0200 Subject: [PATCH 439/893] Add explanation to restic diff symbols Explain what the characters for each file in a restic diff output mean. 
--- doc/040_backup.rst | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 2aeb063d912..c068e0904ee 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -498,7 +498,7 @@ You can combine all three options with each other and with the normal file argum Comparing Snapshots ******************* -Restic has a `diff` command which shows the difference between two snapshots +Restic has a ``diff`` command which shows the difference between two snapshots and displays a small statistic, just pass the command two snapshot IDs: .. code-block:: console @@ -506,9 +506,9 @@ and displays a small statistic, just pass the command two snapshot IDs: $ restic -r /srv/restic-repo diff 5845b002 2ab627a6 comparing snapshot ea657ce5 to 2ab627a6: - C /restic/cmd_diff.go + M /restic/cmd_diff.go + /restic/foo - C /restic/restic + M /restic/restic Files: 0 new, 0 removed, 2 changed Dirs: 1 new, 0 removed @@ -527,6 +527,24 @@ folder, you could use the following command: $ restic -r /srv/restic-repo diff 5845b002:/restic 2ab627a6:/restic +By default, the ``diff`` command only lists differences in file contents. +The flag `--metadata` shows changes to file metadata, too. + +The characters left of the file path show what has changed for this file: + ++-------+-----------------------+ +| ``+`` | added | ++-------+-----------------------+ +| ``-`` | removed | ++-------+-----------------------+ +| ``T`` | entry type changed | ++-------+-----------------------+ +| ``M`` | file content changed | ++-------+-----------------------+ +| ``U`` | metadata changed | ++-------+-----------------------+ +| ``?`` | bitrot detected | ++-------+-----------------------+ Backing up special items and metadata ************************************* From f3a3c46153513f0d9f49f6096d829d5901ffb61a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Jul 2024 17:29:16 +0200 Subject: [PATCH 440/893] bump some dependencies --- go.mod | 24 ++++++++++++------------ go.sum | 50 +++++++++++++++++++++++++------------------------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/go.mod b/go.mod index 3648e5c8c40..17c2337a011 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,12 @@ module github.com/restic/restic require ( - cloud.google.com/go/storage v1.41.0 + cloud.google.com/go/storage v1.43.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/Backblaze/blazer v0.6.1 - github.com/anacrolix/fuse v0.3.1-0.20240611061849-e4651fbc304d + github.com/anacrolix/fuse v0.3.1 github.com/cenkalti/backoff/v4 v4.3.0 github.com/cespare/xxhash/v2 v2.3.0 github.com/elithrar/simple-scrypt v1.3.0 @@ -30,16 +30,16 @@ require ( golang.org/x/net v0.26.0 golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.21.0 - golang.org/x/term v0.21.0 + golang.org/x/sys v0.22.0 + golang.org/x/term v0.22.0 golang.org/x/text v0.16.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.182.0 + google.golang.org/api v0.187.0 ) require ( - cloud.google.com/go v0.114.0 // indirect - cloud.google.com/go/auth v0.4.2 // indirect + cloud.google.com/go v0.115.0 // indirect + cloud.google.com/go/auth v0.6.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/iam v1.1.8 // indirect @@ -58,7 +58,7 @@ require ( github.com/google/s2a-go v0.1.7 // indirect 
github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/googleapis/gax-go/v2 v2.12.5 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect @@ -77,11 +77,11 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect + google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 849957b0ddf..b372fcce2d7 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,17 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= -cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= -cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg= -cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go/auth v0.6.1 h1:T0Zw1XM5c1GlpN2HYr2s+m3vr1p2wy+8VN+Z1FKxW38= +cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= -cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= -cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= +cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 h1:1nGuui+4POelzDwI7RG56yfQJHCnKvwfMoU7VsEp+Zg= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= @@ -28,8 +29,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74= github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk= github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0= -github.com/anacrolix/fuse v0.3.1-0.20240611061849-e4651fbc304d h1:wCFrzeUrl7FASv1w6VRCaEAlG5u8dkSNGcj2OeJ78Vc= -github.com/anacrolix/fuse v0.3.1-0.20240611061849-e4651fbc304d/go.mod h1:vN3X/6E+uHNjg5F8Oy9FD9I+pYxeDWeB8mNjIoxL5ds= +github.com/anacrolix/fuse v0.3.1 h1:oT8s3B5HFkBdLe/WKJO5MNo9iIyEtc+BhvTZYp4jhDM= +github.com/anacrolix/fuse v0.3.1/go.mod h1:vN3X/6E+uHNjg5F8Oy9FD9I+pYxeDWeB8mNjIoxL5ds= github.com/anacrolix/generics v0.0.0-20230113004304-d6428d516633 h1:TO3pytMIJ98CO1nYtqbFx/iuTHi4OgIUoE2wNfDdKxw= github.com/anacrolix/generics v0.0.0-20230113004304-d6428d516633/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= github.com/anacrolix/log v0.13.1/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= @@ -112,8 +113,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= +github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -277,14 +278,14 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -307,20 +308,19 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE= -google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM= +google.golang.org/api v0.187.0 h1:Mxs7VATVC2v7CY+7Xwm4ndkX71hpElcvx0D1Ji/p1eo= +google.golang.org/api v0.187.0/go.mod h1:KIHlTc4x7N7gKKuVsdmfBXN13yEEWXWFURWY6SBp2gk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= -google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 h1:W5Xj/70xIA4x60O/IFyXivR5MGqblAb8R3w26pnD6No= -google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls= +google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M= +google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= +google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -337,8 +337,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 10765151d3145cceb5ed864f22616bfa68c4df92 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Jul 2024 21:01:58 +0200 Subject: [PATCH 441/893] doc: fine tune description of diff command --- doc/040_backup.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index c068e0904ee..77176b0549c 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -506,9 +506,9 @@ and displays a small statistic, just pass the command two snapshot IDs: $ restic -r /srv/restic-repo diff 5845b002 2ab627a6 comparing snapshot ea657ce5 to 2ab627a6: - M /restic/cmd_diff.go + M /restic/cmd_diff.go + /restic/foo - M /restic/restic + M /restic/restic Files: 0 new, 0 removed, 2 changed Dirs: 1 new, 0 removed @@ -528,7 +528,7 @@ folder, you could use the following command: $ restic -r /srv/restic-repo diff 5845b002:/restic 2ab627a6:/restic By default, the ``diff`` command only lists differences in file contents. -The flag `--metadata` shows changes to file metadata, too. +The flag ``--metadata`` shows changes to file metadata, too. The characters left of the file path show what has changed for this file: From a2a2401a681c2ee89ffce113fc875454d600ec0a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 7 Jul 2024 11:31:04 +0200 Subject: [PATCH 442/893] s3: prevent repeated credential queries with anonymous authentication --- internal/backend/s3/s3.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index bddb5774139..1b7f6e3d22d 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -132,6 +132,9 @@ func getCredentials(cfg Config) (*credentials.Credentials, error) { if c.SignerType == credentials.SignatureAnonymous { debug.Log("using anonymous access for %#v", cfg.Endpoint) + // short circuit credentials resolution when using anonymous access + // otherwise the IAM provider would continuously try to (unsuccessfully) retrieve new credentials + creds = credentials.New(&credentials.Static{}) } roleArn := os.Getenv("RESTIC_AWS_ASSUME_ROLE_ARN") From 4b364940aa19bc3f6db9fa733b8bd8ebeb9cc034 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 7 Jul 2024 11:32:40 +0200 Subject: [PATCH 443/893] s3: use http client with configured timeouts for s3 IAM communication The default client has no timeouts configured opening network connections. Thus, if 169.254.169.254 is inaccessible, then the client would wait for until the operating system gives up, which will take several minutes. 
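The fix routes the S3 backend's own round tripper into the minio IAM credentials provider instead of `http.DefaultTransport`, so probing the instance metadata endpoint is bounded by restic's usual connection timeouts. As a rough sketch of what a transport with such timeouts can look like (standard library only; the timeout values are illustrative, not the ones restic actually uses):

package sketch

import (
    "net"
    "net/http"
    "time"
)

// newTimeoutTransport builds a round tripper that gives up quickly when the
// remote endpoint (e.g. 169.254.169.254) does not respond.
func newTimeoutTransport() http.RoundTripper {
    return &http.Transport{
        DialContext: (&net.Dialer{
            Timeout: 5 * time.Second, // bound the time spent opening the TCP connection
        }).DialContext,
        TLSHandshakeTimeout:   5 * time.Second,
        ResponseHeaderTimeout: 10 * time.Second,
    }
}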
--- internal/backend/s3/s3.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index 1b7f6e3d22d..6fe9e384bc4 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -52,7 +52,7 @@ func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, erro minio.MaxRetry = int(cfg.MaxRetries) } - creds, err := getCredentials(cfg) + creds, err := getCredentials(cfg, rt) if err != nil { return nil, errors.Wrap(err, "s3.getCredentials") } @@ -97,7 +97,7 @@ func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, erro // getCredentials -- runs through the various credential types and returns the first one that works. // additionally if the user has specified a role to assume, it will do that as well. -func getCredentials(cfg Config) (*credentials.Credentials, error) { +func getCredentials(cfg Config, tr http.RoundTripper) (*credentials.Credentials, error) { // Chains all credential types, in the following order: // - Static credentials provided by user // - AWS env vars (i.e. AWS_ACCESS_KEY_ID) @@ -120,7 +120,7 @@ func getCredentials(cfg Config) (*credentials.Credentials, error) { &credentials.FileMinioClient{}, &credentials.IAM{ Client: &http.Client{ - Transport: http.DefaultTransport, + Transport: tr, }, }, }) From 894ec9d05d56c3de7ce124832797d1657b98077e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 7 Jul 2024 13:08:41 +0200 Subject: [PATCH 444/893] ls: fix broken folder if --ncdu an file filters are combined --- cmd/restic/cmd_ls.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index c4fb32de3fa..7499da68f66 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -367,9 +367,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return nil } + printedDir := false if withinDir(nodepath) { // if we're within a dir, print the node printer.Node(nodepath, node) + printedDir = true // if recursive listing is requested, signal the walker that it // should continue walking recursively @@ -387,6 +389,9 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri // otherwise, signal the walker to not walk recursively into any // subdirs if node.Type == "dir" { + if printedDir { + printer.LeaveDir(nodepath) + } return walker.ErrSkipNode } return nil From 1856f4d87a076315d1095a87d2c338391259a630 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 7 Jul 2024 13:54:45 +0200 Subject: [PATCH 445/893] restore: forbid --target / --delete unless include/exclude is specified --- cmd/restic/cmd_restore.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 1835219ad91..e21d0bd94e9 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -2,6 +2,7 @@ package main import ( "context" + "path/filepath" "time" "github.com/restic/restic/internal/debug" @@ -107,6 +108,10 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, return errors.Fatal("--dry-run and --verify are mutually exclusive") } + if opts.Delete && filepath.Clean(opts.Target) == "/" && !hasExcludes && !hasIncludes { + return errors.Fatal("'--target / --delete' must be combined with an include or exclude filter") + } + snapshotIDString := args[0] debug.Log("restore %v to %v", snapshotIDString, opts.Target) From a0cac7fcd110a6ce4f8bebf97823681912e2a080 Mon Sep 17 00:00:00 2001 From: Michael Eischer 
Date: Sun, 7 Jul 2024 12:07:24 +0200 Subject: [PATCH 446/893] helpers/prepare-release: write dev version to VERSION file --- VERSION | 2 +- helpers/prepare-release/main.go | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 19270385eaf..e62f3ef1c62 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.16.5 +0.16.5-dev diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go index 703d85e7007..f01fe8a2fdc 100644 --- a/helpers/prepare-release/main.go +++ b/helpers/prepare-release/main.go @@ -323,6 +323,11 @@ func updateVersion() { } func updateVersionDev() { + err := os.WriteFile("VERSION", []byte(opts.Version+"-dev\n"), 0644) + if err != nil { + die("unable to write version to file: %v", err) + } + newVersion := fmt.Sprintf(`const version = "%s-dev (compiled manually)"`, opts.Version) replace(versionCodeFile, versionPattern, newVersion) From 00d9ce7bee2cce668e222abfaa1de9d60015194c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 8 Jul 2024 19:30:17 +0200 Subject: [PATCH 447/893] fix setting version in build script --- cmd/restic/global.go | 2 +- helpers/prepare-release/main.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index ff4fda5b6d4..cd6a683bffe 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -43,7 +43,7 @@ import ( "golang.org/x/term" ) -const version = "0.16.5-dev (compiled manually)" +var version = "0.16.5-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go index f01fe8a2fdc..ba3de38a521 100644 --- a/helpers/prepare-release/main.go +++ b/helpers/prepare-release/main.go @@ -303,7 +303,7 @@ func generateFiles() { } } -var versionPattern = `const version = ".*"` +var versionPattern = `var version = ".*"` const versionCodeFile = "cmd/restic/global.go" @@ -313,7 +313,7 @@ func updateVersion() { die("unable to write version to file: %v", err) } - newVersion := fmt.Sprintf("const version = %q", opts.Version) + newVersion := fmt.Sprintf("var version = %q", opts.Version) replace(versionCodeFile, versionPattern, newVersion) if len(uncommittedChanges("VERSION")) > 0 || len(uncommittedChanges(versionCodeFile)) > 0 { @@ -328,7 +328,7 @@ func updateVersionDev() { die("unable to write version to file: %v", err) } - newVersion := fmt.Sprintf(`const version = "%s-dev (compiled manually)"`, opts.Version) + newVersion := fmt.Sprintf(`var version = "%s-dev (compiled manually)"`, opts.Version) replace(versionCodeFile, versionPattern, newVersion) msg("committing cmd/restic/global.go with dev version") From f980f5647e55993c52b9c22c58c6af188c29e6bf Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 8 Jul 2024 19:58:11 +0200 Subject: [PATCH 448/893] snapshots: also show snapshot size in compact view --- changelog/unreleased/issue-693 | 1 + cmd/restic/cmd_snapshots.go | 3 +++ 2 files changed, 4 insertions(+) diff --git a/changelog/unreleased/issue-693 b/changelog/unreleased/issue-693 index 054ae42ed0b..34b5b945510 100644 --- a/changelog/unreleased/issue-693 +++ b/changelog/unreleased/issue-693 @@ -10,3 +10,4 @@ statistics use `restic snapshots --json` or `restic cat snapshot `. 
https://github.com/restic/restic/issues/693 https://github.com/restic/restic/pull/4705 +https://github.com/restic/restic/pull/4913 diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index f689e426093..ea15af019e5 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -190,6 +190,9 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke tab.AddColumn("Time", "{{ .Timestamp }}") tab.AddColumn("Host", "{{ .Hostname }}") tab.AddColumn("Tags ", `{{ join .Tags "\n" }}`) + if hasSize { + tab.AddColumn("Size", `{{ .Size }}`) + } } else { tab.AddColumn("ID", "{{ .ID }}") tab.AddColumn("Time", "{{ .Timestamp }}") From 94c3d3f0976cfb55108d0e2c4859ae2d8d5a169a Mon Sep 17 00:00:00 2001 From: Daniel Oliveira Date: Tue, 9 Jul 2024 00:18:09 +0100 Subject: [PATCH 449/893] doc: Update index explanation to match the example In commit 00575ec the example was changed to three data blobs due to the deprecation of mixed pack files but the following description was not updated to reflect this. --- doc/design.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/design.rst b/doc/design.rst index b80029d10c6..7fb8b71b2ea 100644 --- a/doc/design.rst +++ b/doc/design.rst @@ -296,8 +296,8 @@ of a JSON document like the following: } This JSON document lists Packs and the blobs contained therein. In this -example, the Pack ``73d04e61`` contains two data Blobs and one Tree -blob, the plaintext hashes are listed afterwards. The ``length`` field +example, the Pack ``73d04e61`` contains three data Blobs, +the plaintext hashes are listed afterwards. The ``length`` field corresponds to ``Length(encrypted_blob)`` in the pack file header. Field ``uncompressed_length`` is only present for compressed blobs and therefore is never present in version 1 of the repository format. 
It is From f74e70cc36d52e05d1b7da69d9d694e4b5f6fde9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 8 Jul 2024 19:42:00 +0200 Subject: [PATCH 450/893] s3: forbid anonymous authentication unless explicitly requested --- internal/backend/s3/config.go | 11 ++++++----- internal/backend/s3/s3.go | 13 +++++++++++-- internal/feature/registry.go | 2 ++ 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/internal/backend/s3/config.go b/internal/backend/s3/config.go index 4aea4c3d11b..be2a78ce5cc 100644 --- a/internal/backend/s3/config.go +++ b/internal/backend/s3/config.go @@ -23,11 +23,12 @@ type Config struct { Layout string `option:"layout" help:"use this backend layout (default: auto-detect) (deprecated)"` StorageClass string `option:"storage-class" help:"set S3 storage class (STANDARD, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or REDUCED_REDUNDANCY)"` - Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` - MaxRetries uint `option:"retries" help:"set the number of retries attempted"` - Region string `option:"region" help:"set region"` - BucketLookup string `option:"bucket-lookup" help:"bucket lookup style: 'auto', 'dns', or 'path'"` - ListObjectsV1 bool `option:"list-objects-v1" help:"use deprecated V1 api for ListObjects calls"` + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` + MaxRetries uint `option:"retries" help:"set the number of retries attempted"` + Region string `option:"region" help:"set region"` + BucketLookup string `option:"bucket-lookup" help:"bucket lookup style: 'auto', 'dns', or 'path'"` + ListObjectsV1 bool `option:"list-objects-v1" help:"use deprecated V1 api for ListObjects calls"` + UnsafeAnonymousAuth bool `option:"unsafe-anonymous-auth" help:"use anonymous authentication"` } // NewConfig returns a new Config with the default values filled in. diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index 6fe9e384bc4..019f8471b53 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -98,6 +98,10 @@ func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, erro // getCredentials -- runs through the various credential types and returns the first one that works. // additionally if the user has specified a role to assume, it will do that as well. func getCredentials(cfg Config, tr http.RoundTripper) (*credentials.Credentials, error) { + if cfg.UnsafeAnonymousAuth { + return credentials.New(&credentials.Static{}), nil + } + // Chains all credential types, in the following order: // - Static credentials provided by user // - AWS env vars (i.e. AWS_ACCESS_KEY_ID) @@ -131,9 +135,14 @@ func getCredentials(cfg Config, tr http.RoundTripper) (*credentials.Credentials, } if c.SignerType == credentials.SignatureAnonymous { + // Fail if no credentials were found to prevent repeated attempts to (unsuccessfully) retrieve new credentials. + // The first attempt still has to timeout which slows down restic usage considerably. Thus, migrate towards forcing + // users to explicitly decide between authenticated and anonymous access. + if feature.Flag.Enabled(feature.ExplicitS3AnonymousAuth) { + return nil, fmt.Errorf("no credentials found. 
Use `-o s3.unsafe-anonymous-auth=true` for anonymous authentication") + } + debug.Log("using anonymous access for %#v", cfg.Endpoint) - // short circuit credentials resolution when using anonymous access - // otherwise the IAM provider would continuously try to (unsuccessfully) retrieve new credentials creds = credentials.New(&credentials.Static{}) } diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 74d8a2f6132..6b8f6b3977d 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -9,6 +9,7 @@ const ( DeprecateLegacyIndex FlagName = "deprecate-legacy-index" DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout" DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" + ExplicitS3AnonymousAuth FlagName = "explicit-s3-anonymous-auth" SafeForgetKeepTags FlagName = "safe-forget-keep-tags" ) @@ -18,6 +19,7 @@ func init() { DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."}, DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, + ExplicitS3AnonymousAuth: {Type: Beta, Description: "forbid anonymous S3 authentication unless `-o s3.unsafe-anonymous-auth=true` is set"}, SafeForgetKeepTags: {Type: Beta, Description: "prevent deleting all snapshots if the tag passed to `forget --keep-tags tagname` does not exist"}, }) } From dc0db4eda47a366f40b043f2c778e74842b83824 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 8 Jul 2024 19:47:05 +0200 Subject: [PATCH 451/893] add s3 anonymous authentication changelog entry --- changelog/unreleased/issue-4707 | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 changelog/unreleased/issue-4707 diff --git a/changelog/unreleased/issue-4707 b/changelog/unreleased/issue-4707 new file mode 100644 index 00000000000..3c5ffa2ad8f --- /dev/null +++ b/changelog/unreleased/issue-4707 @@ -0,0 +1,14 @@ +Change: Disallow S3 anonymous authentication by default + +When using the S3 backend with anonymous authentication, it continuously tried +to retrieve new authentication credentials, which caused bad performance. + +Now, to use anonymous authentication, it is necessary to pass the option `-o +s3.unsafe-anonymous-auth=true` to restic. + +It is temporarily possible to revert to the old behavior by setting the +environment variable `RESTIC_FEATURES=explicit-s3-anonymous-auth=false`. Note +that this feature flag will be removed in the next minor restic version. 
+ +https://github.com/restic/restic/issues/4707 +https://github.com/restic/restic/pull/4908 From 15419d603dbfa0159963331ad1a7168d35ba5871 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 7 Jul 2024 13:28:56 +0200 Subject: [PATCH 452/893] ls: add missing intermediate directories to --ncdu output --- cmd/restic/cmd_ls.go | 26 ++++++++++++++++++-------- cmd/restic/cmd_ls_integration_test.go | 15 +++++++++------ cmd/restic/cmd_ls_test.go | 4 ++-- 3 files changed, 29 insertions(+), 16 deletions(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 7499da68f66..8dd2af2d67b 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -71,7 +71,7 @@ func init() { type lsPrinter interface { Snapshot(sn *restic.Snapshot) - Node(path string, node *restic.Node) + Node(path string, node *restic.Node, isPrefixDirectory bool) LeaveDir(path string) Close() } @@ -102,7 +102,10 @@ func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) { } // Print node in our custom JSON format, followed by a newline. -func (p *jsonLsPrinter) Node(path string, node *restic.Node) { +func (p *jsonLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) { + if isPrefixDirectory { + return + } err := lsNodeJSON(p.enc, path, node) if err != nil { Warnf("JSON encode failed: %v\n", err) @@ -217,7 +220,7 @@ func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { return json.Marshal(outNode) } -func (p *ncduLsPrinter) Node(path string, node *restic.Node) { +func (p *ncduLsPrinter) Node(path string, node *restic.Node, _ bool) { out, err := lsNcduNode(path, node) if err != nil { Warnf("JSON encode failed: %v\n", err) @@ -249,8 +252,10 @@ type textLsPrinter struct { func (p *textLsPrinter) Snapshot(sn *restic.Snapshot) { Verbosef("%v filtered by %v:\n", sn, p.dirs) } -func (p *textLsPrinter) Node(path string, node *restic.Node) { - Printf("%s\n", formatNode(path, node, p.ListLong, p.HumanReadable)) +func (p *textLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) { + if !isPrefixDirectory { + Printf("%s\n", formatNode(path, node, p.ListLong, p.HumanReadable)) + } } func (p *textLsPrinter) LeaveDir(_ string) {} @@ -369,8 +374,8 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri printedDir := false if withinDir(nodepath) { - // if we're within a dir, print the node - printer.Node(nodepath, node) + // if we're within a target path, print the node + printer.Node(nodepath, node, false) printedDir = true // if recursive listing is requested, signal the walker that it @@ -383,12 +388,17 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri // if there's an upcoming match deeper in the tree (but we're not // there yet), signal the walker to descend into any subdirs if approachingMatchingTree(nodepath) { + // print node leading up to the target paths + if !printedDir { + printer.Node(nodepath, node, true) + } return nil } // otherwise, signal the walker to not walk recursively into any // subdirs if node.Type == "dir" { + // immediately generate leaveDir if the directory is skipped if printedDir { printer.LeaveDir(nodepath) } @@ -401,7 +411,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri ProcessNode: processNode, LeaveDir: func(path string) { // the root path `/` has no corresponding node and is thus also skipped by processNode - if withinDir(path) && path != "/" { + if path != "/" { printer.LeaveDir(path) } }, diff --git a/cmd/restic/cmd_ls_integration_test.go 
b/cmd/restic/cmd_ls_integration_test.go index 1b3c964e4ec..2b742d1b2a4 100644 --- a/cmd/restic/cmd_ls_integration_test.go +++ b/cmd/restic/cmd_ls_integration_test.go @@ -35,13 +35,16 @@ func TestRunLsNcdu(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() - testRunInit(t, env.gopts) + testSetupBackupData(t, env) opts := BackupOptions{} testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) - ncdu := testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, []string{"latest"}) - assertIsValidJSON(t, ncdu) - - ncdu = testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, []string{"latest", "/testdata"}) - assertIsValidJSON(t, ncdu) + for _, paths := range [][]string{ + {"latest"}, + {"latest", "/testdata"}, + {"latest", "/testdata/0", "/testdata/0/tests"}, + } { + ncdu := testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, paths) + assertIsValidJSON(t, ncdu) + } } diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index 828b2920e64..a59b5f81bb9 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -140,12 +140,12 @@ func TestLsNcdu(t *testing.T) { printer.Node("/directory", &restic.Node{ Type: "dir", Name: "directory", - }) + }, false) printer.Node("/directory/data", &restic.Node{ Type: "file", Name: "data", Size: 42, - }) + }, false) printer.LeaveDir("/directory") printer.Close() From 79c9fc45a9ce3fb98131b6a70d8521744bd75b5f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 7 Jul 2024 13:33:25 +0200 Subject: [PATCH 453/893] ls: fix disk size in --ncdu output Ncdu expects a size in disk blocks. Round up to the next full block. Otherwise, small files with size below 255bytes would be rounded down to 0B by ncdu. --- cmd/restic/cmd_ls.go | 9 ++++++--- cmd/restic/cmd_ls_test.go | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 8dd2af2d67b..6e48b010f70 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -193,10 +193,13 @@ func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { Mtime int64 `json:"mtime"` } + const blockSize = 512 + outNode := NcduNode{ - Name: node.Name, - Asize: node.Size, - Dsize: node.Size, + Name: node.Name, + Asize: node.Size, + // round up to nearest full blocksize + Dsize: (node.Size + blockSize - 1) / blockSize * blockSize, Dev: node.DeviceID, Ino: node.Inode, NLink: node.Links, diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index a59b5f81bb9..19497505342 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -109,7 +109,7 @@ func TestLsNodeJSON(t *testing.T) { func TestLsNcduNode(t *testing.T) { for i, expect := range []string{ - `{"name":"baz","asize":12345,"dsize":12345,"dev":0,"ino":0,"nlink":1,"notreg":false,"uid":10000000,"gid":20000000,"mode":0,"mtime":-62135596800}`, + `{"name":"baz","asize":12345,"dsize":12800,"dev":0,"ino":0,"nlink":1,"notreg":false,"uid":10000000,"gid":20000000,"mode":0,"mtime":-62135596800}`, `{"name":"empty","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":3840,"notreg":false,"uid":1001,"gid":1001,"mode":0,"mtime":-62135596800}`, `{"name":"link","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":true,"uid":0,"gid":0,"mode":511,"mtime":-62135596800}`, `{"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":493,"mtime":1577934245}`, @@ -152,7 +152,7 @@ func TestLsNcdu(t *testing.T) { rtest.Equals(t, `[1, 2, 
{"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, [ {"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800}, - {"name":"data","asize":42,"dsize":42,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800} + {"name":"data","asize":42,"dsize":512,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800} ] ] `, buf.String()) From 5067a40bd829b44b853b4631e79c19f1f257d3f9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 7 Jul 2024 13:45:05 +0200 Subject: [PATCH 454/893] update ncdu changelog --- changelog/unreleased/issue-4549 | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog/unreleased/issue-4549 b/changelog/unreleased/issue-4549 index 4829a9881d4..8f35b02335d 100644 --- a/changelog/unreleased/issue-4549 +++ b/changelog/unreleased/issue-4549 @@ -9,3 +9,4 @@ You can use it as follows: `restic ls latest --ncdu | ncdu -f -` https://github.com/restic/restic/issues/4549 https://github.com/restic/restic/pull/4550 +https://github.com/restic/restic/pull/4911 From 8c7a2f179247b15e9c81a2ef61ccbc44431886b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 18:23:27 +0000 Subject: [PATCH 455/893] build(deps): bump google.golang.org/grpc from 1.64.0 to 1.64.1 Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.64.0 to 1.64.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.64.0...v1.64.1) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: indirect ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 17c2337a011..1e2769c7c63 100644 --- a/go.mod +++ b/go.mod @@ -80,7 +80,7 @@ require ( google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/grpc v1.64.0 // indirect + google.golang.org/grpc v1.64.1 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index b372fcce2d7..e4caba45642 100644 --- a/go.sum +++ b/go.sum @@ -326,8 +326,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= +google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From f5601526279a5eeb7924569c2fd195dccee768dc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 10 Jul 2024 21:07:10 +0200 Subject: [PATCH 456/893] restore: document how to speed up dry-run --- doc/050_restore.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 5f351ae1df3..1a920fad432 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -155,6 +155,10 @@ Files with already up to date content are reported as ``unchanged``. Files whose was modified are ``updated`` and files that are new are shown as ``restored``. Directories and other file types like symlinks are always reported as ``restored``. +To reliably determine which files would be updated, a dry-run also verifies the content of +already existing files according to the specified overwrite behavior. To skip these checks +either specify ``--overwrite never`` or specify a non-existing ``--target`` directory. + Restore using mount =================== From 3b7228c8c1f6c3a7d78941ea8ef8346dd1d00d9e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 17:52:50 +0200 Subject: [PATCH 457/893] explicitly document exit code 1 on error --- cmd/restic/cmd_cache.go | 3 ++- cmd/restic/cmd_cat.go | 3 ++- cmd/restic/cmd_check.go | 3 ++- cmd/restic/cmd_copy.go | 6 ++++++ cmd/restic/cmd_debug.go | 3 ++- cmd/restic/cmd_diff.go | 3 ++- cmd/restic/cmd_dump.go | 3 ++- cmd/restic/cmd_features.go | 3 ++- cmd/restic/cmd_find.go | 3 ++- cmd/restic/cmd_forget.go | 3 ++- cmd/restic/cmd_generate.go | 3 ++- cmd/restic/cmd_init.go | 3 ++- cmd/restic/cmd_key_add.go | 3 ++- cmd/restic/cmd_key_list.go | 3 ++- cmd/restic/cmd_key_passwd.go | 3 ++- cmd/restic/cmd_key_remove.go | 3 ++- cmd/restic/cmd_list.go | 3 ++- cmd/restic/cmd_ls.go | 3 ++- cmd/restic/cmd_migrate.go | 3 ++- cmd/restic/cmd_mount.go | 3 ++- cmd/restic/cmd_options.go | 3 ++- cmd/restic/cmd_prune.go | 3 ++- cmd/restic/cmd_recover.go | 3 ++- cmd/restic/cmd_repair_index.go | 3 ++- cmd/restic/cmd_repair_packs.go | 3 ++- cmd/restic/cmd_repair_snapshots.go | 3 ++- cmd/restic/cmd_restore.go | 3 ++- cmd/restic/cmd_rewrite.go | 3 ++- cmd/restic/cmd_self_update.go | 3 ++- cmd/restic/cmd_snapshots.go | 3 ++- cmd/restic/cmd_stats.go | 3 ++- cmd/restic/cmd_tag.go | 3 ++- cmd/restic/cmd_unlock.go | 3 ++- cmd/restic/cmd_version.go | 3 ++- 34 files changed, 72 insertions(+), 33 deletions(-) diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go index 651c65fcf27..e71d383656f 100644 --- a/cmd/restic/cmd_cache.go +++ b/cmd/restic/cmd_cache.go @@ -25,7 +25,8 @@ The "cache" command allows listing and cleaning local cache directories. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index 23205771a18..b830e1a3572 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -21,7 +21,8 @@ The "cat" command is used to print internal objects to stdout. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index f416c9269ed..4b41f11ff8b 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -35,7 +35,8 @@ repository and not use a local cache. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index d12501dd96b..8615458164e 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -30,6 +30,12 @@ This means that copied files, which existed in both the source and destination repository, /may occupy up to twice their space/ in the destination repository. This can be mitigated by the "--copy-chunker-params" option when initializing a new destination repository using the "init" command. + +EXIT STATUS +=========== + +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, RunE: func(cmd *cobra.Command, args []string) error { return runCopy(cmd.Context(), copyOptions, globalOptions, args) diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 1a42995fdaa..2b5fe27c30e 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -43,7 +43,8 @@ is used for debugging purposes only. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index fd54897f14f..73cbee1fcc6 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -39,7 +39,8 @@ snapshot. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 2a98a8ae8e9..d3697264866 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -34,7 +34,8 @@ snapshot. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_features.go b/cmd/restic/cmd_features.go index 8125d3e2628..497013696ea 100644 --- a/cmd/restic/cmd_features.go +++ b/cmd/restic/cmd_features.go @@ -28,7 +28,8 @@ A _deprecated_ feature is always disabled and cannot be enabled. The flag will b EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, Hidden: true, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 59e34c46813..b243017897d 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -33,7 +33,8 @@ restic find --pack 025c1d06 EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. 
+Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 4fd931ff075..26f41b52ffb 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -35,7 +35,8 @@ security considerations. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_generate.go b/cmd/restic/cmd_generate.go index ba710e70830..b5c7cecb589 100644 --- a/cmd/restic/cmd_generate.go +++ b/cmd/restic/cmd_generate.go @@ -18,7 +18,8 @@ and the auto-completion files for bash, fish and zsh). EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go index e6ea694413e..3c0319e55d8 100644 --- a/cmd/restic/cmd_init.go +++ b/cmd/restic/cmd_init.go @@ -23,7 +23,8 @@ The "init" command initializes a new repository. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index d38991f090c..f438445cfc6 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -19,7 +19,8 @@ The "add" sub-command creates a new key and validates the key. Returns the new k EXIT STATUS =========== -Exit status is 0 if the command is successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, } diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index fcca6055a2b..51333578755 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -23,7 +23,8 @@ used to access the repository. EXIT STATUS =========== -Exit status is 0 if the command is successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index 1a12001099c..16ea9bbf450 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -19,7 +19,8 @@ Returns the new key ID. EXIT STATUS =========== -Exit status is 0 if the command is successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, } diff --git a/cmd/restic/cmd_key_remove.go b/cmd/restic/cmd_key_remove.go index 93babb4f3ec..a8a958f1863 100644 --- a/cmd/restic/cmd_key_remove.go +++ b/cmd/restic/cmd_key_remove.go @@ -20,7 +20,8 @@ removing the current key being used to access the repository. EXIT STATUS =========== -Exit status is 0 if the command is successful, and non-zero if there was any error. 
+Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 6f443042077..172bf47f63c 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -19,7 +19,8 @@ The "list" command allows listing objects in the repository based on type. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 6e48b010f70..cc253e2c23d 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -39,7 +39,8 @@ a path separator); paths use the forward slash '/' as separator. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index 0f9bdde1ac3..28642a4b494 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -22,7 +22,8 @@ names are specified, these migrations are applied. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 5a10447f36f..be7cdc43121 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -64,7 +64,8 @@ The default path templates are: EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_options.go b/cmd/restic/cmd_options.go index 85e06222025..4cd574b6811 100644 --- a/cmd/restic/cmd_options.go +++ b/cmd/restic/cmd_options.go @@ -17,7 +17,8 @@ The "options" command prints a list of extended options. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, Hidden: true, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index f0bfa2b949a..126a5c37c3f 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -28,7 +28,8 @@ referenced and therefore not needed any more. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 726f1bf652c..3ea843a71bc 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -22,7 +22,8 @@ It can be used if, for example, a snapshot has been removed by accident with "fo EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index 50ba16e33b9..2c8b59f437a 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -19,7 +19,8 @@ repository. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index ab8c7f47598..4a4f7e4221c 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -23,7 +23,8 @@ the index to remove the damaged pack files and removes the pack files from the r EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index be5ef4ad92d..4594991b9c8 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -37,7 +37,8 @@ snapshot! EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index e21d0bd94e9..ed3caad50f3 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -32,7 +32,8 @@ syntax, where "subfolder" is a path within the snapshot. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 83ace7a11a5..a290744a268 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -38,7 +38,8 @@ use the "prune" command. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_self_update.go b/cmd/restic/cmd_self_update.go index 4b86c416f41..5e5e93f2233 100644 --- a/cmd/restic/cmd_self_update.go +++ b/cmd/restic/cmd_self_update.go @@ -24,7 +24,8 @@ files. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. 
+Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index ea15af019e5..3bd410cea49 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -23,7 +23,8 @@ The "snapshots" command lists all snapshots stored in the repository. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 0f8e45f366c..5717aa2224e 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -49,7 +49,8 @@ Refer to the online manual for more details about each mode. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 033dc5ebea0..1550b17bd5d 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -25,7 +25,8 @@ When no snapshotID is given, all snapshots matching the host, tag and path filte EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_unlock.go b/cmd/restic/cmd_unlock.go index 6893f3365f8..96eef7e02db 100644 --- a/cmd/restic/cmd_unlock.go +++ b/cmd/restic/cmd_unlock.go @@ -16,7 +16,8 @@ The "unlock" command removes stale locks that have been created by other restic EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_version.go b/cmd/restic/cmd_version.go index e3f9b301039..cd32e24704d 100644 --- a/cmd/restic/cmd_version.go +++ b/cmd/restic/cmd_version.go @@ -18,7 +18,8 @@ and the version of this software. EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
`, DisableAutoGenTag: true, Run: func(_ *cobra.Command, _ []string) { From d76a78aa99cbb78b08582fa6b2de830d3d26ef3c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 17:53:33 +0200 Subject: [PATCH 458/893] return exit code 11 if the repository could not be locked --- cmd/restic/cmd_backup.go | 1 + cmd/restic/cmd_cat.go | 1 + cmd/restic/cmd_check.go | 1 + cmd/restic/cmd_copy.go | 1 + cmd/restic/cmd_debug.go | 1 + cmd/restic/cmd_diff.go | 1 + cmd/restic/cmd_dump.go | 1 + cmd/restic/cmd_find.go | 1 + cmd/restic/cmd_forget.go | 1 + cmd/restic/cmd_key_add.go | 1 + cmd/restic/cmd_key_list.go | 1 + cmd/restic/cmd_key_passwd.go | 1 + cmd/restic/cmd_key_remove.go | 1 + cmd/restic/cmd_list.go | 1 + cmd/restic/cmd_ls.go | 1 + cmd/restic/cmd_migrate.go | 1 + cmd/restic/cmd_mount.go | 1 + cmd/restic/cmd_prune.go | 1 + cmd/restic/cmd_recover.go | 1 + cmd/restic/cmd_repair_index.go | 1 + cmd/restic/cmd_repair_packs.go | 1 + cmd/restic/cmd_repair_snapshots.go | 1 + cmd/restic/cmd_restore.go | 1 + cmd/restic/cmd_rewrite.go | 1 + cmd/restic/cmd_self_update.go | 1 + cmd/restic/cmd_snapshots.go | 1 + cmd/restic/cmd_stats.go | 1 + cmd/restic/cmd_tag.go | 1 + cmd/restic/main.go | 2 ++ 29 files changed, 30 insertions(+) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 434469683a0..4a5d8acd7ca 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -41,6 +41,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was a fatal error (no snapshot created). Exit status is 3 if some source data could not be read (incomplete snapshot created). +Exit status is 11 if the repository is already locked. `, PreRun: func(_ *cobra.Command, _ []string) { if backupOptions.Host == "" { diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index b830e1a3572..46cd0ad6f77 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -23,6 +23,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 4b41f11ff8b..f6f7c5563ee 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -37,6 +37,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 8615458164e..158409f6187 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -36,6 +36,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, RunE: func(cmd *cobra.Command, args []string) error { return runCopy(cmd.Context(), copyOptions, globalOptions, args) diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 2b5fe27c30e..044bf3af5fa 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -45,6 +45,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 73cbee1fcc6..b7c1036c418 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -41,6 +41,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index d3697264866..7a7c3850bae 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -36,6 +36,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index b243017897d..8978297a048 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -35,6 +35,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 26f41b52ffb..104a75a7842 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -37,6 +37,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index f438445cfc6..66fdfd18499 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -21,6 +21,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, } diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index 51333578755..6793489690d 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -25,6 +25,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index 16ea9bbf450..e2c23f9cbe2 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -21,6 +21,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, } diff --git a/cmd/restic/cmd_key_remove.go b/cmd/restic/cmd_key_remove.go index a8a958f1863..761ed5b6dc7 100644 --- a/cmd/restic/cmd_key_remove.go +++ b/cmd/restic/cmd_key_remove.go @@ -22,6 +22,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 172bf47f63c..8a884ab8cb3 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -21,6 +21,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index cc253e2c23d..115cc49e3d2 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -41,6 +41,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index 28642a4b494..f591a1b2196 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -24,6 +24,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index be7cdc43121..52dbf4dacbc 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -66,6 +66,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 126a5c37c3f..36226a16b3e 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -30,6 +30,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 3ea843a71bc..13b0ee5365b 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -24,6 +24,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index 2c8b59f437a..9d0dd32b3ef 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -21,6 +21,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 4a4f7e4221c..c75c65915ce 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -25,6 +25,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index 4594991b9c8..dc844c35af9 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -39,6 +39,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index ed3caad50f3..c783a81a671 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -34,6 +34,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index a290744a268..07fcf3e67bb 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -40,6 +40,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_self_update.go b/cmd/restic/cmd_self_update.go index 5e5e93f2233..ce7f8ca99d2 100644 --- a/cmd/restic/cmd_self_update.go +++ b/cmd/restic/cmd_self_update.go @@ -26,6 +26,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 3bd410cea49..c043d7167d1 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -25,6 +25,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 5717aa2224e..1b00dbfba40 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -51,6 +51,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 1550b17bd5d..f3c98c27d9f 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -27,6 +27,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 11 if the repository is already locked. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/main.go b/cmd/restic/main.go index e847b815674..c61ce6cf320 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -156,6 +156,8 @@ func main() { exitCode = 0 case err == ErrInvalidSourceData: exitCode = 3 + case restic.IsAlreadyLocked(err): + exitCode = 11 case errors.Is(err, context.Canceled): exitCode = 130 default: From 94fdca08c45fe75409e548f4b5d89039c1efe1a4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 10 Jul 2024 21:46:26 +0200 Subject: [PATCH 459/893] return exit code 10 if repository does not exist --- cmd/restic/cmd_backup.go | 1 + cmd/restic/cmd_cat.go | 1 + cmd/restic/cmd_check.go | 1 + cmd/restic/cmd_copy.go | 1 + cmd/restic/cmd_debug.go | 1 + cmd/restic/cmd_diff.go | 1 + cmd/restic/cmd_dump.go | 1 + cmd/restic/cmd_find.go | 1 + cmd/restic/cmd_forget.go | 1 + cmd/restic/cmd_key_add.go | 1 + cmd/restic/cmd_key_list.go | 1 + cmd/restic/cmd_key_passwd.go | 1 + cmd/restic/cmd_key_remove.go | 1 + cmd/restic/cmd_list.go | 1 + cmd/restic/cmd_ls.go | 1 + cmd/restic/cmd_migrate.go | 1 + cmd/restic/cmd_mount.go | 1 + cmd/restic/cmd_prune.go | 1 + cmd/restic/cmd_recover.go | 1 + cmd/restic/cmd_repair_index.go | 1 + cmd/restic/cmd_repair_packs.go | 1 + cmd/restic/cmd_repair_snapshots.go | 1 + cmd/restic/cmd_restore.go | 1 + cmd/restic/cmd_rewrite.go | 1 + cmd/restic/cmd_self_update.go | 1 + cmd/restic/cmd_snapshots.go | 1 + cmd/restic/cmd_stats.go | 1 + cmd/restic/cmd_tag.go | 1 + cmd/restic/global.go | 10 ++++++++++ cmd/restic/main.go | 2 ++ internal/backend/b2/b2.go | 4 +++- internal/backend/backend.go | 3 +++ 32 files changed, 46 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 4a5d8acd7ca..9957b578493 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -41,6 +41,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was a fatal error (no snapshot created). Exit status is 3 if some source data could not be read (incomplete snapshot created). +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, PreRun: func(_ *cobra.Command, _ []string) { diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index 46cd0ad6f77..693c26790dc 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -23,6 +23,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index f6f7c5563ee..9cccc0609fc 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -37,6 +37,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 158409f6187..d7761174a00 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -36,6 +36,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. 
`, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 044bf3af5fa..74c21df24d4 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -45,6 +45,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index b7c1036c418..6488a7c354e 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -41,6 +41,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 7a7c3850bae..7e1efa3ae3d 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -36,6 +36,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 8978297a048..4f9549ca44a 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -35,6 +35,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 104a75a7842..87738b518f1 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -37,6 +37,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index 66fdfd18499..c9f0ef23319 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -21,6 +21,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index 6793489690d..ae751a48751 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -25,6 +25,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index e2c23f9cbe2..723acaaab65 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -21,6 +21,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. 
`, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_key_remove.go b/cmd/restic/cmd_key_remove.go index 761ed5b6dc7..c4c24fdb706 100644 --- a/cmd/restic/cmd_key_remove.go +++ b/cmd/restic/cmd_key_remove.go @@ -22,6 +22,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 8a884ab8cb3..060bca8718d 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -21,6 +21,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 115cc49e3d2..d9a3b0fb82f 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -41,6 +41,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index f591a1b2196..e8998005096 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -24,6 +24,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 52dbf4dacbc..3e0b159bec0 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -66,6 +66,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 36226a16b3e..7e706ccf899 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -30,6 +30,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 13b0ee5365b..5e4744bb6b7 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -24,6 +24,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index 9d0dd32b3ef..e6b6e9fa500 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -21,6 +21,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. 
`, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index c75c65915ce..b0afefb2de1 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -25,6 +25,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index dc844c35af9..fc221ebea29 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -39,6 +39,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index c783a81a671..89942f4cf93 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -34,6 +34,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 07fcf3e67bb..73bc32f6fa9 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -40,6 +40,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_self_update.go b/cmd/restic/cmd_self_update.go index ce7f8ca99d2..0fce41241dc 100644 --- a/cmd/restic/cmd_self_update.go +++ b/cmd/restic/cmd_self_update.go @@ -26,6 +26,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index c043d7167d1..9112e1b9585 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -25,6 +25,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 1b00dbfba40..8a78d57f76d 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -51,6 +51,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. `, DisableAutoGenTag: true, diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index f3c98c27d9f..ea73955f012 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -27,6 +27,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. 
`, DisableAutoGenTag: true, diff --git a/cmd/restic/global.go b/cmd/restic/global.go index cd6a683bffe..be485dc3b94 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -43,6 +43,10 @@ import ( "golang.org/x/term" ) +// ErrNoRepository is used to report if opening a repository failed due +// to a missing backend storage location or config file +var ErrNoRepository = errors.New("repository does not exist") + var version = "0.16.5-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. @@ -607,6 +611,9 @@ func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options. be, err = factory.Open(ctx, cfg, rt, lim) } + if errors.Is(err, backend.ErrNoRepository) { + return nil, fmt.Errorf("Fatal: %w at %v: %v", ErrNoRepository, location.StripPassword(gopts.backends, s), err) + } if err != nil { return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err) } @@ -635,6 +642,9 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio // check if config is there fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile}) + if be.IsNotExist(err) { + return nil, fmt.Errorf("Fatal: %w: unable to open config file: %v\nIs there a repository at the following location?\n%v", ErrNoRepository, err, location.StripPassword(gopts.backends, s)) + } if err != nil { return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, location.StripPassword(gopts.backends, s)) } diff --git a/cmd/restic/main.go b/cmd/restic/main.go index c61ce6cf320..5818221a587 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -156,6 +156,8 @@ func main() { exitCode = 0 case err == ErrInvalidSourceData: exitCode = 3 + case errors.Is(err, ErrNoRepository): + exitCode = 10 case restic.IsAlreadyLocked(err): exitCode = 11 case errors.Is(err, context.Canceled): diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go index 1a5e72aaa71..9717cdd0eaa 100644 --- a/internal/backend/b2/b2.go +++ b/internal/backend/b2/b2.go @@ -100,7 +100,9 @@ func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backen } bucket, err := client.Bucket(ctx, cfg.Bucket) - if err != nil { + if b2.IsNotExist(err) { + return nil, backend.ErrNoRepository + } else if err != nil { return nil, errors.Wrap(err, "Bucket") } diff --git a/internal/backend/backend.go b/internal/backend/backend.go index 3b0599c3076..f606e1123e8 100644 --- a/internal/backend/backend.go +++ b/internal/backend/backend.go @@ -2,10 +2,13 @@ package backend import ( "context" + "fmt" "hash" "io" ) +var ErrNoRepository = fmt.Errorf("repository does not exist") + // Backend is used to store and access data. // // Backend operations that return an error will be retried when a Backend is From 294c8524edacf55ec447ab678e28f994ab2c1608 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 10 Jul 2024 21:47:03 +0200 Subject: [PATCH 460/893] doc: document exit codes --- doc/075_scripting.rst | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index e11f280dbb4..ae876c9ea3b 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -38,6 +38,33 @@ to ``cat config``) and it may print a different error message. If there are no errors, restic will return a zero exit code and print the repository metadata.
+Exit codes +********** + +Restic commands return an exit code that signals whether the command was successful. +The following table provides a general description, see the help of each command for +a more specific description. + +.. warning:: + New exit codes will be added over time. If an unknown exit code is returned, then it + MUST be treated as a command failure. + ++-----+----------------------------------------------------+ +| 0 | Command was successful | ++-----+----------------------------------------------------+ +| 1 | Command failed, see command help for more details | ++-----+----------------------------------------------------+ +| 2 | Go runtime error | ++-----+----------------------------------------------------+ +| 3 | ``backup`` command could not read some source data | ++-----+----------------------------------------------------+ +| 10 | Repository does not exist (since restic 0.17.0) | ++-----+----------------------------------------------------+ +| 11 | Failed to lock repository (since restic 0.17.0) | ++-----+----------------------------------------------------+ +| 130 | Restic was interrupted using SIGINT or SIGSTOP | ++-----+----------------------------------------------------+ + JSON output *********** From 87316671563299972d7a5d7469dff17bcf07cbe2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 10 Jul 2024 22:08:49 +0200 Subject: [PATCH 461/893] restore: do not delete target if it is a file This is implicitly achieved by creating the target directory as the first step, which will fail if a file already exist in its place. --- internal/restorer/restorer.go | 8 ++++++++ internal/restorer/restorer_test.go | 22 ++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 37072d9a99c..a47fd5ef612 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -347,6 +347,14 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { } } + if !res.opts.DryRun { + // ensure that the target directory exists and is actually a directory + // Using ensureDir is too aggressive here as it also removes unexpected files + if err := fs.MkdirAll(dst, 0700); err != nil { + return fmt.Errorf("cannot create target directory: %w", err) + } + } + idx := NewHardlinkIndex[string]() filerestorer := newFileRestorer(dst, res.repo.LoadBlobsFromPack, res.repo.LookupBlob, res.repo.Connections(), res.opts.Sparse, res.opts.Delete, res.opts.Progress) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index a343bda2cb9..2f28265ccd0 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -1372,3 +1372,25 @@ func TestRestoreDelete(t *testing.T) { }) } } + +func TestRestoreToFile(t *testing.T) { + snapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n"}, + }, + } + + repo := repository.TestRepository(t) + tempdir := filepath.Join(rtest.TempDir(t), "target") + + // create a file in the place of the target directory + rtest.OK(t, os.WriteFile(tempdir, []byte{}, 0o700)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) + res := NewRestorer(repo, sn, Options{}) + err := res.RestoreTo(ctx, tempdir) + rtest.Assert(t, strings.Contains(err.Error(), "cannot create target directory"), "unexpected error %v", err) +} From 54ca27a6f2345d4f2fc4e1b60319078bf7d02761 Mon Sep 17 00:00:00 2001 From: Michael 
Eischer Date: Sun, 30 Jun 2024 19:13:00 +0200 Subject: [PATCH 462/893] doc: update init check with exit code 10 --- doc/075_scripting.rst | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index ae876c9ea3b..87ae4fcf46f 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -21,21 +21,22 @@ Check if a repository is already initialized ******************************************** You may find a need to check if a repository is already initialized, -perhaps to prevent your script from initializing a repository multiple -times. The command ``cat config`` may be used for this purpose: +perhaps to prevent your script from trying to initialize a repository multiple +times (the ``init`` command contains a check to prevent overwriting existing +repositories). The command ``cat config`` may be used for this purpose: .. code-block:: console $ restic -r /srv/restic-repo cat config - Fatal: unable to open config file: stat /srv/restic-repo/config: no such file or directory + Fatal: repository does not exist: unable to open config file: stat /srv/restic-repo/config: no such file or directory Is there a repository at the following location? /srv/restic-repo -If a repository does not exist, restic will return a non-zero exit code -and print an error message. Note that restic will also return a non-zero -exit code if a different error is encountered (e.g.: incorrect password -to ``cat config``) and it may print a different error message. If there -are no errors, restic will return a zero exit code and print the repository +If a repository does not exist, restic (since 0.17.0) will return exit code ``10`` +and print a corresponding error message. Older versions return exit code ``1``. +Note that restic will also return exit code ``1`` if a different error is encountered +(e.g.: incorrect password to ``cat config``) and it may print a different error message. +If there are no errors, restic will return a zero exit code and print the repository metadata. Exit codes From 36cc62075cbf5f44e06f531ebb598e3710e1be60 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 30 Jun 2024 19:25:48 +0200 Subject: [PATCH 463/893] add exit code changelog --- changelog/unreleased/pull-4884 | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 changelog/unreleased/pull-4884 diff --git a/changelog/unreleased/pull-4884 b/changelog/unreleased/pull-4884 new file mode 100644 index 00000000000..ff2bff96756 --- /dev/null +++ b/changelog/unreleased/pull-4884 @@ -0,0 +1,11 @@ +Change: return exit code 10 or 11 if repository does not exist or is locked + +If a repository does not exist or cannot be locked, then restic always returned +exit code 1. This made it difficult to distinguish these cases from other +errors. + +Now, restic returns exit code 10 if the repository does not exist and exit code +11 if the repository could not be locked due to a conflicting lock.
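A script can combine the documentation and changelog above into an idempotent initialization step. A minimal sketch, assuming restic 0.17.0 or newer and an illustrative repository path; older versions report a missing repository with exit code 1 and would fall into the error branch:

.. code-block:: sh

    restic -r /srv/restic-repo cat config >/dev/null 2>&1
    case $? in
        0)  echo "repository already initialized" ;;
        10) restic -r /srv/restic-repo init ;;                  # repository does not exist
        *)  echo "unexpected error from restic" >&2; exit 1 ;;
    esac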
+ +https://github.com/restic/restic/issues/956 +https://github.com/restic/restic/pull/4884 From 9f66065237be7a80eb925e43ba9c613f7b3550ea Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 21:22:37 +0200 Subject: [PATCH 464/893] ls: fix handling of toplevel directories in ncdu output --- cmd/restic/cmd_ls.go | 4 ++-- cmd/restic/cmd_ls_integration_test.go | 11 ++++++----- cmd/restic/cmd_ls_test.go | 3 ++- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index d9a3b0fb82f..13bc5040606 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -178,7 +178,7 @@ func (p *ncduLsPrinter) Snapshot(sn *restic.Snapshot) { Warnf("JSON encode failed: %v\n", err) } p.depth++ - fmt.Fprintf(p.out, "[%d, %d, %s", NcduMajorVer, NcduMinorVer, string(snapshotBytes)) + fmt.Fprintf(p.out, "[%d, %d, %s, [{\"name\":\"\"}", NcduMajorVer, NcduMinorVer, string(snapshotBytes)) } func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { @@ -246,7 +246,7 @@ func (p *ncduLsPrinter) LeaveDir(_ string) { } func (p *ncduLsPrinter) Close() { - fmt.Fprint(p.out, "\n]\n") + fmt.Fprint(p.out, "\n]\n]\n") } type textLsPrinter struct { diff --git a/cmd/restic/cmd_ls_integration_test.go b/cmd/restic/cmd_ls_integration_test.go index 2b742d1b2a4..f5655bdff9b 100644 --- a/cmd/restic/cmd_ls_integration_test.go +++ b/cmd/restic/cmd_ls_integration_test.go @@ -3,7 +3,6 @@ package main import ( "context" "encoding/json" - "path/filepath" "strings" "testing" @@ -26,9 +25,10 @@ func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string { func assertIsValidJSON(t *testing.T, data []byte) { // Sanity check: output must be valid JSON. - var v interface{} + var v []any err := json.Unmarshal(data, &v) rtest.OK(t, err) + rtest.Assert(t, len(v) == 4, "invalid ncdu output, expected 4 array elements, got %v", len(v)) } func TestRunLsNcdu(t *testing.T) { @@ -37,12 +37,13 @@ func TestRunLsNcdu(t *testing.T) { testSetupBackupData(t, env) opts := BackupOptions{} - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + // backup such that there are multiple toplevel elements + testRunBackup(t, env.testdata+"/0", []string{"."}, opts, env.gopts) for _, paths := range [][]string{ {"latest"}, - {"latest", "/testdata"}, - {"latest", "/testdata/0", "/testdata/0/tests"}, + {"latest", "/0"}, + {"latest", "/0", "/0/9"}, } { ncdu := testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, paths) assertIsValidJSON(t, ncdu) diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index 19497505342..8523f702f18 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -149,11 +149,12 @@ func TestLsNcdu(t *testing.T) { printer.LeaveDir("/directory") printer.Close() - rtest.Equals(t, `[1, 2, {"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, + rtest.Equals(t, `[1, 2, {"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, [{"name":""}, [ {"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800}, {"name":"data","asize":42,"dsize":512,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800} ] ] +] `, buf.String()) } From 31887ea9aaaf7a8a4766d48e972ebfd0d50d20dd Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 22:24:35 +0200 Subject: [PATCH 465/893] restore: fix hang on command cancelation --- internal/restorer/filerestorer.go | 2 +- 
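The ncdu export emitted by ``ls --ncdu`` (whose top-level structure is fixed above) is meant to be piped into ncdu's import mode. A hedged usage sketch; the repository path and snapshot ID are illustrative, and reading the export from stdin via ``-f -`` depends on the installed ncdu version:

.. code-block:: console

    $ restic -r /srv/restic-repo ls latest --ncdu | ncdu -f -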
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 56059cb16f7..d2a4ba068af 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -192,6 +192,7 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { // the main restore loop wg.Go(func() error { + defer close(downloadCh) for _, id := range packOrder { pack := packs[id] // allow garbage collection of packInfo @@ -203,7 +204,6 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { debug.Log("Scheduled download pack %s", pack.id.Str()) } } - close(downloadCh) return nil }) From 40e5163114f2b164286bff0272de1aa1847c3172 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 22:26:22 +0200 Subject: [PATCH 466/893] restore: properly cancel file verification --- internal/restorer/restorer.go | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index a47fd5ef612..29401e5e950 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -158,6 +158,10 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str filenames = make([]string, 0, len(tree.Nodes)) } for i, node := range tree.Nodes { + if ctx.Err() != nil { + return nil, hasRestored, ctx.Err() + } + // allow GC of tree node tree.Nodes[i] = nil if res.opts.Delete { @@ -394,7 +398,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { idx.Add(node.Inode, node.DeviceID, location) } - buf, err = res.withOverwriteCheck(node, target, location, false, buf, func(updateMetadataOnly bool, matches *fileState) error { + buf, err = res.withOverwriteCheck(ctx, node, target, location, false, buf, func(updateMetadataOnly bool, matches *fileState) error { if updateMetadataOnly { res.opts.Progress.AddSkippedFile(location, node.Size) } else { @@ -434,14 +438,14 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { visitNode: func(node *restic.Node, target, location string) error { debug.Log("second pass, visitNode: restore node %q", location) if node.Type != "file" { - _, err := res.withOverwriteCheck(node, target, location, false, nil, func(_ bool, _ *fileState) error { + _, err := res.withOverwriteCheck(ctx, node, target, location, false, nil, func(_ bool, _ *fileState) error { return res.restoreNodeTo(ctx, node, target, location) }) return err } if idx.Has(node.Inode, node.DeviceID) && idx.Value(node.Inode, node.DeviceID) != location { - _, err := res.withOverwriteCheck(node, target, location, true, nil, func(_ bool, _ *fileState) error { + _, err := res.withOverwriteCheck(ctx, node, target, location, true, nil, func(_ bool, _ *fileState) error { return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location) }) return err @@ -528,7 +532,7 @@ func (res *Restorer) hasRestoredFile(location string) (metadataOnly bool, ok boo return metadataOnly, ok } -func (res *Restorer) withOverwriteCheck(node *restic.Node, target, location string, isHardlink bool, buf []byte, cb func(updateMetadataOnly bool, matches *fileState) error) ([]byte, error) { +func (res *Restorer) withOverwriteCheck(ctx context.Context, node *restic.Node, target, location string, isHardlink bool, buf []byte, cb func(updateMetadataOnly bool, matches *fileState) error) ([]byte, error) { overwrite, err := shouldOverwrite(res.opts.Overwrite, node, target) if err 
!= nil { return buf, err @@ -545,7 +549,7 @@ func (res *Restorer) withOverwriteCheck(node *restic.Node, target, location stri updateMetadataOnly := false if node.Type == "file" && !isHardlink { // if a file fails to verify, then matches is nil which results in restoring from scratch - matches, buf, _ = res.verifyFile(target, node, false, res.opts.Overwrite == OverwriteIfChanged, buf) + matches, buf, _ = res.verifyFile(ctx, target, node, false, res.opts.Overwrite == OverwriteIfChanged, buf) // skip files that are already correct completely updateMetadataOnly = !matches.NeedsRestore() } @@ -628,7 +632,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { g.Go(func() (err error) { var buf []byte for job := range work { - _, buf, err = res.verifyFile(job.path, job.node, true, false, buf) + _, buf, err = res.verifyFile(ctx, job.path, job.node, true, false, buf) if err != nil { err = res.Error(job.path, err) } @@ -676,7 +680,7 @@ func (s *fileState) HasMatchingBlob(i int) bool { // buf and the first return value are scratch space, passed around for reuse. // Reusing buffers prevents the verifier goroutines allocating all of RAM and // flushing the filesystem cache (at least on Linux). -func (res *Restorer) verifyFile(target string, node *restic.Node, failFast bool, trustMtime bool, buf []byte) (*fileState, []byte, error) { +func (res *Restorer) verifyFile(ctx context.Context, target string, node *restic.Node, failFast bool, trustMtime bool, buf []byte) (*fileState, []byte, error) { f, err := fs.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) if err != nil { return nil, buf, err @@ -707,6 +711,9 @@ func (res *Restorer) verifyFile(target string, node *restic.Node, failFast bool, matches := make([]bool, len(node.Content)) var offset int64 for i, blobID := range node.Content { + if ctx.Err() != nil { + return nil, buf, ctx.Err() + } length, found := res.repo.LookupBlobSize(restic.DataBlob, blobID) if !found { return nil, buf, errors.Errorf("Unable to fetch blob %s", blobID) From 2971a769daa5b899151b0e6e78751aa3d5af89dc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 22:27:00 +0200 Subject: [PATCH 467/893] restore: fix corrupt restore of partially up to date files --- internal/restorer/filerestorer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index d2a4ba068af..f0983e003f0 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -136,8 +136,8 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) { if largeFile && !file.state.HasMatchingBlob(idx) { packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset}) - fileOffset += int64(blob.DataLength()) } + fileOffset += int64(blob.DataLength()) pack, ok := packs[packID] if !ok { pack = &packInfo{ From ff9238ebf16866d6a5c8bb02f605ed7daddeea9a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 23:46:50 +0200 Subject: [PATCH 468/893] ls: use / as top-level dir name in ncdu output --- cmd/restic/cmd_ls.go | 2 +- cmd/restic/cmd_ls_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 13bc5040606..dede03a018f 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -178,7 +178,7 @@ func (p *ncduLsPrinter) Snapshot(sn *restic.Snapshot) { Warnf("JSON encode failed: 
%v\n", err) } p.depth++ - fmt.Fprintf(p.out, "[%d, %d, %s, [{\"name\":\"\"}", NcduMajorVer, NcduMinorVer, string(snapshotBytes)) + fmt.Fprintf(p.out, "[%d, %d, %s, [{\"name\":\"/\"}", NcduMajorVer, NcduMinorVer, string(snapshotBytes)) } func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index 8523f702f18..19b30479e3f 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -149,7 +149,7 @@ func TestLsNcdu(t *testing.T) { printer.LeaveDir("/directory") printer.Close() - rtest.Equals(t, `[1, 2, {"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, [{"name":""}, + rtest.Equals(t, `[1, 2, {"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, [{"name":"/"}, [ {"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800}, {"name":"data","asize":42,"dsize":512,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800} From 5671dfb481e272b48c6f7a9e5fbd76e4c7c3f1e9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 13 Jul 2024 10:21:02 +0200 Subject: [PATCH 469/893] ls: fix ncdu output for file with timestamp before 1970 --- cmd/restic/cmd_ls.go | 4 ++++ cmd/restic/cmd_ls_test.go | 25 ++++++++++++++----------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index dede03a018f..76e192b6c9e 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -222,6 +222,10 @@ func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { if node.Mode&os.ModeSticky != 0 { outNode.Mode |= 0o1000 } + if outNode.Mtime < 0 { + // ncdu does not allow negative times + outNode.Mtime = 0 + } return json.Marshal(outNode) } diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index 19b30479e3f..21f3d6212c5 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -109,11 +109,11 @@ func TestLsNodeJSON(t *testing.T) { func TestLsNcduNode(t *testing.T) { for i, expect := range []string{ - `{"name":"baz","asize":12345,"dsize":12800,"dev":0,"ino":0,"nlink":1,"notreg":false,"uid":10000000,"gid":20000000,"mode":0,"mtime":-62135596800}`, - `{"name":"empty","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":3840,"notreg":false,"uid":1001,"gid":1001,"mode":0,"mtime":-62135596800}`, - `{"name":"link","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":true,"uid":0,"gid":0,"mode":511,"mtime":-62135596800}`, + `{"name":"baz","asize":12345,"dsize":12800,"dev":0,"ino":0,"nlink":1,"notreg":false,"uid":10000000,"gid":20000000,"mode":0,"mtime":0}`, + `{"name":"empty","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":3840,"notreg":false,"uid":1001,"gid":1001,"mode":0,"mtime":0}`, + `{"name":"link","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":true,"uid":0,"gid":0,"mode":511,"mtime":0}`, `{"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":493,"mtime":1577934245}`, - `{"name":"sticky","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":4077,"mtime":-62135596800}`, + `{"name":"sticky","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":4077,"mtime":0}`, } { c := lsTestNodes[i] out, err := lsNcduNode(c.path, &c.Node) @@ -132,27 +132,30 @@ func TestLsNcdu(t *testing.T) { printer := &ncduLsPrinter{ out: &buf, } + modTime := time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC) 
printer.Snapshot(&restic.Snapshot{ Hostname: "host", Paths: []string{"/example"}, }) printer.Node("/directory", &restic.Node{ - Type: "dir", - Name: "directory", + Type: "dir", + Name: "directory", + ModTime: modTime, }, false) printer.Node("/directory/data", &restic.Node{ - Type: "file", - Name: "data", - Size: 42, + Type: "file", + Name: "data", + Size: 42, + ModTime: modTime, }, false) printer.LeaveDir("/directory") printer.Close() rtest.Equals(t, `[1, 2, {"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, [{"name":"/"}, [ - {"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800}, - {"name":"data","asize":42,"dsize":512,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":-62135596800} + {"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":1577934245}, + {"name":"data","asize":42,"dsize":512,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":1577934245} ] ] ] From bdf24dc8f91157fceff483d31e510d555491f77f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 13 Jul 2024 10:22:01 +0200 Subject: [PATCH 470/893] ls: extend test sample with a second toplevel file --- cmd/restic/cmd_ls_test.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index 21f3d6212c5..a1fcd479bdd 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -150,13 +150,20 @@ func TestLsNcdu(t *testing.T) { ModTime: modTime, }, false) printer.LeaveDir("/directory") + printer.Node("/file", &restic.Node{ + Type: "file", + Name: "file", + Size: 12345, + ModTime: modTime, + }, false) printer.Close() rtest.Equals(t, `[1, 2, {"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, [{"name":"/"}, [ {"name":"directory","asize":0,"dsize":0,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":1577934245}, {"name":"data","asize":42,"dsize":512,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":1577934245} - ] + ], + {"name":"file","asize":12345,"dsize":12800,"dev":0,"ino":0,"nlink":0,"notreg":false,"uid":0,"gid":0,"mode":0,"mtime":1577934245} ] ] `, buf.String()) From f64191da9c98feff632d665e6993104f6470cbd7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 22:27:32 +0200 Subject: [PATCH 471/893] restore: improve reporting of cancelation errors --- internal/restorer/filerestorer.go | 9 ++++-- internal/restorer/restorer.go | 51 ++++++++++++------------------- 2 files changed, 26 insertions(+), 34 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index f0983e003f0..fec8c87803f 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -273,10 +273,13 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error { } func (r *fileRestorer) sanitizeError(file *fileInfo, err error) error { - if err != nil { - err = r.Error(file.location, err) + switch err { + case nil, context.Canceled, context.DeadlineExceeded: + // Context errors are permanent. 
+ return err + default: + return r.Error(file.location, err) } - return err } func (r *fileRestorer) reportError(blobs blobToFileOffsetsMapping, processedBlobs restic.BlobSet, err error) error { diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 29401e5e950..9d5a5a11451 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -115,22 +115,23 @@ type treeVisitor struct { leaveDir func(node *restic.Node, target, location string, entries []string) error } +func (res *Restorer) sanitizeError(location string, err error) error { + switch err { + case nil, context.Canceled, context.DeadlineExceeded: + // Context errors are permanent. + return err + default: + return res.Error(location, err) + } +} + // traverseTree traverses a tree from the repo and calls treeVisitor. // target is the path in the file system, location within the snapshot. func (res *Restorer) traverseTree(ctx context.Context, target string, treeID restic.ID, visitor treeVisitor) error { location := string(filepath.Separator) - sanitizeError := func(err error) error { - switch err { - case nil, context.Canceled, context.DeadlineExceeded: - // Context errors are permanent. - return err - default: - return res.Error(location, err) - } - } if visitor.enterDir != nil { - err := sanitizeError(visitor.enterDir(nil, target, location)) + err := res.sanitizeError(location, visitor.enterDir(nil, target, location)) if err != nil { return err } @@ -140,7 +141,7 @@ func (res *Restorer) traverseTree(ctx context.Context, target string, treeID res return err } if hasRestored && visitor.leaveDir != nil { - err = sanitizeError(visitor.leaveDir(nil, target, location, childFilenames)) + err = res.sanitizeError(location, visitor.leaveDir(nil, target, location, childFilenames)) } return err @@ -151,7 +152,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str tree, err := restic.LoadTree(ctx, res.repo, treeID) if err != nil { debug.Log("error loading tree %v: %v", treeID, err) - return nil, hasRestored, res.Error(location, err) + return nil, hasRestored, res.sanitizeError(location, err) } if res.opts.Delete { @@ -175,7 +176,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str nodeName := filepath.Base(filepath.Join(string(filepath.Separator), node.Name)) if nodeName != node.Name { debug.Log("node %q has invalid name %q", node.Name, nodeName) - err := res.Error(location, errors.Errorf("invalid child node name %s", node.Name)) + err := res.sanitizeError(location, errors.Errorf("invalid child node name %s", node.Name)) if err != nil { return nil, hasRestored, err } @@ -190,7 +191,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str if target == nodeTarget || !fs.HasPathPrefix(target, nodeTarget) { debug.Log("target: %v %v", target, nodeTarget) debug.Log("node %q has invalid target path %q", node.Name, nodeTarget) - err := res.Error(nodeLocation, errors.New("node has invalid path")) + err := res.sanitizeError(nodeLocation, errors.New("node has invalid path")) if err != nil { return nil, hasRestored, err } @@ -211,23 +212,13 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str hasRestored = true } - sanitizeError := func(err error) error { - switch err { - case nil, context.Canceled, context.DeadlineExceeded: - // Context errors are permanent. 
- return err - default: - return res.Error(nodeLocation, err) - } - } - if node.Type == "dir" { if node.Subtree == nil { return nil, hasRestored, errors.Errorf("Dir without subtree in tree %v", treeID.Str()) } if selectedForRestore && visitor.enterDir != nil { - err = sanitizeError(visitor.enterDir(node, nodeTarget, nodeLocation)) + err = res.sanitizeError(nodeLocation, visitor.enterDir(node, nodeTarget, nodeLocation)) if err != nil { return nil, hasRestored, err } @@ -240,7 +231,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str if childMayBeSelected { childFilenames, childHasRestored, err = res.traverseTreeInner(ctx, nodeTarget, nodeLocation, *node.Subtree, visitor) - err = sanitizeError(err) + err = res.sanitizeError(nodeLocation, err) if err != nil { return nil, hasRestored, err } @@ -253,7 +244,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str // metadata need to be restore when leaving the directory in both cases // selected for restore or any child of any subtree have been restored if (selectedForRestore || childHasRestored) && visitor.leaveDir != nil { - err = sanitizeError(visitor.leaveDir(node, nodeTarget, nodeLocation, childFilenames)) + err = res.sanitizeError(nodeLocation, visitor.leaveDir(node, nodeTarget, nodeLocation, childFilenames)) if err != nil { return nil, hasRestored, err } @@ -263,7 +254,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str } if selectedForRestore { - err = sanitizeError(visitor.visitNode(node, nodeTarget, nodeLocation)) + err = res.sanitizeError(nodeLocation, visitor.visitNode(node, nodeTarget, nodeLocation)) if err != nil { return nil, hasRestored, err } @@ -633,9 +624,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { var buf []byte for job := range work { _, buf, err = res.verifyFile(ctx, job.path, job.node, true, false, buf) - if err != nil { - err = res.Error(job.path, err) - } + err = res.sanitizeError(job.path, err) if err != nil || ctx.Err() != nil { break } From c77b2d5ca20a7b49c7302d62a5970a720a7696bf Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 22:29:56 +0200 Subject: [PATCH 472/893] restore: avoid long cancelation delay for frequently used blobs --- internal/restorer/filerestorer.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index fec8c87803f..01d3e43f7fe 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -327,6 +327,11 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID, } for file, offsets := range blob.files { for _, offset := range offsets { + // avoid long cancelation delays for frequently used blobs + if ctx.Err() != nil { + return ctx.Err() + } + writeToFile := func() error { // this looks overly complicated and needs explanation // two competing requirements: From 2833b2f6995d3a17b9c91638519ba028bd39f04a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 22:59:01 +0200 Subject: [PATCH 473/893] restore: fix progress bar for partially up to date files --- internal/restorer/filerestorer.go | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 01d3e43f7fe..6486333615b 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -134,8 +134,12 @@ func (r 
*fileRestorer) restoreFiles(ctx context.Context) error { } fileOffset := int64(0) err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) { - if largeFile && !file.state.HasMatchingBlob(idx) { - packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset}) + if largeFile { + if !file.state.HasMatchingBlob(idx) { + packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset}) + } else { + r.reportBlobProgress(file, uint64(blob.DataLength())) + } } fileOffset += int64(blob.DataLength()) pack, ok := packs[packID] @@ -244,8 +248,12 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error { if fileBlobs, ok := file.blobs.(restic.IDs); ok { fileOffset := int64(0) err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) { - if packID.Equal(pack.id) && !file.state.HasMatchingBlob(idx) { - addBlob(blob, fileOffset) + if packID.Equal(pack.id) { + if !file.state.HasMatchingBlob(idx) { + addBlob(blob, fileOffset) + } else { + r.reportBlobProgress(file, uint64(blob.DataLength())) + } } fileOffset += int64(blob.DataLength()) }) @@ -349,11 +357,7 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID, createSize = file.size } writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse) - action := restore.ActionFileUpdated - if file.state == nil { - action = restore.ActionFileRestored - } - r.progress.AddProgress(file.location, action, uint64(len(blobData)), uint64(file.size)) + r.reportBlobProgress(file, uint64(len(blobData))) return writeErr } err := r.sanitizeError(file, writeToFile()) @@ -365,3 +369,11 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID, return nil }) } + +func (r *fileRestorer) reportBlobProgress(file *fileInfo, blobSize uint64) { + action := restore.ActionFileUpdated + if file.state == nil { + action = restore.ActionFileRestored + } + r.progress.AddProgress(file.location, action, uint64(blobSize), uint64(file.size)) +} From 26aa65e0d40baa2f8344597d0119d6cf9f6dba0d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 23:14:03 +0200 Subject: [PATCH 474/893] restore: add regression test for corrupt in-place restore of large file --- internal/restorer/restorer_test.go | 49 ++++++++++++++++++++++++------ 1 file changed, 39 insertions(+), 10 deletions(-) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 2f28265ccd0..cd2e954e525 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "math" "os" @@ -32,6 +33,7 @@ type Snapshot struct { type File struct { Data string + DataParts []string Links uint64 Inode uint64 Mode os.FileMode @@ -59,11 +61,11 @@ type FileAttributes struct { Encrypted bool } -func saveFile(t testing.TB, repo restic.BlobSaver, node File) restic.ID { +func saveFile(t testing.TB, repo restic.BlobSaver, data string) restic.ID { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - id, _, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(node.Data), restic.ID{}, false) + id, _, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(data), restic.ID{}, false) if err != nil { t.Fatal(err) } @@ -80,17 +82,24 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u inode++ switch node := n.(type) { case File: - fi := n.(File).Inode + fi := 
node.Inode if fi == 0 { fi = inode } - lc := n.(File).Links + lc := node.Links if lc == 0 { lc = 1 } fc := []restic.ID{} - if len(n.(File).Data) > 0 { - fc = append(fc, saveFile(t, repo, node)) + size := 0 + if len(node.Data) > 0 { + size = len(node.Data) + fc = append(fc, saveFile(t, repo, node.Data)) + } else if len(node.DataParts) > 0 { + for _, part := range node.DataParts { + fc = append(fc, saveFile(t, repo, part)) + size += len(part) + } } mode := node.Mode if mode == 0 { @@ -104,22 +113,21 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Content: fc, - Size: uint64(len(n.(File).Data)), + Size: uint64(size), Inode: fi, Links: lc, GenericAttributes: getGenericAttributes(node.attributes, false), }) rtest.OK(t, err) case Symlink: - symlink := n.(Symlink) err := tree.Insert(&restic.Node{ Type: "symlink", Mode: os.ModeSymlink | 0o777, - ModTime: symlink.ModTime, + ModTime: node.ModTime, Name: name, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), - LinkTarget: symlink.Target, + LinkTarget: node.Target, Inode: inode, Links: 1, }) @@ -1050,6 +1058,27 @@ func TestRestorerOverwriteBehavior(t *testing.T) { } } +func TestRestorerOverwriteLarge(t *testing.T) { + parts := make([]string, 100) + for i := 0; i < len(parts); i++ { + parts[i] = fmt.Sprint(i) + } + + baseTime := time.Now() + baseSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{DataParts: parts[0:5], ModTime: baseTime}, + }, + } + overwriteSnapshot := Snapshot{ + Nodes: map[string]Node{ + "foo": File{DataParts: parts, ModTime: baseTime}, + }, + } + + saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{Overwrite: OverwriteAlways}) +} + func TestRestorerOverwriteSpecial(t *testing.T) { baseTime := time.Now() baseSnapshot := Snapshot{ From 98cfb2c4c8abd199a6e4b6a17c4396af25cef627 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 23:26:18 +0200 Subject: [PATCH 475/893] restore: test progress reporting for partially up to date files --- internal/restorer/restorer_test.go | 81 ++++++++++++++++++++++--- internal/restorer/restorer_unix_test.go | 12 ---- 2 files changed, 74 insertions(+), 19 deletions(-) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index cd2e954e525..c809c9e209d 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -22,6 +22,7 @@ import ( "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + restoreui "github.com/restic/restic/internal/ui/restore" "golang.org/x/sync/errgroup" ) @@ -940,7 +941,7 @@ func TestRestorerSparseFiles(t *testing.T) { len(zeros), blocks, 100*sparsity) } -func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSnapshot Snapshot, options Options) string { +func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSnapshot Snapshot, baseOptions, overwriteOptions Options) string { repo := repository.TestRepository(t) tempdir := filepath.Join(rtest.TempDir(t), "target") ctx, cancel := context.WithCancel(context.Background()) @@ -950,13 +951,13 @@ func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSna sn, id := saveSnapshot(t, repo, baseSnapshot, noopGetGenericAttributes) t.Logf("base snapshot saved as %v", id.Str()) - res := NewRestorer(repo, sn, options) + res := NewRestorer(repo, sn, baseOptions) rtest.OK(t, res.RestoreTo(ctx, tempdir)) // 
overwrite snapshot sn, id = saveSnapshot(t, repo, overwriteSnapshot, noopGetGenericAttributes) t.Logf("overwrite snapshot saved as %v", id.Str()) - res = NewRestorer(repo, sn, options) + res = NewRestorer(repo, sn, overwriteOptions) rtest.OK(t, res.RestoreTo(ctx, tempdir)) _, err := res.VerifyFiles(ctx, tempdir) @@ -978,7 +979,20 @@ func TestRestorerSparseOverwrite(t *testing.T) { }, } - saveSnapshotsAndOverwrite(t, baseSnapshot, sparseSnapshot, Options{Sparse: true, Overwrite: OverwriteAlways}) + opts := Options{Sparse: true, Overwrite: OverwriteAlways} + saveSnapshotsAndOverwrite(t, baseSnapshot, sparseSnapshot, opts, opts) +} + +type printerMock struct { + s restoreui.State +} + +func (p *printerMock) Update(_ restoreui.State, _ time.Duration) { +} +func (p *printerMock) CompleteItem(action restoreui.ItemAction, item string, size uint64) { +} +func (p *printerMock) Finish(s restoreui.State, _ time.Duration) { + p.s = s } func TestRestorerOverwriteBehavior(t *testing.T) { @@ -1008,6 +1022,7 @@ func TestRestorerOverwriteBehavior(t *testing.T) { var tests = []struct { Overwrite OverwriteBehavior Files map[string]string + Progress restoreui.State }{ { Overwrite: OverwriteAlways, @@ -1015,6 +1030,14 @@ func TestRestorerOverwriteBehavior(t *testing.T) { "foo": "content: new\n", "dirtest/file": "content: file2\n", }, + Progress: restoreui.State{ + FilesFinished: 3, + FilesTotal: 3, + FilesSkipped: 0, + AllBytesWritten: 28, + AllBytesTotal: 28, + AllBytesSkipped: 0, + }, }, { Overwrite: OverwriteIfChanged, @@ -1022,6 +1045,14 @@ func TestRestorerOverwriteBehavior(t *testing.T) { "foo": "content: new\n", "dirtest/file": "content: file2\n", }, + Progress: restoreui.State{ + FilesFinished: 3, + FilesTotal: 3, + FilesSkipped: 0, + AllBytesWritten: 28, + AllBytesTotal: 28, + AllBytesSkipped: 0, + }, }, { Overwrite: OverwriteIfNewer, @@ -1029,6 +1060,14 @@ func TestRestorerOverwriteBehavior(t *testing.T) { "foo": "content: new\n", "dirtest/file": "content: file\n", }, + Progress: restoreui.State{ + FilesFinished: 2, + FilesTotal: 2, + FilesSkipped: 1, + AllBytesWritten: 13, + AllBytesTotal: 13, + AllBytesSkipped: 15, + }, }, { Overwrite: OverwriteNever, @@ -1036,12 +1075,22 @@ func TestRestorerOverwriteBehavior(t *testing.T) { "foo": "content: foo\n", "dirtest/file": "content: file\n", }, + Progress: restoreui.State{ + FilesFinished: 1, + FilesTotal: 1, + FilesSkipped: 2, + AllBytesWritten: 0, + AllBytesTotal: 0, + AllBytesSkipped: 28, + }, }, } for _, test := range tests { t.Run("", func(t *testing.T) { - tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{Overwrite: test.Overwrite}) + mock := &printerMock{} + progress := restoreui.NewProgress(mock, 0) + tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{}, Options{Overwrite: test.Overwrite, Progress: progress}) for filename, content := range test.Files { data, err := os.ReadFile(filepath.Join(tempdir, filepath.FromSlash(filename))) @@ -1054,14 +1103,19 @@ func TestRestorerOverwriteBehavior(t *testing.T) { t.Errorf("file %v has wrong content: want %q, got %q", filename, content, data) } } + + progress.Finish() + rtest.Equals(t, test.Progress, mock.s) }) } } func TestRestorerOverwriteLarge(t *testing.T) { parts := make([]string, 100) + size := 0 for i := 0; i < len(parts); i++ { parts[i] = fmt.Sprint(i) + size += len(parts[i]) } baseTime := time.Now() @@ -1076,7 +1130,18 @@ func TestRestorerOverwriteLarge(t *testing.T) { }, } - saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, 
Options{Overwrite: OverwriteAlways}) + mock := &printerMock{} + progress := restoreui.NewProgress(mock, 0) + saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{}, Options{Overwrite: OverwriteAlways, Progress: progress}) + progress.Finish() + rtest.Equals(t, restoreui.State{ + FilesFinished: 1, + FilesTotal: 1, + FilesSkipped: 0, + AllBytesWritten: uint64(size), + AllBytesTotal: uint64(size), + AllBytesSkipped: 0, + }, mock.s) } func TestRestorerOverwriteSpecial(t *testing.T) { @@ -1109,7 +1174,8 @@ func TestRestorerOverwriteSpecial(t *testing.T) { "file": "foo2", } - tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{Overwrite: OverwriteAlways}) + opts := Options{Overwrite: OverwriteAlways} + tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, opts, opts) for filename, content := range files { data, err := os.ReadFile(filepath.Join(tempdir, filepath.FromSlash(filename))) @@ -1286,6 +1352,7 @@ func TestRestoreOverwriteDirectory(t *testing.T) { "dir": File{Data: "content: file\n"}, }, }, + Options{}, Options{Delete: true}, ) } diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index febd43acea2..27d990af4be 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -65,18 +65,6 @@ func getBlockCount(t *testing.T, filename string) int64 { return st.Blocks } -type printerMock struct { - s restoreui.State -} - -func (p *printerMock) Update(_ restoreui.State, _ time.Duration) { -} -func (p *printerMock) CompleteItem(action restoreui.ItemAction, item string, size uint64) { -} -func (p *printerMock) Finish(s restoreui.State, _ time.Duration) { - p.s = s -} - func TestRestorerProgressBar(t *testing.T) { testRestorerProgressBar(t, false) } From 4a9536b829bdb66dc24cebb90fcd1180b7b453a0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 12 Jul 2024 23:31:54 +0200 Subject: [PATCH 476/893] amend restore overwrite changelog --- changelog/unreleased/issue-4817 | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog/unreleased/issue-4817 b/changelog/unreleased/issue-4817 index e9c2d01a51c..c1d5f658d1c 100644 --- a/changelog/unreleased/issue-4817 +++ b/changelog/unreleased/issue-4817 @@ -21,3 +21,4 @@ https://github.com/restic/restic/issues/2662 https://github.com/restic/restic/pull/4837 https://github.com/restic/restic/pull/4838 https://github.com/restic/restic/pull/4864 +https://github.com/restic/restic/pull/4921 From 44e3610b3279ffd984eac367c76d73f4d427575c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 13 Jul 2024 00:02:17 +0200 Subject: [PATCH 477/893] restore: progress bar total on windows --- internal/restorer/restorer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 9d5a5a11451..3f41b79a6c9 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -363,7 +363,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { err = res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{ enterDir: func(_ *restic.Node, target, location string) error { debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", location) - if location != "/" { + if location != string(filepath.Separator) { res.opts.Progress.AddFile(0) } return res.ensureDir(target) From dcfffd77799f5ac027fd814d030811bf8e0390aa Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Jul 2024 11:30:41 +0200 Subject: [PATCH 
478/893] restore: extend overwrite test for small files --- internal/restorer/restorer_test.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index c809c9e209d..f8f6f92c090 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -1110,23 +1110,33 @@ func TestRestorerOverwriteBehavior(t *testing.T) { } } -func TestRestorerOverwriteLarge(t *testing.T) { +func TestRestorerOverwritePartial(t *testing.T) { parts := make([]string, 100) size := 0 for i := 0; i < len(parts); i++ { parts[i] = fmt.Sprint(i) size += len(parts[i]) + if i < 8 { + // small file + size += len(parts[i]) + } } + // the data of both snapshots is stored in different pack files + // thus both small an foo in the overwriteSnapshot contain blobs from + // two different pack files. This tests basic handling of blobs from + // different pack files. baseTime := time.Now() baseSnapshot := Snapshot{ Nodes: map[string]Node{ - "foo": File{DataParts: parts[0:5], ModTime: baseTime}, + "foo": File{DataParts: parts[0:5], ModTime: baseTime}, + "small": File{DataParts: parts[0:5], ModTime: baseTime}, }, } overwriteSnapshot := Snapshot{ Nodes: map[string]Node{ - "foo": File{DataParts: parts, ModTime: baseTime}, + "foo": File{DataParts: parts, ModTime: baseTime}, + "small": File{DataParts: parts[0:8], ModTime: baseTime}, }, } @@ -1135,8 +1145,8 @@ func TestRestorerOverwriteLarge(t *testing.T) { saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{}, Options{Overwrite: OverwriteAlways, Progress: progress}) progress.Finish() rtest.Equals(t, restoreui.State{ - FilesFinished: 1, - FilesTotal: 1, + FilesFinished: 2, + FilesTotal: 2, FilesSkipped: 0, AllBytesWritten: uint64(size), AllBytesTotal: uint64(size), From 3bf2927006679c90f7d8ede8170c4b4867e75ae4 Mon Sep 17 00:00:00 2001 From: Alex Johnson Date: Thu, 11 Jul 2024 09:24:00 -0500 Subject: [PATCH 479/893] Update snapshot summary on rewrite Signed-off-by: Alex Johnson --- changelog/unreleased/issue-4902 | 6 +++ cmd/restic/cmd_rewrite.go | 31 ++++++++----- cmd/restic/cmd_rewrite_integration_test.go | 30 ++++++++++++ internal/walker/rewriter.go | 29 ++++++++++++ internal/walker/rewriter_test.go | 54 ++++++++++++++++++++++ 5 files changed, 139 insertions(+), 11 deletions(-) create mode 100644 changelog/unreleased/issue-4902 diff --git a/changelog/unreleased/issue-4902 b/changelog/unreleased/issue-4902 new file mode 100644 index 00000000000..331de00f2f3 --- /dev/null +++ b/changelog/unreleased/issue-4902 @@ -0,0 +1,6 @@ +Enhancement: Update snapshot summary on rewrite + +Restic now recalculates the total number of files and bytes processed when files are excluded during rewrite. 
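In practice this means a rewrite that excludes files produces a new snapshot whose summary counts only the remaining data. A hedged example; the repository path and exclude pattern are illustrative:

.. code-block:: console

    $ restic -r /srv/restic-repo rewrite --exclude "*.iso" --forget latest

The rewritten snapshot's total files processed and total bytes processed then reflect only the files that were kept.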
+ +https://github.com/restic/restic/issues/4902 +https://github.com/restic/restic/pull/4905 diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 73bc32f6fa9..463720ee146 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -134,20 +134,29 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti return true } - rewriter := walker.NewTreeRewriter(walker.RewriteOpts{ - RewriteNode: func(node *restic.Node, path string) *restic.Node { - if selectByName(path) { - return node - } - Verbosef(fmt.Sprintf("excluding %s\n", path)) - return nil - }, - DisableNodeCache: true, - }) + rewriteNode := func(node *restic.Node, path string) *restic.Node { + if selectByName(path) { + return node + } + Verbosef(fmt.Sprintf("excluding %s\n", path)) + return nil + } + + rewriter, querySize := walker.NewSnapshotSizeRewriter(rewriteNode) filter = func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) { - return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree) + id, err := rewriter.RewriteTree(ctx, repo, "/", *sn.Tree) + if err != nil { + return restic.ID{}, err + } + ss := querySize() + if sn.Summary != nil { + sn.Summary.TotalFilesProcessed = ss.FileCount + sn.Summary.TotalBytesProcessed = ss.FileSize + } + return id, err } + } else { filter = func(_ context.Context, sn *restic.Snapshot) (restic.ID, error) { return *sn.Tree, nil diff --git a/cmd/restic/cmd_rewrite_integration_test.go b/cmd/restic/cmd_rewrite_integration_test.go index 71d6a60a57e..781266184aa 100644 --- a/cmd/restic/cmd_rewrite_integration_test.go +++ b/cmd/restic/cmd_rewrite_integration_test.go @@ -7,6 +7,7 @@ import ( "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui" ) func testRunRewriteExclude(t testing.TB, gopts GlobalOptions, excludes []string, forget bool, metadata snapshotMetadataArgs) { @@ -33,6 +34,24 @@ func createBasicRewriteRepo(t testing.TB, env *testEnvironment) restic.ID { return snapshotIDs[0] } +func getSnapshot(t testing.TB, snapshotID restic.ID, env *testEnvironment) *restic.Snapshot { + t.Helper() + + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) + rtest.OK(t, err) + defer unlock() + + snapshots, err := restic.TestLoadAllSnapshots(ctx, repo, nil) + rtest.OK(t, err) + + for _, s := range snapshots { + if *s.ID() == snapshotID { + return s + } + } + return nil +} + func TestRewrite(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -63,10 +82,21 @@ func TestRewriteReplace(t *testing.T) { defer cleanup() snapshotID := createBasicRewriteRepo(t, env) + snapshot := getSnapshot(t, snapshotID, env) + // exclude some data testRunRewriteExclude(t, env.gopts, []string{"3"}, true, snapshotMetadataArgs{Hostname: "", Time: ""}) + bytesExcluded, err := ui.ParseBytes("16K") + rtest.OK(t, err) + newSnapshotIDs := testListSnapshots(t, env.gopts, 1) rtest.Assert(t, snapshotID != newSnapshotIDs[0], "snapshot id should have changed") + + newSnapshot := getSnapshot(t, newSnapshotIDs[0], env) + + rtest.Equals(t, snapshot.Summary.TotalFilesProcessed-1, newSnapshot.Summary.TotalFilesProcessed, "snapshot file count should have changed") + rtest.Equals(t, snapshot.Summary.TotalBytesProcessed-uint64(bytesExcluded), newSnapshot.Summary.TotalBytesProcessed, "snapshot size should have changed") + // check forbids unused blobs, thus remove them first testRunPrune(t, env.gopts, PruneOptions{MaxUnused: "0"}) testRunCheck(t, env.gopts) diff --git 
a/internal/walker/rewriter.go b/internal/walker/rewriter.go index 6d283a625b0..6c27b26acfd 100644 --- a/internal/walker/rewriter.go +++ b/internal/walker/rewriter.go @@ -11,6 +11,12 @@ import ( type NodeRewriteFunc func(node *restic.Node, path string) *restic.Node type FailedTreeRewriteFunc func(nodeID restic.ID, path string, err error) (restic.ID, error) +type QueryRewrittenSizeFunc func() SnapshotSize + +type SnapshotSize struct { + FileCount uint + FileSize uint64 +} type RewriteOpts struct { // return nil to remove the node @@ -52,6 +58,29 @@ func NewTreeRewriter(opts RewriteOpts) *TreeRewriter { return rw } +func NewSnapshotSizeRewriter(rewriteNode NodeRewriteFunc) (*TreeRewriter, QueryRewrittenSizeFunc) { + var count uint + var size uint64 + + t := NewTreeRewriter(RewriteOpts{ + RewriteNode: func(node *restic.Node, path string) *restic.Node { + node = rewriteNode(node, path) + if node != nil && node.Type == "file" { + count++ + size += node.Size + } + return node + }, + DisableNodeCache: true, + }) + + ss := func() SnapshotSize { + return SnapshotSize{count, size} + } + + return t, ss +} + type BlobLoadSaver interface { restic.BlobSaver restic.BlobLoader diff --git a/internal/walker/rewriter_test.go b/internal/walker/rewriter_test.go index e5fcb9915cb..f05e50f9bf1 100644 --- a/internal/walker/rewriter_test.go +++ b/internal/walker/rewriter_test.go @@ -303,6 +303,60 @@ func TestRewriter(t *testing.T) { } } +func TestSnapshotSizeQuery(t *testing.T) { + tree := TestTree{ + "foo": TestFile{Size: 21}, + "bar": TestFile{Size: 21}, + "subdir": TestTree{ + "subfile": TestFile{Size: 21}, + }, + } + newTree := TestTree{ + "foo": TestFile{Size: 42}, + "subdir": TestTree{ + "subfile": TestFile{Size: 42}, + }, + } + t.Run("", func(t *testing.T) { + repo, root := BuildTreeMap(tree) + expRepo, expRoot := BuildTreeMap(newTree) + modrepo := WritableTreeMap{repo} + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + rewriteNode := func(node *restic.Node, path string) *restic.Node { + if path == "/bar" { + return nil + } + if node.Type == "file" { + node.Size += 21 + } + return node + } + rewriter, querySize := NewSnapshotSizeRewriter(rewriteNode) + newRoot, err := rewriter.RewriteTree(ctx, modrepo, "/", root) + if err != nil { + t.Error(err) + } + + ss := querySize() + + test.Equals(t, uint(2), ss.FileCount, "snapshot file count mismatch") + test.Equals(t, uint64(84), ss.FileSize, "snapshot size mismatch") + + // verifying against the expected tree root also implicitly checks the structural integrity + if newRoot != expRoot { + t.Error("hash mismatch") + fmt.Println("Got") + modrepo.Dump() + fmt.Println("Expected") + WritableTreeMap{expRepo}.Dump() + } + }) + +} + func TestRewriterFailOnUnknownFields(t *testing.T) { tm := WritableTreeMap{TreeMap{}} node := []byte(`{"nodes":[{"name":"subfile","type":"file","mtime":"0001-01-01T00:00:00Z","atime":"0001-01-01T00:00:00Z","ctime":"0001-01-01T00:00:00Z","uid":0,"gid":0,"content":null,"unknown_field":42}]}`) From 59c880fce36a1133e5f514c4e1cc5f9b68c51703 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 20 Jul 2024 12:46:28 +0200 Subject: [PATCH 480/893] restorer: only show size in text output for files --- internal/restorer/restorer.go | 4 ++-- internal/ui/restore/json.go | 2 ++ internal/ui/restore/progress.go | 1 + internal/ui/restore/text.go | 4 +++- internal/ui/restore/text_test.go | 1 + 5 files changed, 9 insertions(+), 3 deletions(-) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 
3f41b79a6c9..cd3fd076d2b 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -278,7 +278,7 @@ func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, targe } } - res.opts.Progress.AddProgress(location, restoreui.ActionFileRestored, 0, 0) + res.opts.Progress.AddProgress(location, restoreui.ActionOtherRestored, 0, 0) return res.restoreNodeMetadataTo(node, target, location) } @@ -305,7 +305,7 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location } } - res.opts.Progress.AddProgress(location, restoreui.ActionFileRestored, 0, 0) + res.opts.Progress.AddProgress(location, restoreui.ActionOtherRestored, 0, 0) // TODO investigate if hardlinks have separate metadata on any supported system return res.restoreNodeMetadataTo(node, path, location) } diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go index 7db2e21a34b..c248a7951e6 100644 --- a/internal/ui/restore/json.go +++ b/internal/ui/restore/json.go @@ -52,6 +52,8 @@ func (t *jsonPrinter) CompleteItem(messageType ItemAction, item string, size uin action = "restored" case ActionFileRestored: action = "restored" + case ActionOtherRestored: + action = "restored" case ActionFileUpdated: action = "updated" case ActionFileUnchanged: diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index 71a46e9ddae..67b15f07e74 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -51,6 +51,7 @@ const ( ActionFileRestored ItemAction = "file restored" ActionFileUpdated ItemAction = "file updated" ActionFileUnchanged ItemAction = "file unchanged" + ActionOtherRestored ItemAction = "other restored" ActionDeleted ItemAction = "deleted" ) diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go index 235e7f08557..ec512f369dd 100644 --- a/internal/ui/restore/text.go +++ b/internal/ui/restore/text.go @@ -44,6 +44,8 @@ func (t *textPrinter) CompleteItem(messageType ItemAction, item string, size uin action = "restored" case ActionFileRestored: action = "restored" + case ActionOtherRestored: + action = "restored" case ActionFileUpdated: action = "updated" case ActionFileUnchanged: @@ -54,7 +56,7 @@ func (t *textPrinter) CompleteItem(messageType ItemAction, item string, size uin panic("unknown message type") } - if messageType == ActionDirRestored || messageType == ActionDeleted { + if messageType == ActionDirRestored || messageType == ActionOtherRestored || messageType == ActionDeleted { t.terminal.Print(fmt.Sprintf("%-9v %v", action, item)) } else { t.terminal.Print(fmt.Sprintf("%-9v %v with size %v", action, item, ui.FormatBytes(size))) diff --git a/internal/ui/restore/text_test.go b/internal/ui/restore/text_test.go index eddc0d1cad1..b198a27df89 100644 --- a/internal/ui/restore/text_test.go +++ b/internal/ui/restore/text_test.go @@ -63,6 +63,7 @@ func TestPrintCompleteItem(t *testing.T) { }{ {ActionDirRestored, 0, "restored test"}, {ActionFileRestored, 123, "restored test with size 123 B"}, + {ActionOtherRestored, 0, "restored test"}, {ActionFileUpdated, 123, "updated test with size 123 B"}, {ActionFileUnchanged, 123, "unchanged test with size 123 B"}, {ActionDeleted, 0, "deleted test"}, From 5d6ce59a8d34debbfd552cd01d7ecf96fc31443f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 20 Jul 2024 12:23:12 +0200 Subject: [PATCH 481/893] restorer: also truncate files if their content is already uptodate Files for which no blobs have to be restored, still have to be truncated to the correct size. 
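Before the `foobar` example that follows, here is a self-contained sketch (an illustration only, not code from this patch) of why skipping the write path also skips truncation: opening an existing file without `O_TRUNC` and writing nothing leaves the old length in place, so the restorer has to truncate explicitly.

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	path := filepath.Join(os.TempDir(), "restore-demo")

	// Existing on-disk content is longer than the content in the snapshot.
	if err := os.WriteFile(path, []byte("foobar"), 0o600); err != nil {
		panic(err)
	}

	// Restore finds the first three bytes already up to date, so it writes
	// nothing. Opening without O_TRUNC therefore leaves the file at 6 bytes.
	f, err := os.OpenFile(path, os.O_WRONLY, 0o600)
	if err != nil {
		panic(err)
	}
	_ = f.Close()

	before, _ := os.Stat(path)
	fmt.Println("without truncation:", before.Size()) // 6

	// The explicit special case: truncate to the size recorded in the snapshot.
	if err := os.Truncate(path, 3); err != nil {
		panic(err)
	}
	after, _ := os.Stat(path)
	fmt.Println("with truncation:", after.Size()) // 3

	_ = os.Remove(path)
}
```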
Take a file with content "foobar" that should be replaced by restore with content "foo". The first three bytes are already uptodate, such that no data has to be written. As file truncation normally happens when writing data, a special case is necessary. This no blobs written special case is unified with the empty file special case. --- internal/restorer/filerestorer.go | 49 +++++++++++++++---------------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 6486333615b..1ea82cabb1c 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -120,26 +120,21 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { // create packInfo from fileInfo for _, file := range r.files { fileBlobs := file.blobs.(restic.IDs) - if len(fileBlobs) == 0 { - err := r.restoreEmptyFileAt(file.location) - if errFile := r.sanitizeError(file, err); errFile != nil { - return errFile - } - } - largeFile := len(fileBlobs) > largeFileBlobCount var packsMap map[restic.ID][]fileBlobInfo if largeFile { packsMap = make(map[restic.ID][]fileBlobInfo) } fileOffset := int64(0) + restoredBlobs := false err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) { - if largeFile { - if !file.state.HasMatchingBlob(idx) { + if !file.state.HasMatchingBlob(idx) { + if largeFile { packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset}) - } else { - r.reportBlobProgress(file, uint64(blob.DataLength())) } + restoredBlobs = true + } else { + r.reportBlobProgress(file, uint64(blob.DataLength())) } fileOffset += int64(blob.DataLength()) pack, ok := packs[packID] @@ -175,6 +170,19 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { if largeFile { file.blobs = packsMap } + + // empty file or one with already uptodate content. 
Make sure that the file size is correct + if !restoredBlobs { + err := r.truncateFileToSize(file.location, file.size) + if errFile := r.sanitizeError(file, err); errFile != nil { + return errFile + } + + // the progress events were already sent for non-zero size files + if file.size == 0 { + r.reportBlobProgress(file, 0) + } + } } // drop no longer necessary file list r.files = nil @@ -214,17 +222,12 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { return wg.Wait() } -func (r *fileRestorer) restoreEmptyFileAt(location string) error { - f, err := createFile(r.targetPath(location), 0, false, r.allowRecursiveDelete) +func (r *fileRestorer) truncateFileToSize(location string, size int64) error { + f, err := createFile(r.targetPath(location), size, false, r.allowRecursiveDelete) if err != nil { return err } - if err = f.Close(); err != nil { - return err - } - - r.progress.AddProgress(location, restore.ActionFileRestored, 0, 0) - return nil + return f.Close() } type blobToFileOffsetsMapping map[restic.ID]struct { @@ -248,12 +251,8 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error { if fileBlobs, ok := file.blobs.(restic.IDs); ok { fileOffset := int64(0) err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) { - if packID.Equal(pack.id) { - if !file.state.HasMatchingBlob(idx) { - addBlob(blob, fileOffset) - } else { - r.reportBlobProgress(file, uint64(blob.DataLength())) - } + if packID.Equal(pack.id) && !file.state.HasMatchingBlob(idx) { + addBlob(blob, fileOffset) } fileOffset += int64(blob.DataLength()) }) From 6f8e17a46351167f2bbdedba7f82b9147db07fd9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 20 Jul 2024 12:32:08 +0200 Subject: [PATCH 482/893] restorer: minor code cleanups --- internal/restorer/filerestorer.go | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 1ea82cabb1c..6d530b25c56 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -93,17 +93,20 @@ func (r *fileRestorer) targetPath(location string) string { return filepath.Join(r.dst, location) } -func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID, packBlob restic.Blob, idx int)) error { +func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID, packBlob restic.Blob, idx int, fileOffset int64)) error { if len(blobIDs) == 0 { return nil } + fileOffset := int64(0) for i, blobID := range blobIDs { packs := r.idx(restic.DataBlob, blobID) if len(packs) == 0 { return errors.Errorf("Unknown blob %s", blobID.String()) } - fn(packs[0].PackID, packs[0].Blob, i) + pb := packs[0] + fn(pb.PackID, pb.Blob, i, fileOffset) + fileOffset += int64(pb.DataLength()) } return nil @@ -124,10 +127,10 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { var packsMap map[restic.ID][]fileBlobInfo if largeFile { packsMap = make(map[restic.ID][]fileBlobInfo) + file.blobs = packsMap } - fileOffset := int64(0) restoredBlobs := false - err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) { + err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int, fileOffset int64) { if !file.state.HasMatchingBlob(idx) { if largeFile { packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset}) @@ -136,7 +139,6 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { } else 
{ r.reportBlobProgress(file, uint64(blob.DataLength())) } - fileOffset += int64(blob.DataLength()) pack, ok := packs[packID] if !ok { pack = &packInfo{ @@ -151,6 +153,11 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { file.sparse = r.sparse } }) + if err != nil { + // repository index is messed up, can't do anything + return err + } + if len(fileBlobs) == 1 { // no need to preallocate files with a single block, thus we can always consider them to be sparse // in addition, a short chunk will never match r.zeroChunk which would prevent sparseness for short files @@ -163,14 +170,6 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { file.sparse = false } - if err != nil { - // repository index is messed up, can't do anything - return err - } - if largeFile { - file.blobs = packsMap - } - // empty file or one with already uptodate content. Make sure that the file size is correct if !restoredBlobs { err := r.truncateFileToSize(file.location, file.size) @@ -249,12 +248,10 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error { blobInfo.files[file] = append(blobInfo.files[file], fileOffset) } if fileBlobs, ok := file.blobs.(restic.IDs); ok { - fileOffset := int64(0) - err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) { + err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int, fileOffset int64) { if packID.Equal(pack.id) && !file.state.HasMatchingBlob(idx) { addBlob(blob, fileOffset) } - fileOffset += int64(blob.DataLength()) }) if err != nil { // restoreFiles should have caught this error before From 10efa771038c451fd1752edf633589ea60c14e56 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 20 Jul 2024 12:32:27 +0200 Subject: [PATCH 483/893] restorer: add test for file truncation case --- internal/restorer/restorer_test.go | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index f8f6f92c090..9c02afe68bd 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -1003,6 +1003,7 @@ func TestRestorerOverwriteBehavior(t *testing.T) { "dirtest": Dir{ Nodes: map[string]Node{ "file": File{Data: "content: file\n", ModTime: baseTime}, + "foo": File{Data: "content: foobar", ModTime: baseTime}, }, ModTime: baseTime, }, @@ -1014,6 +1015,7 @@ func TestRestorerOverwriteBehavior(t *testing.T) { "dirtest": Dir{ Nodes: map[string]Node{ "file": File{Data: "content: file2\n", ModTime: baseTime.Add(-time.Second)}, + "foo": File{Data: "content: foo", ModTime: baseTime}, }, }, }, @@ -1029,13 +1031,14 @@ func TestRestorerOverwriteBehavior(t *testing.T) { Files: map[string]string{ "foo": "content: new\n", "dirtest/file": "content: file2\n", + "dirtest/foo": "content: foo", }, Progress: restoreui.State{ - FilesFinished: 3, - FilesTotal: 3, + FilesFinished: 4, + FilesTotal: 4, FilesSkipped: 0, - AllBytesWritten: 28, - AllBytesTotal: 28, + AllBytesWritten: 40, + AllBytesTotal: 40, AllBytesSkipped: 0, }, }, @@ -1044,13 +1047,14 @@ func TestRestorerOverwriteBehavior(t *testing.T) { Files: map[string]string{ "foo": "content: new\n", "dirtest/file": "content: file2\n", + "dirtest/foo": "content: foo", }, Progress: restoreui.State{ - FilesFinished: 3, - FilesTotal: 3, + FilesFinished: 4, + FilesTotal: 4, FilesSkipped: 0, - AllBytesWritten: 28, - AllBytesTotal: 28, + AllBytesWritten: 40, + AllBytesTotal: 40, AllBytesSkipped: 0, }, }, @@ -1059,14 
+1063,15 @@ func TestRestorerOverwriteBehavior(t *testing.T) { Files: map[string]string{ "foo": "content: new\n", "dirtest/file": "content: file\n", + "dirtest/foo": "content: foobar", }, Progress: restoreui.State{ FilesFinished: 2, FilesTotal: 2, - FilesSkipped: 1, + FilesSkipped: 2, AllBytesWritten: 13, AllBytesTotal: 13, - AllBytesSkipped: 15, + AllBytesSkipped: 27, }, }, { @@ -1074,14 +1079,15 @@ func TestRestorerOverwriteBehavior(t *testing.T) { Files: map[string]string{ "foo": "content: foo\n", "dirtest/file": "content: file\n", + "dirtest/foo": "content: foobar", }, Progress: restoreui.State{ FilesFinished: 1, FilesTotal: 1, - FilesSkipped: 2, + FilesSkipped: 3, AllBytesWritten: 0, AllBytesTotal: 0, - AllBytesSkipped: 28, + AllBytesSkipped: 40, }, }, } From 0dcac90bea39ad8f763922e82d2716290dbbcb97 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 21 Jul 2024 12:03:28 +0200 Subject: [PATCH 484/893] restorer: don't track already uptodate blobs --- internal/restorer/filerestorer.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 6d530b25c56..e517e628460 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -138,6 +138,8 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { restoredBlobs = true } else { r.reportBlobProgress(file, uint64(blob.DataLength())) + // completely ignore blob + return } pack, ok := packs[packID] if !ok { From 948c6f1da3010b2543c7abd43399998e463e3476 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 22 Jul 2024 22:11:58 +0200 Subject: [PATCH 485/893] update xattr library to 0.4.10 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1e2769c7c63..366d89a7f2f 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 github.com/pkg/sftp v1.13.6 - github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013 + github.com/pkg/xattr v0.4.10 github.com/restic/chunker v0.4.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 diff --git a/go.sum b/go.sum index e4caba45642..2743f20038f 100644 --- a/go.sum +++ b/go.sum @@ -167,8 +167,8 @@ github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013 h1:aqByeeNnF7NiEbXCi7nBxZ272+6f6FUBmj/dUzWCdvc= -github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= +github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= +github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= From c54d5a8eca284a12f1c25b0520e95b5a623e067a Mon Sep 17 00:00:00 2001 From: "Leo R. 
Lundgren" Date: Tue, 23 Jul 2024 00:37:56 +0200 Subject: [PATCH 486/893] doc: Clarify VSS extended options --- doc/040_backup.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 77176b0549c..39a24ed59c7 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -58,38 +58,38 @@ snapshot for each volume that contains files to backup. Files are read from the VSS snapshot instead of the regular filesystem. This allows to backup files that are exclusively locked by another process during the backup. -You can use additional options to change VSS behaviour: +You can use the following extended options to change the VSS behavior: - * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, the default value is 120 seconds + * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, default value being 120 seconds * ``-o vss.exclude-all-mount-points`` disable auto snapshotting of all volume mount points * ``-o vss.exclude-volumes`` allows excluding specific volumes or volume mount points from snapshotting * ``-o vss.provider`` specifies VSS provider used for snapshotting -For example a 2.5 minutes timeout with snapshotting of mount points disabled can be specified as +For example a 2.5 minutes timeout with snapshotting of mount points disabled can be specified as: .. code-block:: console -o vss.timeout=2m30s -o vss.exclude-all-mount-points=true -and excluding drive ``d:\``, mount point ``c:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as +and excluding drive ``d:\``, mount point ``c:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as: .. code-block:: console -o vss.exclude-volumes="d:;c:\mnt\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" -VSS provider can be specified by GUID +VSS provider can be specified by GUID: .. code-block:: console -o vss.provider={3f900f90-00e9-440e-873a-96ca5eb079e5} -or by name +or by name: .. code-block:: console -o vss.provider="Hyper-V IC Software Shadow Copy Provider" -Also ``MS`` can be used as alias for ``Microsoft Software Shadow Copy provider 1.0``. +Also, ``MS`` can be used as alias for ``Microsoft Software Shadow Copy provider 1.0``. By default VSS ignores Outlook OST files. This is not a restriction of restic but the default Windows VSS configuration. The files not to snapshot are From 77ec8ae0913a400bd5ca1f02d0b40425a9d425b2 Mon Sep 17 00:00:00 2001 From: "Leo R. 
Lundgren" Date: Tue, 23 Jul 2024 00:19:22 +0200 Subject: [PATCH 487/893] doc: Polish unreleased changelogs --- changelog/unreleased/issue-1786 | 19 ++++++++++--------- changelog/unreleased/issue-2348 | 10 ++++++---- changelog/unreleased/issue-3600 | 12 ++++++------ changelog/unreleased/issue-3806 | 9 +++++---- changelog/unreleased/issue-4048 | 4 ++-- changelog/unreleased/issue-4209 | 4 ++-- changelog/unreleased/issue-4251 | 2 +- changelog/unreleased/issue-4287 | 8 +++----- changelog/unreleased/issue-4437 | 9 +++++---- changelog/unreleased/issue-4472 | 4 ++-- changelog/unreleased/issue-4540 | 3 ++- changelog/unreleased/issue-4547 | 6 +++--- changelog/unreleased/issue-4549 | 9 ++++----- changelog/unreleased/issue-4568 | 15 +++++++-------- changelog/unreleased/issue-4583 | 11 ++++++----- changelog/unreleased/issue-4601 | 4 ++-- changelog/unreleased/issue-4602 | 21 ++++++++++----------- changelog/unreleased/issue-4627 | 17 +++++++++-------- changelog/unreleased/issue-4656 | 4 ++-- changelog/unreleased/issue-4676 | 8 ++++---- changelog/unreleased/issue-4678 | 6 +++--- changelog/unreleased/issue-4707 | 12 ++++++------ changelog/unreleased/issue-4733 | 8 +++++--- changelog/unreleased/issue-4744 | 4 ++-- changelog/unreleased/issue-4760 | 2 +- changelog/unreleased/issue-4768 | 6 +++--- changelog/unreleased/issue-4781 | 4 ++-- changelog/unreleased/issue-4817 | 24 +++++++++++++----------- changelog/unreleased/issue-4850 | 8 ++++---- changelog/unreleased/issue-4902 | 6 ++++-- changelog/unreleased/issue-662 | 10 +++++----- changelog/unreleased/issue-693 | 8 ++++---- changelog/unreleased/issue-828 | 2 +- changelog/unreleased/pull-3067 | 23 +++++++++++++---------- changelog/unreleased/pull-4006 | 7 +++---- changelog/unreleased/pull-4354 | 6 +++--- changelog/unreleased/pull-4503 | 6 +++--- changelog/unreleased/pull-4526 | 13 +++++++------ changelog/unreleased/pull-4573 | 5 +++-- changelog/unreleased/pull-4590 | 7 +++---- changelog/unreleased/pull-4611 | 8 +++++--- changelog/unreleased/pull-4615 | 4 ++-- changelog/unreleased/pull-4664 | 12 +++++++----- changelog/unreleased/pull-4703 | 12 +++++++----- changelog/unreleased/pull-4708 | 16 +++++++++------- changelog/unreleased/pull-4709 | 10 +++++----- changelog/unreleased/pull-4737 | 5 +++-- changelog/unreleased/pull-4764 | 12 +++++++----- changelog/unreleased/pull-4796 | 6 +++--- changelog/unreleased/pull-4807 | 7 ++++--- changelog/unreleased/pull-4839 | 2 +- changelog/unreleased/pull-4884 | 10 +++++----- 52 files changed, 237 insertions(+), 213 deletions(-) diff --git a/changelog/unreleased/issue-1786 b/changelog/unreleased/issue-1786 index 87d4942c8b2..b48ac0db6b4 100644 --- a/changelog/unreleased/issue-1786 +++ b/changelog/unreleased/issue-1786 @@ -1,17 +1,18 @@ Enhancement: Support repositories with empty password -Restic refused to create or operate on repositories with an empty password. +Restic previously required a password to create or operate on repositories. Using the new option `--insecure-no-password` it is now possible to disable -this check. Restic will not prompt for a password when using this option. -For security reasons, the option must always be specified when operating on -repositories with an empty password. +this requirement. Restic will not prompt for a password when using this option. -Specifying `--insecure-no-password` while also passing a password to restic -via a CLI option or via environment variable results in an error. 
+For security reasons, the option must always be specified when operating on +repositories with an empty password, and specifying `--insecure-no-password` +while also passing a password to restic via a CLI option or environment +variable results in an error. -The `init` and `copy` command also support the option `--from-insecure-no-password` -which applies to the source repository. The `key add` and `key passwd` commands -include the `--new-insecure-no-password` option to add or set an empty password. +The `init` and `copy` commands adds the related `--from-insecure-no-password` +option, which applies to the source repository. The `key add` and `key passwd` +commands add the `--new-insecure-no-password` option to add or set an empty +password. https://github.com/restic/restic/issues/1786 https://github.com/restic/restic/issues/4326 diff --git a/changelog/unreleased/issue-2348 b/changelog/unreleased/issue-2348 index a8a0849fec6..c329ae0a293 100644 --- a/changelog/unreleased/issue-2348 +++ b/changelog/unreleased/issue-2348 @@ -1,10 +1,12 @@ Enhancement: Add `--delete` option to `restore` command -The `restore` command now supports a `--delete` option that allows removing files and directories -from the target directory that do not exist in the snapshot. This option also allows files in the -snapshot to replace non-empty directories. +The `restore` command now supports a `--delete` option that allows removing +files and directories from the target directory that do not exist in the +snapshot. This option also allows files in the snapshot to replace non-empty +directories having the same name. -To check that only the expected files are deleted add the `--dry-run --verbose=2` options. +To check that only expected files are deleted, add the `--dry-run --verbose=2` +options. https://github.com/restic/restic/issues/2348 https://github.com/restic/restic/pull/4881 diff --git a/changelog/unreleased/issue-3600 b/changelog/unreleased/issue-3600 index 0da66d3821e..b1e9af12617 100644 --- a/changelog/unreleased/issue-3600 +++ b/changelog/unreleased/issue-3600 @@ -1,10 +1,10 @@ -Bugfix: `backup` works if xattrs above the backup target cannot be read +Bugfix: Handle unreadable xattrs in folders above `backup` target -When backup targets are specified using absolute paths, then `backup` also -includes information about the parent folders of the backup targets in the -snapshot. If the extended attributes for some of these folders could not be -read due to missing permissions, this caused the backup to fail. This has been -fixed. +When backup targets are specified using absolute paths, `backup` also includes +information about the parent folders of the backup targets in the snapshot. + +If the extended attributes for some of these folders could not be read due to +missing permissions, this caused the backup to fail. This has now been fixed. https://github.com/restic/restic/issues/3600 https://github.com/restic/restic/pull/4668 diff --git a/changelog/unreleased/issue-3806 b/changelog/unreleased/issue-3806 index d3ae9b507bd..6b0663c9f95 100644 --- a/changelog/unreleased/issue-3806 +++ b/changelog/unreleased/issue-3806 @@ -1,10 +1,11 @@ -Enhancement: Make `prune` command resumable +Enhancement: Optimize and make `prune` command resumable -When `prune` was interrupted, it a latter `prune` run previously started repacking -the pack files from the start as `prune` did not update the index while repacking. 
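The issue-3600 entry earlier in this batch is about tolerating unreadable extended attributes on parent folders of the backup target. A rough sketch of such best-effort xattr listing, using the `github.com/pkg/xattr` package that restic already depends on; the warn-and-continue error policy shown here is an assumption for illustration, not restic's exact behavior.

```
package main

import (
	"fmt"

	"github.com/pkg/xattr"
)

// listXattrsBestEffort returns whatever extended attributes can be read for
// path. Errors (for example missing permissions on a parent folder of the
// backup target) are reported but do not abort the walk.
func listXattrsBestEffort(path string) map[string][]byte {
	attrs := map[string][]byte{}

	names, err := xattr.List(path)
	if err != nil {
		fmt.Printf("warning: ignoring xattrs of %s: %v\n", path, err)
		return attrs
	}
	for _, name := range names {
		value, err := xattr.Get(path, name)
		if err != nil {
			fmt.Printf("warning: ignoring xattr %s of %s: %v\n", name, path, err)
			continue
		}
		attrs[name] = value
	}
	return attrs
}

func main() {
	for name, value := range listXattrsBestEffort("/etc") {
		fmt.Printf("%s = %q\n", name, value)
	}
}
```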
+Previously, if the `prune` command was interrupted, a later `prune` run would +start repacking pack files from the start, as `prune` did not update the index +while repacking. The `prune` command now supports resuming interrupted prune runs. The update -of the repository index also has been optimized to use less memory and only +of the repository index has also been optimized to use less memory and only rewrite parts of the index that have changed. https://github.com/restic/restic/issues/3806 diff --git a/changelog/unreleased/issue-4048 b/changelog/unreleased/issue-4048 index 0754171f332..3b9c61d2048 100644 --- a/changelog/unreleased/issue-4048 +++ b/changelog/unreleased/issue-4048 @@ -1,6 +1,6 @@ -Enhancement: Add support for fuse-t for `mount` on macOS +Enhancement: Add support for FUSE-T with `mount` on macOS -Restic now supports creating fuse mounts using fuse-t on macOS. +The restic `mount` command now supports creating FUSE mounts using FUSE-T on macOS. https://github.com/restic/restic/issues/4048 https://github.com/restic/restic/pull/4825 diff --git a/changelog/unreleased/issue-4209 b/changelog/unreleased/issue-4209 index 2e49191c1e5..04eb8ef18b8 100644 --- a/changelog/unreleased/issue-4209 +++ b/changelog/unreleased/issue-4209 @@ -1,7 +1,7 @@ -Bugfix: Fix slow sftp upload performance +Bugfix: Fix slow SFTP upload performance Since restic 0.12.1, the upload speed of the sftp backend to a remote server -has regressed significantly. This has been fixed. +has regressed significantly. This has now been fixed. https://github.com/restic/restic/issues/4209 https://github.com/restic/restic/pull/4782 diff --git a/changelog/unreleased/issue-4251 b/changelog/unreleased/issue-4251 index d1d3f450842..5541f2d7ea0 100644 --- a/changelog/unreleased/issue-4251 +++ b/changelog/unreleased/issue-4251 @@ -1,4 +1,4 @@ -Enhancement: Support reading backup from a commands's standard output +Enhancement: Support reading backup from a command's standard output The `backup` command now supports the `--stdin-from-command` option. When using this option, the arguments to `backup` are interpreted as a command instead of diff --git a/changelog/unreleased/issue-4287 b/changelog/unreleased/issue-4287 index df4fc55903f..cd25a8deeaa 100644 --- a/changelog/unreleased/issue-4287 +++ b/changelog/unreleased/issue-4287 @@ -1,9 +1,7 @@ -Enhancement: support connection to rest-server using unix socket +Enhancement: Support connection to rest-server using unix socket -Restic now supports connecting to rest-server using a unix socket for -rest-server version 0.13.0 or later. - -This allows running restic as follows: +Restic now supports using a unix socket to connect to a rest-server +version 0.13.0 or later. This allows running restic as follows: ``` rest-server --listen unix:/tmp/rest.socket --data /path/to/data & diff --git a/changelog/unreleased/issue-4437 b/changelog/unreleased/issue-4437 index ee0981c50e8..bc76c09835c 100644 --- a/changelog/unreleased/issue-4437 +++ b/changelog/unreleased/issue-4437 @@ -1,9 +1,10 @@ -Enhancement: `check` command creates cache directory if it does not exist +Enhancement: Make `check` command create non-existent cache directory -If a custom cache directory was specified for the `check` command but the directory did not exist, -then `check` continued with cache disabled. +Previously, if a custom cache directory was specified for the `check` command, +but the directory did not exist, `check` continued with the cache disabled. 
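The issue-4437 entry here describes creating a missing cache directory before initializing the cache instead of silently disabling it. A minimal sketch of that ensure-then-use step; the function name and permission bits are illustrative, not restic's code.

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// ensureCacheDir creates the cache directory (and any missing parents) if it
// does not exist yet, instead of silently continuing without a cache.
func ensureCacheDir(dir string) error {
	if err := os.MkdirAll(dir, 0o700); err != nil {
		return fmt.Errorf("unable to create cache directory %s: %w", dir, err)
	}
	return nil
}

func main() {
	dir := filepath.Join(os.TempDir(), "restic-cache-demo")
	if err := ensureCacheDir(dir); err != nil {
		fmt.Println("cache disabled:", err)
		return
	}
	fmt.Println("using cache at", dir)
}
```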
-The `check` command now attempts to create the cache directory before initializing the cache. +The `check` command now attempts to create the cache directory before +initializing the cache. https://github.com/restic/restic/issues/4437 https://github.com/restic/restic/pull/4805 diff --git a/changelog/unreleased/issue-4472 b/changelog/unreleased/issue-4472 index 3049fdf30f4..beb3612b858 100644 --- a/changelog/unreleased/issue-4472 +++ b/changelog/unreleased/issue-4472 @@ -1,11 +1,11 @@ -Enhancement: Allow AWS Assume Role to be used for S3 backend +Enhancement: Support AWS Assume Role for S3 backend Previously only credentials discovered via the Minio discovery methods were used to authenticate. However, there are many circumstances where the discovered credentials have lower permissions and need to assume a specific role. This is now possible -using the following new environment variables. +using the following new environment variables: - RESTIC_AWS_ASSUME_ROLE_ARN - RESTIC_AWS_ASSUME_ROLE_SESSION_NAME diff --git a/changelog/unreleased/issue-4540 b/changelog/unreleased/issue-4540 index 9a706141e6c..25358c33236 100644 --- a/changelog/unreleased/issue-4540 +++ b/changelog/unreleased/issue-4540 @@ -1,6 +1,7 @@ Change: Require at least ARMv6 for ARM binaries -The official release binaries of restic now require at least ARMv6 support for ARM platforms. +The official release binaries of restic now require +at least ARMv6 support for ARM platforms. https://github.com/restic/restic/issues/4540 https://github.com/restic/restic/pull/4542 diff --git a/changelog/unreleased/issue-4547 b/changelog/unreleased/issue-4547 index edb1cf69329..bb69a59e6aa 100644 --- a/changelog/unreleased/issue-4547 +++ b/changelog/unreleased/issue-4547 @@ -1,7 +1,7 @@ -Enhancement: Add support for `--json` option to `version` command +Enhancement: Add `--json` option to `version` command -Restic now supports outputting restic version and used go version, OS and -architecture via JSON when using the version command. +Restic now supports outputting restic version along with the Go version, OS +and architecture used to build restic in JSON format using `version --json`. https://github.com/restic/restic/issues/4547 https://github.com/restic/restic/pull/4553 diff --git a/changelog/unreleased/issue-4549 b/changelog/unreleased/issue-4549 index 8f35b02335d..245ed484ada 100644 --- a/changelog/unreleased/issue-4549 +++ b/changelog/unreleased/issue-4549 @@ -1,11 +1,10 @@ Enhancement: Add `--ncdu` option to `ls` command -NCDU (NCurses Disk Usage) is a tool to analyse disk usage of directories. -It has an option to save a directory tree and analyse it later. -The `ls` command now supports the `--ncdu` option which outputs information -about a snapshot in the NCDU format. +NCDU (NCurses Disk Usage) is a tool to analyse disk usage of directories. It has +an option to save a directory tree and analyse it later. -You can use it as follows: `restic ls latest --ncdu | ncdu -f -` +The `ls` command now supports outputting snapshot information in the NCDU format +using the `--ncdu` option. 
Example usage: `restic ls latest --ncdu | ncdu -f -` https://github.com/restic/restic/issues/4549 https://github.com/restic/restic/pull/4550 diff --git a/changelog/unreleased/issue-4568 b/changelog/unreleased/issue-4568 index fedf373eae0..00394fc449f 100644 --- a/changelog/unreleased/issue-4568 +++ b/changelog/unreleased/issue-4568 @@ -1,17 +1,16 @@ -Bugfix: Prevent `forget --keep-tags invalid` from deleting all snapshots +Bugfix: Prevent `forget --keep-tags ` from deleting all snapshots -Running `forget --keep-tags invalid`, where the tag `invalid` does not -exist in the repository, would remove all snapshots. This is especially +Running `forget --keep-tags `, where `` is a tag that does +not exist in the repository, would remove all snapshots. This is especially problematic if the tag name contains a typo. The `forget` command now fails with an error if all snapshots in a snapshot group would be deleted. This prevents the above example from deleting all snapshots. -It is possible to temporarily disable the new check by setting the environment variable -`RESTIC_FEATURES=safe-forget-keep-tags=false`. Note that this feature flag -will be removed in the next minor restic version. +It is possible to temporarily disable the new check by setting the environment +variable `RESTIC_FEATURES=safe-forget-keep-tags=false`. Note that this feature +flag will be removed in the next minor restic version. -https://github.com/restic/restic/issues/4568 +https://github.com/restic/restic/pull/4568 https://github.com/restic/restic/pull/4764 -https://forum.restic.net/t/delete-all-snapshots-in-one-command-is-this-feature-intentional/6923/3 diff --git a/changelog/unreleased/issue-4583 b/changelog/unreleased/issue-4583 index 97b0e6ba702..bc1d030cc04 100644 --- a/changelog/unreleased/issue-4583 +++ b/changelog/unreleased/issue-4583 @@ -1,12 +1,13 @@ -Enhancement: Ignore s3.storage-class for metadata if archive tier is specified +Enhancement: Ignore `s3.storage-class` archive tiers for metadata -There is no official cold storage support in restic, use this option at your -own risk. +Restic used to store all files on S3 using the specified `s3.storage-class`. -Restic always stored all files on s3 using the specified `s3.storage-class`. -Now, restic will store metadata using a non-archive storage tier to avoid +Now, restic will only use non-archive storage tiers for metadata, to avoid problems when accessing a repository. To restore any data, it is still necessary to manually warm up the required data beforehand. +NOTE: There is no official cold storage support in restic, use this option at +your own risk. + https://github.com/restic/restic/issues/4583 https://github.com/restic/restic/pull/4584 diff --git a/changelog/unreleased/issue-4601 b/changelog/unreleased/issue-4601 index f99dbe18722..8efeba47f3c 100644 --- a/changelog/unreleased/issue-4601 +++ b/changelog/unreleased/issue-4601 @@ -2,8 +2,8 @@ Enhancement: Add support for feature flags Restic now supports feature flags that can be used to enable and disable experimental features. The flags can be set using the environment variable -`RESTIC_FEATURES`. To get a list of currently supported feature flags, -run the `features` command. +`RESTIC_FEATURES`. To get a list of currently supported feature flags, use +the `features` command. 
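Several entries above use the `RESTIC_FEATURES=<flag>=<value>` syntax. The standalone sketch below shows one plausible way such a comma-separated flag string could be parsed; it illustrates the syntax only and is not restic's actual feature-flag parser.

```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFeatureFlags parses a string such as
// "deprecate-legacy-index=false,backend-error-redesign=false" into a map.
// A flag without an explicit value is treated as enabled.
func parseFeatureFlags(spec string) (map[string]bool, error) {
	flags := map[string]bool{}
	if spec == "" {
		return flags, nil
	}
	for _, entry := range strings.Split(spec, ",") {
		name, value, found := strings.Cut(entry, "=")
		enabled := true
		if found {
			var err error
			enabled, err = strconv.ParseBool(value)
			if err != nil {
				return nil, fmt.Errorf("invalid value for feature flag %q: %w", name, err)
			}
		}
		flags[name] = enabled
	}
	return flags, nil
}

func main() {
	flags, err := parseFeatureFlags("safe-forget-keep-tags=false,device-id-for-hardlinks")
	if err != nil {
		panic(err)
	}
	fmt.Println(flags) // map[device-id-for-hardlinks:true safe-forget-keep-tags:false]
}
```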
https://github.com/restic/restic/issues/4601 https://github.com/restic/restic/pull/4666 diff --git a/changelog/unreleased/issue-4602 b/changelog/unreleased/issue-4602 index 7532bcb1e60..3fe19db7988 100644 --- a/changelog/unreleased/issue-4602 +++ b/changelog/unreleased/issue-4602 @@ -1,22 +1,21 @@ -Change: Deprecate legacy index format and s3legacy layout +Change: Deprecate legacy index format and `s3legacy` repository layout Support for the legacy index format used by restic before version 0.2.0 has been deprecated and will be removed in the next minor restic version. You can use `restic repair index` to update the index to the current format. It is possible to temporarily reenable support for the legacy index format by -setting the environment variable -`RESTIC_FEATURES=deprecate-legacy-index=false`. Note that this feature flag -will be removed in the next minor restic version. +setting the environment variable `RESTIC_FEATURES=deprecate-legacy-index=false`. +Note that this feature flag will be removed in the next minor restic version. -Support for the s3legacy layout used for the S3 backend before restic 0.7.0 -has been deprecated and will be removed in the next minor restic version. You -can migrate your S3 repository using `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout`. +Support for the `s3legacy` repository layout used for the S3 backend before +restic 0.7.0 has been deprecated and will be removed in the next minor restic +version. You can migrate your S3 repository to the current layout using +`RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout`. -It is possible to temporarily reenable support for the legacy s3layout by -setting the environment variable -`RESTIC_FEATURES=deprecate-s3-legacy-layout=false`. Note that this feature flag -will be removed in the next minor restic version. +It is possible to temporarily reenable support for the `s3legacy` layout by +setting the environment variable `RESTIC_FEATURES=deprecate-s3-legacy-layout=false`. +Note that this feature flag will be removed in the next minor restic version. https://github.com/restic/restic/issues/4602 https://github.com/restic/restic/pull/4724 diff --git a/changelog/unreleased/issue-4627 b/changelog/unreleased/issue-4627 index e9c66c17544..87a18560482 100644 --- a/changelog/unreleased/issue-4627 +++ b/changelog/unreleased/issue-4627 @@ -2,23 +2,24 @@ Change: Redesign backend error handling to improve reliability Restic now downloads pack files in large chunks instead of using a streaming download. This prevents failures due to interrupted streams. The `restore` -command now also retries downloading individual blobs that cannot be retrieved. +command now also retries downloading individual blobs that could not be +retrieved. HTTP requests that are stuck for more than two minutes while uploading or downloading are now forcibly interrupted. This ensures that stuck requests are retried after a short timeout. -Attempts to access a missing file or a truncated file will no longer be retried. -This avoids unnecessary retries in those cases. All other backend requests are -retried for up to 15 minutes. This ensures that a temporarily interrupted network +Attempts to access a missing or truncated file will no longer be retried. This +avoids unnecessary retries in those cases. All other backend requests are +retried for up to 15 minutes. This ensures that temporarily interrupted network connections can be tolerated. 
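The issue-4627 entry above distinguishes permanent failures (missing or truncated files) from transient errors that are retried for up to 15 minutes. A generic sketch of such a retry policy follows; the 15-minute budget comes from the changelog text, while everything else (names, backoff steps) is illustrative rather than restic's implementation.

```
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// errPermanent marks failures, such as a missing file, that retrying cannot fix.
var errPermanent = errors.New("permanent backend error")

// retry runs op until it succeeds, fails permanently, or the overall retry
// budget is exhausted, sleeping with a capped exponential backoff in between.
func retry(ctx context.Context, budget time.Duration, op func() error) error {
	ctx, cancel := context.WithTimeout(ctx, budget)
	defer cancel()

	delay := time.Second
	for {
		err := op()
		if err == nil || errors.Is(err, errPermanent) {
			return err
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("giving up: %w (last error: %v)", ctx.Err(), err)
		case <-time.After(delay):
		}
		if delay < time.Minute {
			delay *= 2
		}
	}
}

func main() {
	attempts := 0
	err := retry(context.Background(), 15*time.Minute, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("connection reset") // transient, will be retried
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err) // attempts: 3 err: <nil>
}
```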
-If a download yields a corrupt file or blob, then the download will be retried once. +If a download yields a corrupt file or blob, then the download will be retried +once. Most parts of the new backend error handling can temporarily be disabled by -setting the environment variable -`RESTIC_FEATURES=backend-error-redesign=false`. Note that this feature flag will -be removed in the next minor restic version. +setting the environment variable `RESTIC_FEATURES=backend-error-redesign=false`. +Note that this feature flag will be removed in the next minor restic version. https://github.com/restic/restic/issues/4627 https://github.com/restic/restic/issues/4193 diff --git a/changelog/unreleased/issue-4656 b/changelog/unreleased/issue-4656 index 8d16f0b4885..ef8c1e12ae6 100644 --- a/changelog/unreleased/issue-4656 +++ b/changelog/unreleased/issue-4656 @@ -1,6 +1,6 @@ -Bugfix: Properly report the ID of newly added keys +Bugfix: Properly report ID of newly added keys -`restic key add` now reports the ID of a newly added key. This simplifies +`restic key add` now reports the ID of the newly added key. This simplifies selecting a specific key using the `--key-hint key` option. https://github.com/restic/restic/issues/4656 diff --git a/changelog/unreleased/issue-4676 b/changelog/unreleased/issue-4676 index e95118e726a..ecea793617c 100644 --- a/changelog/unreleased/issue-4676 +++ b/changelog/unreleased/issue-4676 @@ -1,8 +1,8 @@ -Enhancement: Move key add, list, remove and passwd as separate sub-commands +Enhancement: Make `key` command's actions separate sub-commands -Restic now provides usage documentation for the `key` command. Each sub-command; -`add`, `list`, `remove` and `passwd` now have their own sub-command documentation -which can be invoked using `restic key --help`. +Each of the `add`, `list`, `remove` and `passwd` actions provided by the `key` +command is now a separate sub-command and have its own documentation which can +be invoked using `restic key --help`. https://github.com/restic/restic/issues/4676 https://github.com/restic/restic/pull/4685 diff --git a/changelog/unreleased/issue-4678 b/changelog/unreleased/issue-4678 index 9f9a213e105..401449bd2ee 100644 --- a/changelog/unreleased/issue-4678 +++ b/changelog/unreleased/issue-4678 @@ -1,7 +1,7 @@ -Enhancement: Add --target flag to the dump command +Enhancement: Add `--target` option to the `dump` command -Restic `dump` always printed to the standard output. It now permits to select a -`--target` file to write the output to. +Restic `dump` always printed to the standard output. It now supports specifying +a `--target` file to write its output to. https://github.com/restic/restic/issues/4678 https://github.com/restic/restic/pull/4682 diff --git a/changelog/unreleased/issue-4707 b/changelog/unreleased/issue-4707 index 3c5ffa2ad8f..3c8f1a2f39a 100644 --- a/changelog/unreleased/issue-4707 +++ b/changelog/unreleased/issue-4707 @@ -1,12 +1,12 @@ -Change: Disallow S3 anonymous authentication by default +Change: Disable S3 anonymous authentication by default -When using the S3 backend with anonymous authentication, it continuously tried -to retrieve new authentication credentials, which caused bad performance. +When using the S3 backend with anonymous authentication, it continuously +tried to retrieve new authentication credentials, causing bad performance. -Now, to use anonymous authentication, it is necessary to pass the option `-o -s3.unsafe-anonymous-auth=true` to restic. 
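The issue-4707 entry being rewritten here gates anonymous S3 access behind `-o s3.unsafe-anonymous-auth=true`. A minimal sketch of that decision rule, with illustrative parameter names rather than restic's real configuration types:

```
package main

import (
	"errors"
	"fmt"
)

// anonymousAllowed reports whether unauthenticated S3 access may be used.
// Following the changelog entry, anonymous access is only permitted when the
// extended option s3.unsafe-anonymous-auth has been set explicitly.
func anonymousAllowed(keyID, secret string, unsafeAnonymousAuth bool) (bool, error) {
	if keyID != "" || secret != "" {
		return false, nil // real credentials are available, use them
	}
	if unsafeAnonymousAuth {
		return true, nil
	}
	return false, errors.New("no credentials found; set -o s3.unsafe-anonymous-auth=true to use anonymous access")
}

func main() {
	anon, err := anonymousAllowed("", "", false)
	fmt.Println(anon, err) // false, error asking for the extended option
}
```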
+Now, to use anonymous authentication, it is necessary to pass the extended +option `-o s3.unsafe-anonymous-auth=true` to restic. -It is temporarily possible to revert to the old behavior by setting the +It is possible to temporarily revert to the old behavior by setting the environment variable `RESTIC_FEATURES=explicit-s3-anonymous-auth=false`. Note that this feature flag will be removed in the next minor restic version. diff --git a/changelog/unreleased/issue-4733 b/changelog/unreleased/issue-4733 index 1fc2715870e..fb5a072d68c 100644 --- a/changelog/unreleased/issue-4733 +++ b/changelog/unreleased/issue-4733 @@ -1,9 +1,11 @@ Enhancement: Allow specifying `--host` via environment variable Restic commands that operate on snapshots, such as `restic backup` and -`restic snapshots`, support the `--host` flag to specify the hostname for -grouoping snapshots. They now permit selecting the hostname via the -environment variable `RESTIC_HOST`. `--host` still takes precedence over the +`restic snapshots`, support the `--host` option to specify the hostname +for grouping snapshots. + +Such commands now also support specifying the hostname via the environment +variable `RESTIC_HOST`. Note that `--host` still takes precedence over the environment variable. https://github.com/restic/restic/issues/4733 diff --git a/changelog/unreleased/issue-4744 b/changelog/unreleased/issue-4744 index b0ede1c5c01..b5c759bedfd 100644 --- a/changelog/unreleased/issue-4744 +++ b/changelog/unreleased/issue-4744 @@ -1,7 +1,7 @@ Change: Include full key ID in JSON output of `key list` -We have changed the JSON output of the `key list` command to include the full -key ID instead of just a shortened version, as the latter can be ambiguous +The JSON output of the `key list` command has changed to include the full key +ID instead of just a shortened version of the ID, as the latter can be ambiguous in some rare cases. To derive the short ID, please truncate the full ID down to eight characters. diff --git a/changelog/unreleased/issue-4760 b/changelog/unreleased/issue-4760 index bb2d9c5b4e0..e56f41a443d 100644 --- a/changelog/unreleased/issue-4760 +++ b/changelog/unreleased/issue-4760 @@ -2,7 +2,7 @@ Bugfix: Fix possible error on concurrent cache cleanup If multiple restic processes concurrently cleaned up no longer existing files from the cache, this could cause some of the processes to fail with an `no such -file or directory` error. This has been fixed. +file or directory` error. This has now been fixed. https://github.com/restic/restic/issues/4760 https://github.com/restic/restic/pull/4761 diff --git a/changelog/unreleased/issue-4768 b/changelog/unreleased/issue-4768 index dd52089e1d3..9fb1a29de81 100644 --- a/changelog/unreleased/issue-4768 +++ b/changelog/unreleased/issue-4768 @@ -1,7 +1,7 @@ -Enhancement: Allow custom User-Agent to be specified for outgoing requests +Enhancement: Allow specifying custom User-Agent for outgoing requests -Restic now permits setting a custom `User-Agent` for outgoing HTTP requests -using the global flag `--http-user-agent` or the `RESTIC_HTTP_USER_AGENT` +Restic now supports setting a custom `User-Agent` for outgoing HTTP requests +using the global option `--http-user-agent` or the `RESTIC_HTTP_USER_AGENT` environment variable. 
https://github.com/restic/restic/issues/4768 diff --git a/changelog/unreleased/issue-4781 b/changelog/unreleased/issue-4781 index b4af20885a4..2c9584d7758 100644 --- a/changelog/unreleased/issue-4781 +++ b/changelog/unreleased/issue-4781 @@ -1,8 +1,8 @@ -Enhancement: Add restore flags to read include and exclude patterns from files +Enhancement: Add `restore` options to read include/exclude patterns from files Restic now supports reading include and exclude patterns from files using the `--include-file`, `--exclude-file`, `--iinclude-file` and `--iexclude-file` -flags. +options of the `restore` command. https://github.com/restic/restic/issues/4781 https://github.com/restic/restic/pull/4811 \ No newline at end of file diff --git a/changelog/unreleased/issue-4817 b/changelog/unreleased/issue-4817 index c1d5f658d1c..83c68277563 100644 --- a/changelog/unreleased/issue-4817 +++ b/changelog/unreleased/issue-4817 @@ -1,18 +1,20 @@ Enhancement: Make overwrite behavior of `restore` customizable The `restore` command now supports an `--overwrite` option to configure whether -already existing files are overwritten. This behavior can now be configured via -the `--overwrite` option. The following values are supported: +already existing files are overwritten. The overwrite behavior can be configured +using the following option values: -* `--overwrite always` (default): always overwrites already existing files. `restore` - will verify the existing file content and only restore mismatching parts to minimize - downloads. Updates the metadata of all files. -* `--overwrite if-changed`: like the previous case, but speeds up the file content check - by assuming that files with matching size and modification time (mtime) are already up to date. - In case of a mismatch, the full file content is verified. Updates the metadata of all files. -* `--overwrite if-newer`: only overwrite existing files if the file in the snapshot has a - newer modification time (mtime). -* `--overwrite never`: never overwrite existing files. +- `--overwrite always` (default): Always overwrites already existing files. + The `restore` command will verify the existing file content and only restore + mismatching parts to minimize downloads. Updates the metadata of all files. +- `--overwrite if-changed`: Like `always`, but speeds up the file content check + by assuming that files with matching size and modification time (mtime) are + already up to date. In case of a mismatch, the full file content is verified + like with `always`. Updates the metadata of all files. +- `--overwrite if-newer`: Like `always`, but only overwrites existing files + when the file in the snapshot has a newer modification time (mtime) than the + existing file. +- `--overwrite never`: Never overwrites existing files. https://github.com/restic/restic/issues/4817 https://github.com/restic/restic/issues/200 diff --git a/changelog/unreleased/issue-4850 b/changelog/unreleased/issue-4850 index ad065cc2188..bc752cb05e5 100644 --- a/changelog/unreleased/issue-4850 +++ b/changelog/unreleased/issue-4850 @@ -1,8 +1,8 @@ -Bugfix: correctly handle UTF-16 password files in `key add/passwd` +Bugfix: Handle UTF-16 password files in `key` command correctly -`key add` and `key passwd` did not properly decode UTF-16 encoded password read -from a password file. This has been fix to match the decoding when opening a -repository. +Previously, `key add` and `key passwd` did not properly decode UTF-16 +encoded passwords read from a password file. 
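The issue-4850 entry here concerns password files encoded as UTF-16. A standalone sketch of BOM-aware decoding using `golang.org/x/text`; it mirrors the idea described in the entry, not restic's exact implementation, and the file name in `main` is a hypothetical example.

```
package main

import (
	"bytes"
	"fmt"
	"os"

	"golang.org/x/text/encoding/unicode"
)

// readPasswordFile returns the password from file, decoding UTF-16 content
// (detected via its byte order mark) to UTF-8 and trimming the line ending.
func readPasswordFile(file string) (string, error) {
	data, err := os.ReadFile(file)
	if err != nil {
		return "", err
	}

	if bytes.HasPrefix(data, []byte{0xFF, 0xFE}) || bytes.HasPrefix(data, []byte{0xFE, 0xFF}) {
		// The BOM tells the decoder which endianness to use.
		dec := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewDecoder()
		data, err = dec.Bytes(data)
		if err != nil {
			return "", err
		}
	}
	return string(bytes.TrimRight(data, "\r\n")), nil
}

func main() {
	pw, err := readPasswordFile("password.txt") // hypothetical input file
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("password length:", len(pw))
}
```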
This has now been fixed to +correctly match the encoding when opening a repository. https://github.com/restic/restic/issues/4850 https://github.com/restic/restic/pull/4851 diff --git a/changelog/unreleased/issue-4902 b/changelog/unreleased/issue-4902 index 331de00f2f3..507d8abbea9 100644 --- a/changelog/unreleased/issue-4902 +++ b/changelog/unreleased/issue-4902 @@ -1,6 +1,8 @@ -Enhancement: Update snapshot summary on rewrite +Bugfix: Update snapshot summary on `rewrite` -Restic now recalculates the total number of files and bytes processed when files are excluded during rewrite. +Restic previously did not recalculate the total number of files and bytes +processed when files were excluded from a snapshot by the `rewrite` command. +This has now been fixed. https://github.com/restic/restic/issues/4902 https://github.com/restic/restic/pull/4905 diff --git a/changelog/unreleased/issue-662 b/changelog/unreleased/issue-662 index e5a7c6fe80d..4e99a8f7352 100644 --- a/changelog/unreleased/issue-662 +++ b/changelog/unreleased/issue-662 @@ -1,11 +1,11 @@ -Enhancement: `backup` can omit snapshot creation if there was no change +Enhancement: Omit snapshot creation if nothing changed -The `backup` command always created a snapshot even if nothing changed -compared to the parent snapshot. +The `backup` command always created a snapshot even if nothing in the +backup set changed compared to the parent snapshot. Restic now supports the `--skip-if-unchanged` option for the `backup` -command to omit creating a snapshot if the new snapshot's content would -be identical to that of the parent snapshot. +command, which omits creating a snapshot if the new snapshot's content +would be identical to that of the parent snapshot. https://github.com/restic/restic/issues/662 https://github.com/restic/restic/pull/4816 diff --git a/changelog/unreleased/issue-693 b/changelog/unreleased/issue-693 index 34b5b945510..4a8c766a42a 100644 --- a/changelog/unreleased/issue-693 +++ b/changelog/unreleased/issue-693 @@ -1,8 +1,8 @@ -Enhancement: Support printing snapshot size in `snapshots` command +Enhancement: Include snapshot size in `snapshots` output -The `snapshots` command now supports printing the snapshot size for snapshots -created using this or a future restic version. For this, the `backup` command -now stores the backup summary statistics in the snapshot. +The `snapshots` command now prints the size for snapshots created using this +or a future restic version. To achieve this, the `backup` command now stores +the backup summary statistics in the snapshot. The text output of the `snapshots` command only shows the snapshot size. The other statistics are only included in the JSON output. To inspect these diff --git a/changelog/unreleased/issue-828 b/changelog/unreleased/issue-828 index 2e8bcd0b023..0c752dea857 100644 --- a/changelog/unreleased/issue-828 +++ b/changelog/unreleased/issue-828 @@ -1,4 +1,4 @@ -Enhancement: Improve `repair packs` command +Enhancement: Improve features of the `repair packs` command The `repair packs` command has been improved to also be able to process truncated pack files.
The `check` and `check --read-data` command will provide diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 index fbdcfd7e5cb..9ecec4838d9 100644 --- a/changelog/unreleased/pull-3067 +++ b/changelog/unreleased/pull-3067 @@ -1,22 +1,25 @@ -Enhancement: Add options to configure Windows Shadow Copy Service +Enhancement: Add extended options to configure Windows Shadow Copy Service -Restic always used 120 seconds timeout and unconditionally created VSS snapshots -for all volume mount points on disk. Now this behavior can be fine-tuned by -new options, like exclude specific volumes and mount points or completely -disable auto snapshotting of volume mount points. +Previous, restic always used a 120 seconds timeout and unconditionally created +VSS snapshots for all volume mount points on disk. This behavior can now be +fine-tuned by the following new extended options (available only on Windows): -For example: +- `-o vss.timeout`: Time that VSS can spend creating snapshot before timing out (default: 120s) +- `-o vss.exclude-all-mount-points`: Exclude mountpoints from snapshotting on all volumes (default: false) +- `-o vss.exclude-volumes`: Semicolon separated list of volumes to exclude from snapshotting +- `-o vss.provider`: VSS provider identifier which will be used for snapshotting + +For example, change VSS timeout to five minutes and disable snapshotting of +mount points on all volumes: restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.exclude-all-mount-points=true -changes timeout to five minutes and disable snapshotting of mount points on all volumes, and +Exclude drive `d:`, mount point `c:\mnt` and a specific volume from snapshotting: restic backup --use-fs-snapshot -o vss.exclude-volumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" -excludes drive `d:`, mount point `c:\mnt` and specific volume from VSS snapshotting. +Uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider: restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5} -uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider. - https://github.com/restic/restic/pull/3067 diff --git a/changelog/unreleased/pull-4006 b/changelog/unreleased/pull-4006 index 01f4ddb6e98..3bfacb8a060 100644 --- a/changelog/unreleased/pull-4006 +++ b/changelog/unreleased/pull-4006 @@ -5,12 +5,11 @@ The feature flag will be removed after repository format version 3 becomes available or be replaced with a different solution. When creating backups from a filesystem snapshot, for example created using -btrfs subvolumes, the deviceID of the filesystem changes compared to previous +BTRFS subvolumes, the deviceID of the filesystem changes compared to previous snapshots. This prevented restic from deduplicating the directory metadata of a snapshot. -When this alpha feature is enabled, then the deviceID is only stored for -hardlinks. This significantly reduces the metadata duplication for most -backups. +When this alpha feature is enabled, the deviceID is only stored for hardlinks, +which significantly reduces the metadata duplication for most backups. 
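The pull-4006 entry above stores the device ID only for hardlinks. A small Linux-only sketch of that decision, using `os.Lstat` and `syscall.Stat_t`; the function is an illustration of the rule described in the entry, not restic's code.

```
//go:build linux

package main

import (
	"fmt"
	"os"
	"syscall"
)

// deviceIDToStore returns the device ID that would be recorded for a file:
// zero unless the file is a hardlink (link count > 1), for which the real
// device ID is needed to match the links up again on restore.
func deviceIDToStore(fi os.FileInfo) uint64 {
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok || st.Nlink <= 1 {
		return 0
	}
	return uint64(st.Dev)
}

func main() {
	fi, err := os.Lstat("/etc/hostname")
	if err != nil {
		panic(err)
	}
	fmt.Println("device ID to store:", deviceIDToStore(fi))
}
```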
https://github.com/restic/restic/pull/4006 diff --git a/changelog/unreleased/pull-4354 b/changelog/unreleased/pull-4354 index dddbbc76530..d3cf3324924 100644 --- a/changelog/unreleased/pull-4354 +++ b/changelog/unreleased/pull-4354 @@ -1,7 +1,7 @@ -Enhancement: Significantly reduce prune memory usage +Enhancement: Significantly reduce `prune` memory usage -Prune has been optimized to use up to 60% less memory. The memory usage should -now be roughly similar to creating a backup. +The `prune` command has been optimized to use up to 60% less memory. +The memory usage should now be roughly similar to creating a backup. https://github.com/restic/restic/pull/4354 https://github.com/restic/restic/pull/4812 diff --git a/changelog/unreleased/pull-4503 b/changelog/unreleased/pull-4503 index b52552d6967..549aa9f532b 100644 --- a/changelog/unreleased/pull-4503 +++ b/changelog/unreleased/pull-4503 @@ -1,8 +1,8 @@ Bugfix: Correct hardlink handling in `stats` command -If files on different devices had the same inode id, then the `stats` command -did not correctly calculate the snapshot size. This has been fixed. +If files on different devices had the same inode ID, the `stats` command +did not correctly calculate the snapshot size. This has now been fixed. +https://forum.restic.net/t/possible-bug-in-stats/6461/8 https://github.com/restic/restic/pull/4503 https://github.com/restic/restic/pull/4006 -https://forum.restic.net/t/possible-bug-in-stats/6461/8 diff --git a/changelog/unreleased/pull-4526 b/changelog/unreleased/pull-4526 index 3a538f57a70..4d0fee69104 100644 --- a/changelog/unreleased/pull-4526 +++ b/changelog/unreleased/pull-4526 @@ -1,11 +1,12 @@ Enhancement: Add bitrot detection to `diff` command -The output of the `diff` command now includes the modifier `?` for files -to indicate bitrot in backed up files. It will appear whenever there is a -difference in content while the metadata is exactly the same. Since files with -unchanged metadata are normally not read again when creating a backup, the -detection is only effective if the right-hand side of the diff has been created -with "backup --force". +The output of the `diff` command now includes the modifier `?` for files to +indicate bitrot in backed up files. The `?` will appear whenever there is a +difference in content while the metadata is exactly the same. + +Since files with unchanged metadata are normally not read again when creating +a backup, the detection is only effective when the right-hand side of the diff +has been created with `backup --force`. https://github.com/restic/restic/issues/805 https://github.com/restic/restic/pull/4526 diff --git a/changelog/unreleased/pull-4573 b/changelog/unreleased/pull-4573 index bd5c2c423cd..36fc727bef0 100644 --- a/changelog/unreleased/pull-4573 +++ b/changelog/unreleased/pull-4573 @@ -1,5 +1,6 @@ -Enhancement: Add `--new-host` and `--new-time` options to `rewrite` command +Enhancement: Support rewriting host and time metadata in snapshots -`restic rewrite` now allows rewriting the host and / or time metadata of a snapshot. +The `rewrite` command now supports rewriting the host and/or time metadata of +a snapshot using the new `--new-host` and `--new-time` options. 
https://github.com/restic/restic/pull/4573 diff --git a/changelog/unreleased/pull-4590 b/changelog/unreleased/pull-4590 index 353d2161615..7904c18af42 100644 --- a/changelog/unreleased/pull-4590 +++ b/changelog/unreleased/pull-4590 @@ -1,7 +1,6 @@ -Enhancement: `mount` tests mountpoint existence before opening the repository +Enhancement: Speed up `mount` command's error detection -The restic `mount` command now checks for the existence of the -mountpoint before opening the repository, leading to quicker error -detection. +The `mount` command now checks for the existence of the mountpoint before +opening the repository, leading to quicker error detection. https://github.com/restic/restic/pull/4590 diff --git a/changelog/unreleased/pull-4611 b/changelog/unreleased/pull-4611 index a3c7a24d0c8..426ed590ffd 100644 --- a/changelog/unreleased/pull-4611 +++ b/changelog/unreleased/pull-4611 @@ -1,7 +1,9 @@ -Enhancement: Back up windows created time and file attributes like hidden flag +Enhancement: Back up more file metadata on Windows -Restic did not back up windows-specific meta-data like created time and file attributes like hidden flag. -Restic now backs up file created time and file attributes like hidden, readonly and encrypted flag when backing up files and folders on Windows. +Previously, restic did not back up all common Windows-specific metadata. + +Restic now stores file creation time and file attributes like the hidden, +read-only and encrypted flags when backing up files and folders on Windows. https://github.com/restic/restic/pull/4611 diff --git a/changelog/unreleased/pull-4615 b/changelog/unreleased/pull-4615 index 7e2d4a01712..a8916df3c8d 100644 --- a/changelog/unreleased/pull-4615 +++ b/changelog/unreleased/pull-4615 @@ -1,6 +1,6 @@ -Bugfix: `find` ignored directories in some cases +Bugfix: Make `find` not sometimes ignore directories In some cases, the `find` command ignored empty or moved directories. This has -been fixed. +now been fixed. https://github.com/restic/restic/pull/4615 diff --git a/changelog/unreleased/pull-4664 b/changelog/unreleased/pull-4664 index 74196cd9bf6..655ccd082bd 100644 --- a/changelog/unreleased/pull-4664 +++ b/changelog/unreleased/pull-4664 @@ -1,8 +1,10 @@ -Enhancement: `ls` uses `message_type` field to distinguish JSON messages +Enhancement: Make `ls` use `message_type` field in JSON output -The `ls` command was the only command that used the `struct_type` field to determine -the message type in the JSON output format. Now, the JSON output of the -`ls` command also includes the `message_type`. The `struct_type` field is -still included, but it deprecated. +The `ls` command was the only restic command that used the `struct_type` field +in its JSON output format to specify the message type. + +The JSON output of the `ls` command now also includes the `message_type` field, +which is consistent with other commands. The `struct_type` field is still +included, but now deprecated. https://github.com/restic/restic/pull/4664 diff --git a/changelog/unreleased/pull-4703 b/changelog/unreleased/pull-4703 index 4df3385a019..178842c6c62 100644 --- a/changelog/unreleased/pull-4703 +++ b/changelog/unreleased/pull-4703 @@ -1,9 +1,11 @@ -Bugfix: Shutdown cleanly when SIGTERM is received +Bugfix: Shutdown cleanly when receiving SIGTERM -Prior, if restic received SIGTERM it'd just immediately terminate skipping -cleanup- resulting in potential issues like stale locks being left behind. 
+Previously, when restic received the SIGTERM signal it would terminate +immediately, skipping cleanup and potentially causing issues like stale locks +being left behind. This primarily affected containerized restic invocations +that use SIGTERM, but could also be triggered via a simple `killall restic`. -This primarily effected containerized restic invocations- they use SIGTERM- -but this could be triggered via a simple `killall restic` in addition. +This has now been fixed, such that restic shuts down cleanly when receiving +the SIGTERM signal. https://github.com/restic/restic/pull/4703 diff --git a/changelog/unreleased/pull-4708 b/changelog/unreleased/pull-4708 index 5c5d426b551..16bf33e5795 100644 --- a/changelog/unreleased/pull-4708 +++ b/changelog/unreleased/pull-4708 @@ -1,11 +1,13 @@ Enhancement: Back up and restore SecurityDescriptors on Windows -Restic now backs up and restores SecurityDescriptors when backing up files and folders -on Windows which includes owner, group, discretionary access control list (DACL), -system access control list (SACL). This requires the user to be a member of backup -operators or the application must be run as admin. -If that is not the case, only the current user's owner, group and DACL will be backed up -and during restore only the DACL of the backed file will be restored while the current -user's owner and group will be set during the restore. +Restic now backs up and restores SecurityDescriptors for files and folders on +Windows which includes owner, group, discretionary access control list (DACL) +and system access control list (SACL). + +This requires the user to be a member of backup operators or the application +must be run as admin. If that is not the case, only the current user's owner, +group and DACL will be backed up, and during restore only the DACL of the +backed up file will be restored, with the current user's owner and group +being set on the restored file. https://github.com/restic/restic/pull/4708 diff --git a/changelog/unreleased/pull-4709 b/changelog/unreleased/pull-4709 index 5ffb2a6a684..62be8b54b31 100644 --- a/changelog/unreleased/pull-4709 +++ b/changelog/unreleased/pull-4709 @@ -1,10 +1,10 @@ -Bugfix: Correct `--no-lock` handling of `ls` and `tag` command +Bugfix: Correct `--no-lock` handling of `ls` and `tag` commands -The `ls` command never locked the repository. This has been fixed. The old -behavior is still supported using `ls --no-lock`. The latter invocation also -works with older restic versions. +The `ls` command never locked the repository. This has now been fixed, with the +old behavior still being supported using `ls --no-lock`. The latter invocation +also works with older restic versions. -The `tag` command erroneously accepted the `--no-lock` command. The command +The `tag` command erroneously accepted the `--no-lock` option. This command now always requires an exclusive lock. https://github.com/restic/restic/pull/4709 diff --git a/changelog/unreleased/pull-4737 b/changelog/unreleased/pull-4737 index 2637c8f83e2..bf528237db8 100644 --- a/changelog/unreleased/pull-4737 +++ b/changelog/unreleased/pull-4737 @@ -1,5 +1,6 @@ -Enhancement: include snapshot id in reason field of forget JSON output +Enhancement: Include snapshot ID in `reason` field of `forget` JSON output -The JSON output of the `forget` command now includes the `id` and `short_id` of a snapshot in the `reason` field. +The JSON output of the `forget` command now includes `id` and `short_id` of +snapshots in the `reason` field.
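For example, a quick way to inspect the new fields (a sketch; the retention policy is a placeholder and `jq` is used here only for pretty-printing):

    restic forget --dry-run --keep-daily 7 --json | jq .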
https://github.com/restic/restic/pull/4737 diff --git a/changelog/unreleased/pull-4764 b/changelog/unreleased/pull-4764 index 61b2edac939..d85eadbc317 100644 --- a/changelog/unreleased/pull-4764 +++ b/changelog/unreleased/pull-4764 @@ -1,8 +1,10 @@ -Enhancement: Remove all snapshots using `forget --unsafe-allow-remove-all` +Enhancement: Support forgetting all snapshots -The forget command now supports the `--unsafe-allow-remove-all` option. It must -always be combined with a snapshot filter (by host, path or tag). -For example the command `forget --tag example --unsafe-allow-remove-all`, -removes all snapshots with tag `example`. +The `forget` command now supports the `--unsafe-allow-remove-all` option, which +removes all snapshots in the repository. + +This option must always be combined with a snapshot filter (by host, path or +tag). For example, the command `forget --tag example --unsafe-allow-remove-all` +removes all snapshots with the tag "example". https://github.com/restic/restic/pull/4764 diff --git a/changelog/unreleased/pull-4796 b/changelog/unreleased/pull-4796 index 319b9ccdcec..2729c635e60 100644 --- a/changelog/unreleased/pull-4796 +++ b/changelog/unreleased/pull-4796 @@ -1,8 +1,8 @@ Enhancement: Improve `dump` performance for large files -The `dump` command now retrieves the data chunks for a file in parallel. This -improves the download performance by up to the configured number of parallel -backend connections. +The `dump` command now retrieves the data chunks for a file in +parallel. This improves the download performance by up to as many +times as the configured number of parallel backend connections. https://github.com/restic/restic/issues/3406 https://github.com/restic/restic/pull/4796 diff --git a/changelog/unreleased/pull-4807 b/changelog/unreleased/pull-4807 index 12f8391e32e..b5e5cd7fd2f 100644 --- a/changelog/unreleased/pull-4807 +++ b/changelog/unreleased/pull-4807 @@ -1,5 +1,6 @@ -Enhancement: Back up and restore Extended Attributes on Windows NTFS +Enhancement: Support Extended Attributes on Windows NTFS -Restic now backs up and restores Extended Attributes on Windows NTFS when backing up files and folders. +Restic now backs up and restores Extended Attributes for files +and folders on Windows NTFS. -https://github.com/restic/restic/pull/4807 \ No newline at end of file +https://github.com/restic/restic/pull/4807 diff --git a/changelog/unreleased/pull-4839 b/changelog/unreleased/pull-4839 index 8d644407b14..672ac2e69fb 100644 --- a/changelog/unreleased/pull-4839 +++ b/changelog/unreleased/pull-4839 @@ -2,6 +2,6 @@ Enhancement: Add dry-run support to `restore` command The `restore` command now supports the `--dry-run` option to perform a dry run. Pass the `--verbose=2` option to see which files would -remain unchanged, which would be updated or freshly restored. +remain unchanged, and which would be updated or freshly restored. https://github.com/restic/restic/pull/4839 diff --git a/changelog/unreleased/pull-4884 b/changelog/unreleased/pull-4884 index ff2bff96756..3a7e0d3423c 100644 --- a/changelog/unreleased/pull-4884 +++ b/changelog/unreleased/pull-4884 @@ -1,10 +1,10 @@ -Change: return exit code 10 or 11 if repository does not exist or is locked +Change: Return exit code 10 and 11 for non-existing and locked repository -If a repository does not exist or cannot be locked, then restic always returned -exit code 1. This made it difficult to distinguish these cases from other -errors. 
+If a repository does not exist or cannot be locked, restic previously always +returned exit code 1. This made it difficult to distinguish these cases from +other errors. -Now, restic returns exit code 10 if the repository does not exist and exit code +Restic now returns exit code 10 if the repository does not exist, and exit code 11 if the repository could not be locked due to a conflicting lock. https://github.com/restic/restic/issues/956 From 729e009485472aced0e677d80a7647d4a9b5899c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 24 Jul 2024 19:40:53 +0200 Subject: [PATCH 488/893] replace all uses of target in relation to backup --- CHANGELOG.md | 6 +++--- changelog/0.12.0_2021-02-14/issue-3232 | 4 ++-- changelog/0.16.0_2023-07-31/issue-3941 | 2 +- doc/040_backup.rst | 2 +- internal/archiver/archiver.go | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b7ab571588c..670f1a40e79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -603,7 +603,7 @@ restic users. The changes are ordered by importance. * Enhancement #3941: Support `--group-by` for backup parent selection Previously, the `backup` command by default selected the parent snapshot based on the - hostname and the backup targets. When the backup path list changed, the `backup` command was + hostname and the backup paths. When the backup path list changed, the `backup` command was unable to determine a suitable parent snapshot and had to read all files again. The new `--group-by` option for the `backup` command allows filtering snapshots for the @@ -2575,10 +2575,10 @@ restic users. The changes are ordered by importance. - * Bugfix #3232: Correct statistics for overlapping targets + * Bugfix #3232: Correct statistics for overlapping backup sources A user reported that restic's statistics and progress information during backup was not - correctly calculated when the backup targets (files/dirs to save) overlap. For example, + correctly calculated when the backup sources (files/dirs to save) overlap. For example, consider a directory `foo` which contains (among others) a file `foo/bar`. When `restic backup foo foo/bar` was run, restic counted the size of the file `foo/bar` twice, so the completeness percentage as well as the number of files was wrong. This is now corrected. diff --git a/changelog/0.12.0_2021-02-14/issue-3232 b/changelog/0.12.0_2021-02-14/issue-3232 index 7d9f5c3b7ae..30b9ee29396 100644 --- a/changelog/0.12.0_2021-02-14/issue-3232 +++ b/changelog/0.12.0_2021-02-14/issue-3232 @@ -1,7 +1,7 @@ -Bugfix: Correct statistics for overlapping targets +Bugfix: Correct statistics for overlapping backup sources A user reported that restic's statistics and progress information during backup -was not correctly calculated when the backup targets (files/dirs to save) +was not correctly calculated when the backup sources (files/dirs to save) overlap. For example, consider a directory `foo` which contains (among others) a file `foo/bar`.
When `restic backup foo foo/bar` was run, restic counted the size of the file `foo/bar` twice, so the completeness percentage as well as the diff --git a/changelog/0.16.0_2023-07-31/issue-3941 b/changelog/0.16.0_2023-07-31/issue-3941 index ff56d52ccc6..f1f02db9387 100644 --- a/changelog/0.16.0_2023-07-31/issue-3941 +++ b/changelog/0.16.0_2023-07-31/issue-3941 @@ -1,7 +1,7 @@ Enhancement: Support `--group-by` for backup parent selection Previously, the `backup` command by default selected the parent snapshot based -on the hostname and the backup targets. When the backup path list changed, the +on the hostname and the backup paths. When the backup path list changed, the `backup` command was unable to determine a suitable parent snapshot and had to read all files again. diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 39a24ed59c7..81d99e07110 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -239,7 +239,7 @@ By default, restic always creates a new snapshot even if nothing has changed compared to the parent snapshot. To omit the creation of a new snapshot in this case, specify the ``--skip-if-unchanged`` option. -Note that when using absolute paths to specify the backup target, then also +Note that when using absolute paths to specify the backup source, then also changes to the parent folders result in a changed snapshot. For example, a backup of ``/home/user/work`` will create a new snapshot if the metadata of either ``/``, ``/home`` or ``/home/user`` change. To avoid this problem run restic from diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 19ad12ab8c2..d9f089e81a1 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -634,7 +634,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, } debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath) - // in some cases reading xattrs for directories above the backup target is not allowed + // in some cases reading xattrs for directories above the backup source is not allowed // thus ignore errors for such folders. node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi, true) if err != nil { From 636bf2915e8fab3cbff615365898e9d051038f47 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 24 Jul 2024 19:28:45 +0200 Subject: [PATCH 489/893] minor tweaks --- changelog/unreleased/issue-1786 | 2 +- changelog/unreleased/issue-3600 | 6 +++--- changelog/unreleased/issue-4850 | 4 ++-- changelog/unreleased/issue-662 | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/changelog/unreleased/issue-1786 b/changelog/unreleased/issue-1786 index b48ac0db6b4..41517f5dbca 100644 --- a/changelog/unreleased/issue-1786 +++ b/changelog/unreleased/issue-1786 @@ -9,7 +9,7 @@ repositories with an empty password, and specifying `--insecure-no-password` while also passing a password to restic via a CLI option or environment variable results in an error. -The `init` and `copy` commands adds the related `--from-insecure-no-password` +The `init` and `copy` commands add the related `--from-insecure-no-password` option, which applies to the source repository. The `key add` and `key passwd` commands add the `--new-insecure-no-password` option to add or set an empty password. 
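For example, a sketch of creating and then backing up to a repository without a password (the repository and source paths are placeholders; as described above, the option has to be repeated on every invocation):

    restic -r /srv/restic-repo init --insecure-no-password
    restic -r /srv/restic-repo backup --insecure-no-password /home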
diff --git a/changelog/unreleased/issue-3600 b/changelog/unreleased/issue-3600 index b1e9af12617..b972ecc647f 100644 --- a/changelog/unreleased/issue-3600 +++ b/changelog/unreleased/issue-3600 @@ -1,7 +1,7 @@ -Bugfix: Handle unreadable xattrs in folders above `backup` target +Bugfix: Handle unreadable xattrs in folders above `backup` source -When backup targets are specified using absolute paths, `backup` also includes -information about the parent folders of the backup targets in the snapshot. +When backup sources are specified using absolute paths, `backup` also includes +information about the parent folders of the backup sources in the snapshot. If the extended attributes for some of these folders could not be read due to missing permissions, this caused the backup to fail. This has now been fixed. diff --git a/changelog/unreleased/issue-4850 b/changelog/unreleased/issue-4850 index bc752cb05e5..b04edd1599f 100644 --- a/changelog/unreleased/issue-4850 +++ b/changelog/unreleased/issue-4850 @@ -1,8 +1,8 @@ Bugfix: Handle UTF-16 password files in `key` command correctly Previously, `key add` and `key passwd` did not properly decode UTF-16 -encoded passwords read from a password file. This has now been fix to -correctly match the encoding when opening a repository. +encoded passwords read from a password file. This has now been fixed +to correctly match the encoding when opening a repository. https://github.com/restic/restic/issues/4850 https://github.com/restic/restic/pull/4851 diff --git a/changelog/unreleased/issue-662 b/changelog/unreleased/issue-662 index 4e99a8f7352..9fd2f27d0df 100644 --- a/changelog/unreleased/issue-662 +++ b/changelog/unreleased/issue-662 @@ -1,4 +1,4 @@ -Enhancement: Omit snapshot creation if nothing changed +Enhancement: Optionally skip snapshot creation if nothing changed The `backup` command always created a snapshot even if nothing in the backup set changed compared to the parent snapshot. From 5a8c91544333cae9076a3ac63e4909d71422779d Mon Sep 17 00:00:00 2001 From: "Leo R. Lundgren" Date: Thu, 25 Jul 2024 14:27:43 +0200 Subject: [PATCH 490/893] doc: Remove unrelated PR URL from changelog entry --- changelog/unreleased/issue-828 | 1 - 1 file changed, 1 deletion(-) diff --git a/changelog/unreleased/issue-828 b/changelog/unreleased/issue-828 index 0c752dea857..72d66dae04c 100644 --- a/changelog/unreleased/issue-828 +++ b/changelog/unreleased/issue-828 @@ -8,5 +8,4 @@ further instructions. 
https://github.com/restic/restic/issues/828 https://github.com/restic/restic/pull/4644 -https://github.com/restic/restic/pull/4655 https://github.com/restic/restic/pull/4882 From ad09d21aafe32f3350e45a9901a5f171b64122e7 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 26 Jul 2024 11:28:55 +0200 Subject: [PATCH 491/893] Prepare changelog for 0.17.0 --- changelog/{unreleased => 0.17.0_2024-07-26}/issue-1786 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-2348 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-3600 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-3806 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4048 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4209 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4251 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4287 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4437 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4472 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4540 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4547 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4549 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4568 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4583 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4601 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4602 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4627 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4656 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4676 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4678 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4707 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4733 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4744 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4760 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4768 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4781 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4817 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4850 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-4902 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-662 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-693 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/issue-828 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-3067 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4006 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4354 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4503 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4526 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4573 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4590 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4611 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4615 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4664 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4703 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4708 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4709 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4737 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4764 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4796 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4807 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4839 | 0 changelog/{unreleased => 0.17.0_2024-07-26}/pull-4884 | 0 52 files changed, 0 insertions(+), 0 deletions(-) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-1786 (100%) rename 
changelog/{unreleased => 0.17.0_2024-07-26}/issue-2348 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-3600 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-3806 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4048 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4209 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4251 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4287 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4437 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4472 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4540 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4547 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4549 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4568 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4583 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4601 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4602 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4627 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4656 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4676 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4678 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4707 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4733 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4744 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4760 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4768 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4781 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4817 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4850 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-4902 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-662 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-693 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/issue-828 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-3067 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4006 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4354 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4503 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4526 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4573 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4590 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4611 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4615 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4664 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4703 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4708 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4709 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4737 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4764 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4796 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4807 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4839 (100%) rename changelog/{unreleased => 0.17.0_2024-07-26}/pull-4884 (100%) diff --git a/changelog/unreleased/issue-1786 
b/changelog/0.17.0_2024-07-26/issue-1786 similarity index 100% rename from changelog/unreleased/issue-1786 rename to changelog/0.17.0_2024-07-26/issue-1786 diff --git a/changelog/unreleased/issue-2348 b/changelog/0.17.0_2024-07-26/issue-2348 similarity index 100% rename from changelog/unreleased/issue-2348 rename to changelog/0.17.0_2024-07-26/issue-2348 diff --git a/changelog/unreleased/issue-3600 b/changelog/0.17.0_2024-07-26/issue-3600 similarity index 100% rename from changelog/unreleased/issue-3600 rename to changelog/0.17.0_2024-07-26/issue-3600 diff --git a/changelog/unreleased/issue-3806 b/changelog/0.17.0_2024-07-26/issue-3806 similarity index 100% rename from changelog/unreleased/issue-3806 rename to changelog/0.17.0_2024-07-26/issue-3806 diff --git a/changelog/unreleased/issue-4048 b/changelog/0.17.0_2024-07-26/issue-4048 similarity index 100% rename from changelog/unreleased/issue-4048 rename to changelog/0.17.0_2024-07-26/issue-4048 diff --git a/changelog/unreleased/issue-4209 b/changelog/0.17.0_2024-07-26/issue-4209 similarity index 100% rename from changelog/unreleased/issue-4209 rename to changelog/0.17.0_2024-07-26/issue-4209 diff --git a/changelog/unreleased/issue-4251 b/changelog/0.17.0_2024-07-26/issue-4251 similarity index 100% rename from changelog/unreleased/issue-4251 rename to changelog/0.17.0_2024-07-26/issue-4251 diff --git a/changelog/unreleased/issue-4287 b/changelog/0.17.0_2024-07-26/issue-4287 similarity index 100% rename from changelog/unreleased/issue-4287 rename to changelog/0.17.0_2024-07-26/issue-4287 diff --git a/changelog/unreleased/issue-4437 b/changelog/0.17.0_2024-07-26/issue-4437 similarity index 100% rename from changelog/unreleased/issue-4437 rename to changelog/0.17.0_2024-07-26/issue-4437 diff --git a/changelog/unreleased/issue-4472 b/changelog/0.17.0_2024-07-26/issue-4472 similarity index 100% rename from changelog/unreleased/issue-4472 rename to changelog/0.17.0_2024-07-26/issue-4472 diff --git a/changelog/unreleased/issue-4540 b/changelog/0.17.0_2024-07-26/issue-4540 similarity index 100% rename from changelog/unreleased/issue-4540 rename to changelog/0.17.0_2024-07-26/issue-4540 diff --git a/changelog/unreleased/issue-4547 b/changelog/0.17.0_2024-07-26/issue-4547 similarity index 100% rename from changelog/unreleased/issue-4547 rename to changelog/0.17.0_2024-07-26/issue-4547 diff --git a/changelog/unreleased/issue-4549 b/changelog/0.17.0_2024-07-26/issue-4549 similarity index 100% rename from changelog/unreleased/issue-4549 rename to changelog/0.17.0_2024-07-26/issue-4549 diff --git a/changelog/unreleased/issue-4568 b/changelog/0.17.0_2024-07-26/issue-4568 similarity index 100% rename from changelog/unreleased/issue-4568 rename to changelog/0.17.0_2024-07-26/issue-4568 diff --git a/changelog/unreleased/issue-4583 b/changelog/0.17.0_2024-07-26/issue-4583 similarity index 100% rename from changelog/unreleased/issue-4583 rename to changelog/0.17.0_2024-07-26/issue-4583 diff --git a/changelog/unreleased/issue-4601 b/changelog/0.17.0_2024-07-26/issue-4601 similarity index 100% rename from changelog/unreleased/issue-4601 rename to changelog/0.17.0_2024-07-26/issue-4601 diff --git a/changelog/unreleased/issue-4602 b/changelog/0.17.0_2024-07-26/issue-4602 similarity index 100% rename from changelog/unreleased/issue-4602 rename to changelog/0.17.0_2024-07-26/issue-4602 diff --git a/changelog/unreleased/issue-4627 b/changelog/0.17.0_2024-07-26/issue-4627 similarity index 100% rename from changelog/unreleased/issue-4627 rename to 
changelog/0.17.0_2024-07-26/issue-4627 diff --git a/changelog/unreleased/issue-4656 b/changelog/0.17.0_2024-07-26/issue-4656 similarity index 100% rename from changelog/unreleased/issue-4656 rename to changelog/0.17.0_2024-07-26/issue-4656 diff --git a/changelog/unreleased/issue-4676 b/changelog/0.17.0_2024-07-26/issue-4676 similarity index 100% rename from changelog/unreleased/issue-4676 rename to changelog/0.17.0_2024-07-26/issue-4676 diff --git a/changelog/unreleased/issue-4678 b/changelog/0.17.0_2024-07-26/issue-4678 similarity index 100% rename from changelog/unreleased/issue-4678 rename to changelog/0.17.0_2024-07-26/issue-4678 diff --git a/changelog/unreleased/issue-4707 b/changelog/0.17.0_2024-07-26/issue-4707 similarity index 100% rename from changelog/unreleased/issue-4707 rename to changelog/0.17.0_2024-07-26/issue-4707 diff --git a/changelog/unreleased/issue-4733 b/changelog/0.17.0_2024-07-26/issue-4733 similarity index 100% rename from changelog/unreleased/issue-4733 rename to changelog/0.17.0_2024-07-26/issue-4733 diff --git a/changelog/unreleased/issue-4744 b/changelog/0.17.0_2024-07-26/issue-4744 similarity index 100% rename from changelog/unreleased/issue-4744 rename to changelog/0.17.0_2024-07-26/issue-4744 diff --git a/changelog/unreleased/issue-4760 b/changelog/0.17.0_2024-07-26/issue-4760 similarity index 100% rename from changelog/unreleased/issue-4760 rename to changelog/0.17.0_2024-07-26/issue-4760 diff --git a/changelog/unreleased/issue-4768 b/changelog/0.17.0_2024-07-26/issue-4768 similarity index 100% rename from changelog/unreleased/issue-4768 rename to changelog/0.17.0_2024-07-26/issue-4768 diff --git a/changelog/unreleased/issue-4781 b/changelog/0.17.0_2024-07-26/issue-4781 similarity index 100% rename from changelog/unreleased/issue-4781 rename to changelog/0.17.0_2024-07-26/issue-4781 diff --git a/changelog/unreleased/issue-4817 b/changelog/0.17.0_2024-07-26/issue-4817 similarity index 100% rename from changelog/unreleased/issue-4817 rename to changelog/0.17.0_2024-07-26/issue-4817 diff --git a/changelog/unreleased/issue-4850 b/changelog/0.17.0_2024-07-26/issue-4850 similarity index 100% rename from changelog/unreleased/issue-4850 rename to changelog/0.17.0_2024-07-26/issue-4850 diff --git a/changelog/unreleased/issue-4902 b/changelog/0.17.0_2024-07-26/issue-4902 similarity index 100% rename from changelog/unreleased/issue-4902 rename to changelog/0.17.0_2024-07-26/issue-4902 diff --git a/changelog/unreleased/issue-662 b/changelog/0.17.0_2024-07-26/issue-662 similarity index 100% rename from changelog/unreleased/issue-662 rename to changelog/0.17.0_2024-07-26/issue-662 diff --git a/changelog/unreleased/issue-693 b/changelog/0.17.0_2024-07-26/issue-693 similarity index 100% rename from changelog/unreleased/issue-693 rename to changelog/0.17.0_2024-07-26/issue-693 diff --git a/changelog/unreleased/issue-828 b/changelog/0.17.0_2024-07-26/issue-828 similarity index 100% rename from changelog/unreleased/issue-828 rename to changelog/0.17.0_2024-07-26/issue-828 diff --git a/changelog/unreleased/pull-3067 b/changelog/0.17.0_2024-07-26/pull-3067 similarity index 100% rename from changelog/unreleased/pull-3067 rename to changelog/0.17.0_2024-07-26/pull-3067 diff --git a/changelog/unreleased/pull-4006 b/changelog/0.17.0_2024-07-26/pull-4006 similarity index 100% rename from changelog/unreleased/pull-4006 rename to changelog/0.17.0_2024-07-26/pull-4006 diff --git a/changelog/unreleased/pull-4354 b/changelog/0.17.0_2024-07-26/pull-4354 similarity index 100% rename from 
changelog/unreleased/pull-4354 rename to changelog/0.17.0_2024-07-26/pull-4354 diff --git a/changelog/unreleased/pull-4503 b/changelog/0.17.0_2024-07-26/pull-4503 similarity index 100% rename from changelog/unreleased/pull-4503 rename to changelog/0.17.0_2024-07-26/pull-4503 diff --git a/changelog/unreleased/pull-4526 b/changelog/0.17.0_2024-07-26/pull-4526 similarity index 100% rename from changelog/unreleased/pull-4526 rename to changelog/0.17.0_2024-07-26/pull-4526 diff --git a/changelog/unreleased/pull-4573 b/changelog/0.17.0_2024-07-26/pull-4573 similarity index 100% rename from changelog/unreleased/pull-4573 rename to changelog/0.17.0_2024-07-26/pull-4573 diff --git a/changelog/unreleased/pull-4590 b/changelog/0.17.0_2024-07-26/pull-4590 similarity index 100% rename from changelog/unreleased/pull-4590 rename to changelog/0.17.0_2024-07-26/pull-4590 diff --git a/changelog/unreleased/pull-4611 b/changelog/0.17.0_2024-07-26/pull-4611 similarity index 100% rename from changelog/unreleased/pull-4611 rename to changelog/0.17.0_2024-07-26/pull-4611 diff --git a/changelog/unreleased/pull-4615 b/changelog/0.17.0_2024-07-26/pull-4615 similarity index 100% rename from changelog/unreleased/pull-4615 rename to changelog/0.17.0_2024-07-26/pull-4615 diff --git a/changelog/unreleased/pull-4664 b/changelog/0.17.0_2024-07-26/pull-4664 similarity index 100% rename from changelog/unreleased/pull-4664 rename to changelog/0.17.0_2024-07-26/pull-4664 diff --git a/changelog/unreleased/pull-4703 b/changelog/0.17.0_2024-07-26/pull-4703 similarity index 100% rename from changelog/unreleased/pull-4703 rename to changelog/0.17.0_2024-07-26/pull-4703 diff --git a/changelog/unreleased/pull-4708 b/changelog/0.17.0_2024-07-26/pull-4708 similarity index 100% rename from changelog/unreleased/pull-4708 rename to changelog/0.17.0_2024-07-26/pull-4708 diff --git a/changelog/unreleased/pull-4709 b/changelog/0.17.0_2024-07-26/pull-4709 similarity index 100% rename from changelog/unreleased/pull-4709 rename to changelog/0.17.0_2024-07-26/pull-4709 diff --git a/changelog/unreleased/pull-4737 b/changelog/0.17.0_2024-07-26/pull-4737 similarity index 100% rename from changelog/unreleased/pull-4737 rename to changelog/0.17.0_2024-07-26/pull-4737 diff --git a/changelog/unreleased/pull-4764 b/changelog/0.17.0_2024-07-26/pull-4764 similarity index 100% rename from changelog/unreleased/pull-4764 rename to changelog/0.17.0_2024-07-26/pull-4764 diff --git a/changelog/unreleased/pull-4796 b/changelog/0.17.0_2024-07-26/pull-4796 similarity index 100% rename from changelog/unreleased/pull-4796 rename to changelog/0.17.0_2024-07-26/pull-4796 diff --git a/changelog/unreleased/pull-4807 b/changelog/0.17.0_2024-07-26/pull-4807 similarity index 100% rename from changelog/unreleased/pull-4807 rename to changelog/0.17.0_2024-07-26/pull-4807 diff --git a/changelog/unreleased/pull-4839 b/changelog/0.17.0_2024-07-26/pull-4839 similarity index 100% rename from changelog/unreleased/pull-4839 rename to changelog/0.17.0_2024-07-26/pull-4839 diff --git a/changelog/unreleased/pull-4884 b/changelog/0.17.0_2024-07-26/pull-4884 similarity index 100% rename from changelog/unreleased/pull-4884 rename to changelog/0.17.0_2024-07-26/pull-4884 From 905da91e610cacafd6001cfe8cb9053ef80d842b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 26 Jul 2024 11:29:39 +0200 Subject: [PATCH 492/893] Generate CHANGELOG.md for 0.17.0 --- CHANGELOG.md | 4161 +++++++++++++++++++++++++++++++------------------- 1 file changed, 2548 insertions(+), 1613 deletions(-) 
diff --git a/CHANGELOG.md b/CHANGELOG.md index 670f1a40e79..2a6926755a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ # Table of Contents +* [Changelog for 0.17.0](#changelog-for-restic-0170-2024-07-26) * [Changelog for 0.16.5](#changelog-for-restic-0165-2024-07-01) * [Changelog for 0.16.4](#changelog-for-restic-0164-2024-02-04) * [Changelog for 0.16.3](#changelog-for-restic-0163-2024-01-14) @@ -34,6 +35,712 @@ * [Changelog for 0.6.0](#changelog-for-restic-060-2017-05-29) +# Changelog for restic 0.17.0 (2024-07-26) +The following sections list the changes in restic 0.17.0 relevant to +restic users. The changes are ordered by importance. + +## Summary + + * Fix #3600: Handle unreadable xattrs in folders above `backup` source + * Fix #4209: Fix slow SFTP upload performance + * Fix #4503: Correct hardlink handling in `stats` command + * Fix #4568: Prevent `forget --keep-tags ` from deleting all snapshots + * Fix #4615: Make `find` not sometimes ignore directories + * Fix #4656: Properly report ID of newly added keys + * Fix #4703: Shutdown cleanly when receiving SIGTERM + * Fix #4709: Correct `--no-lock` handling of `ls` and `tag` commands + * Fix #4760: Fix possible error on concurrent cache cleanup + * Fix #4850: Handle UTF-16 password files in `key` command correctly + * Fix #4902: Update snapshot summary on `rewrite` + * Chg #956: Return exit code 10 and 11 for non-existing and locked repository + * Chg #4540: Require at least ARMv6 for ARM binaries + * Chg #4602: Deprecate legacy index format and `s3legacy` repository layout + * Chg #4627: Redesign backend error handling to improve reliability + * Chg #4707: Disable S3 anonymous authentication by default + * Chg #4744: Include full key ID in JSON output of `key list` + * Enh #662: Optionally skip snapshot creation if nothing changed + * Enh #693: Include snapshot size in `snapshots` output + * Enh #805: Add bitrot detection to `diff` command + * Enh #828: Improve features of the `repair packs` command + * Enh #1786: Support repositories with empty password + * Enh #2348: Add `--delete` option to `restore` command + * Enh #3067: Add extended options to configure Windows Shadow Copy Service + * Enh #3406: Improve `dump` performance for large files + * Enh #3806: Optimize and make `prune` command resumable + * Enh #4006: (alpha) Store deviceID only for hardlinks + * Enh #4048: Add support for FUSE-T with `mount` on macOS + * Enh #4251: Support reading backup from a command's standard output + * Enh #4287: Support connection to rest-server using unix socket + * Enh #4354: Significantly reduce `prune` memory usage + * Enh #4437: Make `check` command create non-existent cache directory + * Enh #4472: Support AWS Assume Role for S3 backend + * Enh #4547: Add `--json` option to `version` command + * Enh #4549: Add `--ncdu` option to `ls` command + * Enh #4573: Support rewriting host and time metadata in snapshots + * Enh #4583: Ignore `s3.storage-class` archive tiers for metadata + * Enh #4590: Speed up `mount` command's error detection + * Enh #4601: Add support for feature flags + * Enh #4611: Back up more file metadata on Windows + * Enh #4664: Make `ls` use `message_type` field in JSON output + * Enh #4676: Make `key` command's actions separate sub-commands + * Enh #4678: Add `--target` option to the `dump` command + * Enh #4708: Back up and restore SecurityDescriptors on Windows + * Enh #4733: Allow specifying `--host` via environment variable + * Enh #4737: Include snapshot ID in `reason` field of `forget` JSON output + * 
Enh #4764: Support forgetting all snapshots + * Enh #4768: Allow specifying custom User-Agent for outgoing requests + * Enh #4781: Add `restore` options to read include/exclude patterns from files + * Enh #4807: Support Extended Attributes on Windows NTFS + * Enh #4817: Make overwrite behavior of `restore` customizable + * Enh #4839: Add dry-run support to `restore` command + +## Details + + * Bugfix #3600: Handle unreadable xattrs in folders above `backup` source + + When backup sources are specified using absolute paths, `backup` also includes + information about the parent folders of the backup sources in the snapshot. + + If the extended attributes for some of these folders could not be read due to + missing permissions, this caused the backup to fail. This has now been fixed. + + https://github.com/restic/restic/issues/3600 + https://github.com/restic/restic/pull/4668 + https://forum.restic.net/t/parent-directories-above-the-snapshot-source-path-fatal-error-permission-denied/7216 + + * Bugfix #4209: Fix slow SFTP upload performance + + Since restic 0.12.1, the upload speed of the sftp backend to a remote server has + regressed significantly. This has now been fixed. + + https://github.com/restic/restic/issues/4209 + https://github.com/restic/restic/pull/4782 + + * Bugfix #4503: Correct hardlink handling in `stats` command + + If files on different devices had the same inode ID, the `stats` command did not + correctly calculate the snapshot size. This has now been fixed. + + https://github.com/restic/restic/pull/4503 + https://github.com/restic/restic/pull/4006 + https://forum.restic.net/t/possible-bug-in-stats/6461/8 + + * Bugfix #4568: Prevent `forget --keep-tags <invalid>` from deleting all snapshots + + Running `forget --keep-tags <invalid>`, where `<invalid>` is a tag that does not + exist in the repository, would remove all snapshots. This is especially + problematic if the tag name contains a typo. + + The `forget` command now fails with an error if all snapshots in a snapshot + group would be deleted. This prevents the above example from deleting all + snapshots. + + It is possible to temporarily disable the new check by setting the environment + variable `RESTIC_FEATURES=safe-forget-keep-tags=false`. Note that this feature + flag will be removed in the next minor restic version. + + https://github.com/restic/restic/pull/4568 + https://github.com/restic/restic/pull/4764 + + * Bugfix #4615: Make `find` not sometimes ignore directories + + In some cases, the `find` command ignored empty or moved directories. This has + now been fixed. + + https://github.com/restic/restic/pull/4615 + + * Bugfix #4656: Properly report ID of newly added keys + + `restic key add` now reports the ID of the newly added key. This simplifies + selecting a specific key using the `--key-hint key` option. + + https://github.com/restic/restic/issues/4656 + https://github.com/restic/restic/pull/4657 + + * Bugfix #4703: Shutdown cleanly when receiving SIGTERM + + Previously, when restic received the SIGTERM signal it would terminate + immediately, skipping cleanup and potentially causing issues like stale locks + being left behind. This primarily affected containerized restic invocations that + use SIGTERM, but could also be triggered via a simple `killall restic`. + + This has now been fixed, such that restic shuts down cleanly when receiving the + SIGTERM signal. + + https://github.com/restic/restic/pull/4703 + + * Bugfix #4709: Correct `--no-lock` handling of `ls` and `tag` commands + + The `ls` command never locked the repository.
This has now been fixed, with the + old behavior still being supported using `ls --no-lock`. The latter invocation + also works with older restic versions. + + The `tag` command erroneously accepted the `--no-lock` option. This command now + always requires an exclusive lock. + + https://github.com/restic/restic/pull/4709 + + * Bugfix #4760: Fix possible error on concurrent cache cleanup + + If multiple restic processes concurrently cleaned up no longer existing files + from the cache, this could cause some of the processes to fail with a `no such + file or directory` error. This has now been fixed. + + https://github.com/restic/restic/issues/4760 + https://github.com/restic/restic/pull/4761 + + * Bugfix #4850: Handle UTF-16 password files in `key` command correctly + + Previously, `key add` and `key passwd` did not properly decode UTF-16 encoded + passwords read from a password file. This has now been fixed to correctly match + the encoding when opening a repository. + + https://github.com/restic/restic/issues/4850 + https://github.com/restic/restic/pull/4851 + + * Bugfix #4902: Update snapshot summary on `rewrite` + + Restic previously did not recalculate the total number of files and bytes + processed when files were excluded from a snapshot by the `rewrite` command. + This has now been fixed. + + https://github.com/restic/restic/issues/4902 + https://github.com/restic/restic/pull/4905 + + * Change #956: Return exit code 10 and 11 for non-existing and locked repository + + If a repository does not exist or cannot be locked, restic previously always + returned exit code 1. This made it difficult to distinguish these cases from + other errors. + + Restic now returns exit code 10 if the repository does not exist, and exit code + 11 if the repository could not be locked due to a conflicting lock. + + https://github.com/restic/restic/issues/956 + https://github.com/restic/restic/pull/4884 + + * Change #4540: Require at least ARMv6 for ARM binaries + + The official release binaries of restic now require at least ARMv6 support for + ARM platforms. + + https://github.com/restic/restic/issues/4540 + https://github.com/restic/restic/pull/4542 + + * Change #4602: Deprecate legacy index format and `s3legacy` repository layout + + Support for the legacy index format used by restic before version 0.2.0 has been + deprecated and will be removed in the next minor restic version. You can use + `restic repair index` to update the index to the current format. + + It is possible to temporarily reenable support for the legacy index format by + setting the environment variable `RESTIC_FEATURES=deprecate-legacy-index=false`. + Note that this feature flag will be removed in the next minor restic version. + + Support for the `s3legacy` repository layout used for the S3 backend before + restic 0.7.0 has been deprecated and will be removed in the next minor restic + version. You can migrate your S3 repository to the current layout using + `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout`. + + It is possible to temporarily reenable support for the `s3legacy` layout by + setting the environment variable + `RESTIC_FEATURES=deprecate-s3-legacy-layout=false`. Note that this feature flag + will be removed in the next minor restic version.
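As a concrete sketch of the two migration steps mentioned above (the repository locations are placeholders; the commands themselves are the ones quoted in this entry):

    # update a legacy index to the current format
    restic -r /srv/restic-repo repair index
    # move an S3 repository off the deprecated s3legacy layout
    RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic -r s3:s3.amazonaws.com/bucket/repo migrate s3_layout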
+ + https://github.com/restic/restic/issues/4602 + https://github.com/restic/restic/pull/4724 + https://github.com/restic/restic/pull/4743 + + * Change #4627: Redesign backend error handling to improve reliability + + Restic now downloads pack files in large chunks instead of using a streaming + download. This prevents failures due to interrupted streams. The `restore` + command now also retries downloading individual blobs that could not be + retrieved. + + HTTP requests that are stuck for more than two minutes while uploading or + downloading are now forcibly interrupted. This ensures that stuck requests are + retried after a short timeout. + + Attempts to access a missing or truncated file will no longer be retried. This + avoids unnecessary retries in those cases. All other backend requests are + retried for up to 15 minutes. This ensures that temporarily interrupted network + connections can be tolerated. + + If a download yields a corrupt file or blob, then the download will be retried + once. + + Most parts of the new backend error handling can temporarily be disabled by + setting the environment variable `RESTIC_FEATURES=backend-error-redesign=false`. + Note that this feature flag will be removed in the next minor restic version. + + https://github.com/restic/restic/issues/4627 + https://github.com/restic/restic/issues/4193 + https://github.com/restic/restic/issues/4515 + https://github.com/restic/restic/issues/1523 + https://github.com/restic/restic/pull/4605 + https://github.com/restic/restic/pull/4792 + https://github.com/restic/restic/pull/4520 + https://github.com/restic/restic/pull/4800 + https://github.com/restic/restic/pull/4784 + https://github.com/restic/restic/pull/4844 + + * Change #4707: Disable S3 anonymous authentication by default + + When using the S3 backend with anonymous authentication, it continuously tried + to retrieve new authentication credentials, causing bad performance. + + Now, to use anonymous authentication, it is necessary to pass the extended + option `-o s3.unsafe-anonymous-auth=true` to restic. + + It is possible to temporarily revert to the old behavior by setting the + environment variable `RESTIC_FEATURES=explicit-s3-anonymous-auth=false`. Note + that this feature flag will be removed in the next minor restic version. + + https://github.com/restic/restic/issues/4707 + https://github.com/restic/restic/pull/4908 + + * Change #4744: Include full key ID in JSON output of `key list` + + The JSON output of the `key list` command has changed to include the full key ID + instead of just a shortened version of the ID, as the latter can be ambiguous in + some rare cases. To derive the short ID, please truncate the full ID down to + eight characters. + + https://github.com/restic/restic/issues/4744 + https://github.com/restic/restic/pull/4745 + + * Enhancement #662: Optionally skip snapshot creation if nothing changed + + The `backup` command always created a snapshot even if nothing in the backup set + changed compared to the parent snapshot. + + Restic now supports the `--skip-if-unchanged` option for the `backup` command, + which omits creating a snapshot if the new snapshot's content would be identical + to that of the parent snapshot. + + https://github.com/restic/restic/issues/662 + https://github.com/restic/restic/pull/4816 + + * Enhancement #693: Include snapshot size in `snapshots` output + + The `snapshots` command now prints the size for snapshots created using this or + a future restic version. 
To achieve this, the `backup` command now stores the + backup summary statistics in the snapshot. + + The text output of the `snapshots` command only shows the snapshot size. The + other statistics are only included in the JSON output. To inspect these + statistics use `restic snapshots --json` or `restic cat snapshot <snapshot-id>`. + + https://github.com/restic/restic/issues/693 + https://github.com/restic/restic/pull/4705 + https://github.com/restic/restic/pull/4913 + + * Enhancement #805: Add bitrot detection to `diff` command + + The output of the `diff` command now includes the modifier `?` for files to + indicate bitrot in backed up files. The `?` will appear whenever there is a + difference in content while the metadata is exactly the same. + + Since files with unchanged metadata are normally not read again when creating a + backup, the detection is only effective when the right-hand side of the diff has + been created with `backup --force`. + + https://github.com/restic/restic/issues/805 + https://github.com/restic/restic/pull/4526 + + * Enhancement #828: Improve features of the `repair packs` command + + The `repair packs` command has been improved to also be able to process + truncated pack files. The `check` and `check --read-data` command will provide + instructions on using the command if necessary to repair a repository. See the + guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html for + further instructions. + + https://github.com/restic/restic/issues/828 + https://github.com/restic/restic/pull/4644 + https://github.com/restic/restic/pull/4882 + + * Enhancement #1786: Support repositories with empty password + + Restic previously required a password to create or operate on repositories. + Using the new option `--insecure-no-password` it is now possible to disable this + requirement. Restic will not prompt for a password when using this option. + + For security reasons, the option must always be specified when operating on + repositories with an empty password, and specifying `--insecure-no-password` + while also passing a password to restic via a CLI option or environment variable + results in an error. + + The `init` and `copy` commands add the related `--from-insecure-no-password` + option, which applies to the source repository. The `key add` and `key passwd` + commands add the `--new-insecure-no-password` option to add or set an empty + password. + + https://github.com/restic/restic/issues/1786 + https://github.com/restic/restic/issues/4326 + https://github.com/restic/restic/pull/4698 + https://github.com/restic/restic/pull/4808 + + * Enhancement #2348: Add `--delete` option to `restore` command + + The `restore` command now supports a `--delete` option that allows removing + files and directories from the target directory that do not exist in the + snapshot. This option also allows files in the snapshot to replace non-empty + directories having the same name. + + To check that only expected files are deleted, add the `--dry-run --verbose=2` + options. + + https://github.com/restic/restic/issues/2348 + https://github.com/restic/restic/pull/4881 + + * Enhancement #3067: Add extended options to configure Windows Shadow Copy Service + + Previously, restic always used a 120-second timeout and unconditionally created + VSS snapshots for all volume mount points on disk.
This behavior can now be + fine-tuned by the following new extended options (available only on Windows): + + - `-o vss.timeout`: Time that VSS can spend creating snapshot before timing out + (default: 120s) - `-o vss.exclude-all-mount-points`: Exclude mountpoints from + snapshotting on all volumes (default: false) - `-o vss.exclude-volumes`: + Semicolon separated list of volumes to exclude from snapshotting - `-o + vss.provider`: VSS provider identifier which will be used for snapshotting + + For example, change VSS timeout to five minutes and disable snapshotting of + mount points on all volumes: + + restic backup --use-fs-snapshot -o vss.timeout=5m -o + vss.exclude-all-mount-points=true + + Exclude drive `d:`, mount point `c:\mnt` and a specific volume from + snapshotting: + + restic backup --use-fs-snapshot -o + vss.exclude-volumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" + + Use 'Microsoft Software Shadow Copy provider 1.0' instead of the default + provider: + + restic backup --use-fs-snapshot -o + vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5} + + https://github.com/restic/restic/pull/3067 + + * Enhancement #3406: Improve `dump` performance for large files + + The `dump` command now retrieves the data chunks for a file in parallel. This + improves the download performance by up to as many times as the configured + number of parallel backend connections. + + https://github.com/restic/restic/issues/3406 + https://github.com/restic/restic/pull/4796 + + * Enhancement #3806: Optimize and make `prune` command resumable + + Previously, if the `prune` command was interrupted, a later `prune` run would + start repacking pack files from the start, as `prune` did not update the index + while repacking. + + The `prune` command now supports resuming interrupted prune runs. The update of + the repository index has also been optimized to use less memory and only rewrite + parts of the index that have changed. + + https://github.com/restic/restic/issues/3806 + https://github.com/restic/restic/pull/4812 + + * Enhancement #4006: (alpha) Store deviceID only for hardlinks + + Set `RESTIC_FEATURES=device-id-for-hardlinks` to enable this alpha feature. The + feature flag will be removed after repository format version 3 becomes available + or be replaced with a different solution. + + When creating backups from a filesystem snapshot, for example created using + BTRFS subvolumes, the deviceID of the filesystem changes compared to previous + snapshots. This prevented restic from deduplicating the directory metadata of a + snapshot. + + When this alpha feature is enabled, the deviceID is only stored for hardlinks, + which significantly reduces the metadata duplication for most backups. + + https://github.com/restic/restic/pull/4006 + + * Enhancement #4048: Add support for FUSE-T with `mount` on macOS + + The restic `mount` command now supports creating FUSE mounts using FUSE-T on + macOS. + + https://github.com/restic/restic/issues/4048 + https://github.com/restic/restic/pull/4825 + + * Enhancement #4251: Support reading backup from a command's standard output + + The `backup` command now supports the `--stdin-from-command` option. When using + this option, the arguments to `backup` are interpreted as a command instead of + paths to back up. `backup` then executes the given command and stores the + standard output from it in the backup, similar to what the `--stdin` option + does. This also enables restic to verify that the command completes with exit + code zero.
A non-zero exit code causes the backup to fail. + + Note that the `--stdin` option does not have to be specified at the same time, + and that the `--stdin-filename` option also applies to `--stdin-from-command`. + + Example: `restic backup --stdin-from-command --stdin-filename dump.sql mysqldump + [...]` + + https://github.com/restic/restic/issues/4251 + https://github.com/restic/restic/pull/4410 + + * Enhancement #4287: Support connection to rest-server using unix socket + + Restic now supports using a unix socket to connect to a rest-server version + 0.13.0 or later. This allows running restic as follows: + + ``` + rest-server --listen unix:/tmp/rest.socket --data /path/to/data & + restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ [...] + ``` + + https://github.com/restic/restic/issues/4287 + https://github.com/restic/restic/pull/4655 + + * Enhancement #4354: Significantly reduce `prune` memory usage + + The `prune` command has been optimized to use up to 60% less memory. The memory + usage should now be roughly similar to creating a backup. + + https://github.com/restic/restic/pull/4354 + https://github.com/restic/restic/pull/4812 + + * Enhancement #4437: Make `check` command create non-existent cache directory + + Previously, if a custom cache directory was specified for the `check` command, + but the directory did not exist, `check` continued with the cache disabled. + + The `check` command now attempts to create the cache directory before + initializing the cache. + + https://github.com/restic/restic/issues/4437 + https://github.com/restic/restic/pull/4805 + https://github.com/restic/restic/pull/4883 + + * Enhancement #4472: Support AWS Assume Role for S3 backend + + Previously only credentials discovered via the Minio discovery methods were used + to authenticate. + + However, there are many circumstances where the discovered credentials have + lower permissions and need to assume a specific role. This is now possible using + the following new environment variables: + + - RESTIC_AWS_ASSUME_ROLE_ARN - RESTIC_AWS_ASSUME_ROLE_SESSION_NAME - + RESTIC_AWS_ASSUME_ROLE_EXTERNAL_ID - RESTIC_AWS_ASSUME_ROLE_REGION (defaults to + us-east-1) - RESTIC_AWS_ASSUME_ROLE_POLICY - RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT + + https://github.com/restic/restic/issues/4472 + https://github.com/restic/restic/pull/4474 + + * Enhancement #4547: Add `--json` option to `version` command + + Restic now supports outputting restic version along with the Go version, OS and + architecture used to build restic in JSON format using `version --json`. + + https://github.com/restic/restic/issues/4547 + https://github.com/restic/restic/pull/4553 + + * Enhancement #4549: Add `--ncdu` option to `ls` command + + NCDU (NCurses Disk Usage) is a tool to analyse disk usage of directories. It has + an option to save a directory tree and analyse it later. + + The `ls` command now supports outputting snapshot information in the NCDU format + using the `--ncdu` option. Example usage: `restic ls latest --ncdu | ncdu -f -` + + https://github.com/restic/restic/issues/4549 + https://github.com/restic/restic/pull/4550 + https://github.com/restic/restic/pull/4911 + + * Enhancement #4573: Support rewriting host and time metadata in snapshots + + The `rewrite` command now supports rewriting the host and/or time metadata of a + snapshot using the new `--new-host` and `--new-time` options. 
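As a purely illustrative sketch of combining the two new options (the hostname, timestamp and snapshot ID below are placeholders, and the timestamp format is assumed to match what restic accepts elsewhere, e.g. for `backup --time`):

```
# rewrite the recorded host and creation time of a snapshot (IDs/values are placeholders)
restic rewrite --new-host build-server --new-time "2024-05-01 10:00:00" 1a2b3c4d
```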
+ + https://github.com/restic/restic/pull/4573 + + * Enhancement #4583: Ignore `s3.storage-class` archive tiers for metadata + + Restic used to store all files on S3 using the specified `s3.storage-class`. + + Now, restic will only use non-archive storage tiers for metadata, to avoid + problems when accessing a repository. To restore any data, it is still necessary + to manually warm up the required data beforehand. + + NOTE: There is no official cold storage support in restic, use this option at + your own risk. + + https://github.com/restic/restic/issues/4583 + https://github.com/restic/restic/pull/4584 + + * Enhancement #4590: Speed up `mount` command's error detection + + The `mount` command now checks for the existence of the mountpoint before + opening the repository, leading to quicker error detection. + + https://github.com/restic/restic/pull/4590 + + * Enhancement #4601: Add support for feature flags + + Restic now supports feature flags that can be used to enable and disable + experimental features. The flags can be set using the environment variable + `RESTIC_FEATURES`. To get a list of currently supported feature flags, use the + `features` command. + + https://github.com/restic/restic/issues/4601 + https://github.com/restic/restic/pull/4666 + + * Enhancement #4611: Back up more file metadata on Windows + + Previously, restic did not back up all common Windows-specific metadata. + + Restic now stores file creation time and file attributes like the hidden, + read-only and encrypted flags when backing up files and folders on Windows. + + https://github.com/restic/restic/pull/4611 + + * Enhancement #4664: Make `ls` use `message_type` field in JSON output + + The `ls` command was the only restic command that used the `struct_type` field + in its JSON output format to specify the message type. + + The JSON output of the `ls` command now also includes the `message_type` field, + which is consistent with other commands. The `struct_type` field is still + included, but now deprecated. + + https://github.com/restic/restic/pull/4664 + + * Enhancement #4676: Make `key` command's actions separate sub-commands + + Each of the `add`, `list`, `remove` and `passwd` actions provided by the `key` + command is now a separate sub-command and have its own documentation which can + be invoked using `restic key --help`. + + https://github.com/restic/restic/issues/4676 + https://github.com/restic/restic/pull/4685 + + * Enhancement #4678: Add `--target` option to the `dump` command + + Restic `dump` always printed to the standard output. It now supports specifying + a `--target` file to write its output to. + + https://github.com/restic/restic/issues/4678 + https://github.com/restic/restic/pull/4682 + https://github.com/restic/restic/pull/4692 + + * Enhancement #4708: Back up and restore SecurityDescriptors on Windows + + Restic now backs up and restores SecurityDescriptors for files and folders on + Windows which includes owner, group, discretionary access control list (DACL) + and system access control list (SACL). + + This requires the user to be a member of backup operators or the application + must be run as admin. If that is not the case, only the current user's owner, + group and DACL will be backed up, and during restore only the DACL of the backed + up file will be restored, with the current user's owner and group being set on + the restored file. 
+ + https://github.com/restic/restic/pull/4708 + + * Enhancement #4733: Allow specifying `--host` via environment variable + + Restic commands that operate on snapshots, such as `restic backup` and `restic + snapshots`, support the `--host` option to specify the hostname for grouping + snapshots. + + Such commands now also support specifying the hostname via the environment + variable `RESTIC_HOST`. Note that `--host` still takes precedence over the + environment variable. + + https://github.com/restic/restic/issues/4733 + https://github.com/restic/restic/pull/4734 + + * Enhancement #4737: Include snapshot ID in `reason` field of `forget` JSON output + + The JSON output of the `forget` command now includes `id` and `short_id` of + snapshots in the `reason` field. + + https://github.com/restic/restic/pull/4737 + + * Enhancement #4764: Support forgetting all snapshots + + The `forget` command now supports the `--unsafe-allow-remove-all` option, which + removes all snapshots in the repository. + + This option must always be combined with a snapshot filter (by host, path or + tag). For example, the command `forget --tag example --unsafe-allow-remove-all` + removes all snapshots with the tag "example". + + https://github.com/restic/restic/pull/4764 + + * Enhancement #4768: Allow specifying custom User-Agent for outgoing requests + + Restic now supports setting a custom `User-Agent` for outgoing HTTP requests + using the global option `--http-user-agent` or the `RESTIC_HTTP_USER_AGENT` + environment variable. + + https://github.com/restic/restic/issues/4768 + https://github.com/restic/restic/pull/4810 + + * Enhancement #4781: Add `restore` options to read include/exclude patterns from files + + Restic now supports reading include and exclude patterns from files using the + `--include-file`, `--exclude-file`, `--iinclude-file` and `--iexclude-file` + options of the `restore` command. + + https://github.com/restic/restic/issues/4781 + https://github.com/restic/restic/pull/4811 + + * Enhancement #4807: Support Extended Attributes on Windows NTFS + + Restic now backs up and restores Extended Attributes for files and folders on + Windows NTFS. + + https://github.com/restic/restic/pull/4807 + + * Enhancement #4817: Make overwrite behavior of `restore` customizable + + The `restore` command now supports an `--overwrite` option to configure whether + already existing files are overwritten. The overwrite behavior can be configured + using the following option values: + + - `--overwrite always` (default): Always overwrites already existing files. The + `restore` command will verify the existing file content and only restore + mismatching parts to minimize downloads. Updates the metadata of all files. - + `--overwrite if-changed`: Like `always`, but speeds up the file content check by + assuming that files with matching size and modification time (mtime) are already + up to date. In case of a mismatch, the full file content is verified like with + `always`. Updates the metadata of all files. - `--overwrite if-newer`: Like + `always`, but only overwrites existing files when the file in the snapshot has a + newer modification time (mtime) than the existing file. - `--overwrite never`: + Never overwrites existing files. 
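As an illustrative sketch only (the target path and snapshot reference are placeholders), a re-run of a restore that rewrites only files whose size or modification time no longer match the snapshot:

```
restic restore latest --target /mnt/restore --overwrite if-changed
```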
+ + https://github.com/restic/restic/issues/4817 + https://github.com/restic/restic/issues/200 + https://github.com/restic/restic/issues/407 + https://github.com/restic/restic/issues/2662 + https://github.com/restic/restic/pull/4837 + https://github.com/restic/restic/pull/4838 + https://github.com/restic/restic/pull/4864 + https://github.com/restic/restic/pull/4921 + + * Enhancement #4839: Add dry-run support to `restore` command + + The `restore` command now supports the `--dry-run` option to perform a dry run. + Pass the `--verbose=2` option to see which files would remain unchanged, and + which would be updated or freshly restored. + + https://github.com/restic/restic/pull/4839 + + # Changelog for restic 0.16.5 (2024-07-01) The following sections list the changes in restic 0.16.5 relevant to restic users. The changes are ordered by importance. @@ -47,8 +754,8 @@ restic users. The changes are ordered by importance. * Enhancement #4799: Add option to force use of Azure CLI credential - A new environment variable `AZURE_FORCE_CLI_CREDENTIAL=true` allows forcing the use of - Azure CLI credential, ignoring other credentials like managed identity. + A new environment variable `AZURE_FORCE_CLI_CREDENTIAL=true` allows forcing the + use of Azure CLI credential, ignoring other credentials like managed identity. https://github.com/restic/restic/pull/4799 @@ -73,35 +780,39 @@ restic users. The changes are ordered by importance. * Bugfix #4677: Downgrade zstd library to fix rare data corruption at max. compression - In restic 0.16.3, backups where the compression level was set to `max` (using `--compression - max`) could in rare and very specific circumstances result in data corruption due to a bug in the - library used for compressing data. Restic 0.16.1 and 0.16.2 were not affected. + In restic 0.16.3, backups where the compression level was set to `max` (using + `--compression max`) could in rare and very specific circumstances result in + data corruption due to a bug in the library used for compressing data. Restic + 0.16.1 and 0.16.2 were not affected. - Restic now uses the previous version of the library used to compress data, the same version used - by restic 0.16.2. Please note that the `auto` compression level (which restic uses by default) - was never affected, and even if you used `max` compression, chances of being affected by this - issue are small. + Restic now uses the previous version of the library used to compress data, the + same version used by restic 0.16.2. Please note that the `auto` compression + level (which restic uses by default) was never affected, and even if you used + `max` compression, chances of being affected by this issue are small. - To check a repository for any corruption, run `restic check --read-data`. This will download - and verify the whole repository and can be used at any time to completely verify the integrity of - a repository. If the `check` command detects anomalies, follow the suggested steps. + To check a repository for any corruption, run `restic check --read-data`. This + will download and verify the whole repository and can be used at any time to + completely verify the integrity of a repository. If the `check` command detects + anomalies, follow the suggested steps. 
https://github.com/restic/restic/issues/4677 https://github.com/restic/restic/pull/4679 * Enhancement #4529: Add extra verification of data integrity before upload - Hardware issues, or a bug in restic or its dependencies, could previously cause corruption in - the files restic created and stored in the repository. Detecting such corruption previously - required explicitly running the `check --read-data` or `check --read-data-subset` - commands. + Hardware issues, or a bug in restic or its dependencies, could previously cause + corruption in the files restic created and stored in the repository. Detecting + such corruption previously required explicitly running the `check --read-data` + or `check --read-data-subset` commands. - To further ensure data integrity, even in the case of hardware issues or software bugs, restic - now performs additional verification of the files about to be uploaded to the repository. + To further ensure data integrity, even in the case of hardware issues or + software bugs, restic now performs additional verification of the files about to + be uploaded to the repository. - These extra checks will increase CPU usage during backups. They can therefore, if absolutely - necessary, be disabled using the `--no-extra-verify` global option. Please note that this - should be combined with more active checking using the previously mentioned check commands. + These extra checks will increase CPU usage during backups. They can therefore, + if absolutely necessary, be disabled using the `--no-extra-verify` global + option. Please note that this should be combined with more active checking using + the previously mentioned check commands. https://github.com/restic/restic/issues/4529 https://github.com/restic/restic/pull/4681 @@ -123,13 +834,14 @@ restic users. The changes are ordered by importance. * Bugfix #4560: Improve errors for irregular files on Windows - Since Go 1.21, most filesystem reparse points on Windows are considered to be irregular files. - This caused restic to show an `error: invalid node type ""` error message for those files. + Since Go 1.21, most filesystem reparse points on Windows are considered to be + irregular files. This caused restic to show an `error: invalid node type ""` + error message for those files. - This error message has now been improved and includes the relevant file path: `error: - nodeFromFileInfo path/to/file: unsupported file type "irregular"`. As irregular files are - not required to behave like regular files, it is not possible to provide a generic way to back up - those files. + This error message has now been improved and includes the relevant file path: + `error: nodeFromFileInfo path/to/file: unsupported file type "irregular"`. As + irregular files are not required to behave like regular files, it is not + possible to provide a generic way to back up those files. https://github.com/restic/restic/issues/4560 https://github.com/restic/restic/pull/4620 @@ -137,9 +849,10 @@ restic users. The changes are ordered by importance. * Bugfix #4574: Support backup of deduplicated files on Windows again - With the official release builds of restic 0.16.1 and 0.16.2, it was not possible to back up - files that were deduplicated by the corresponding Windows Server feature. This also applied - to restic versions built using Go 1.21.0-1.21.4. + With the official release builds of restic 0.16.1 and 0.16.2, it was not + possible to back up files that were deduplicated by the corresponding Windows + Server feature. 
This also applied to restic versions built using Go + 1.21.0-1.21.4. The Go version used to build restic has now been updated to fix this. @@ -148,10 +861,10 @@ restic users. The changes are ordered by importance. * Bugfix #4612: Improve error handling for `rclone` backend - Since restic 0.16.0, if rclone encountered an error while listing files, this could in rare - circumstances cause restic to assume that there are no files. Although unlikely, this - situation could result in data loss if it were to happen right when the `prune` command is - listing existing snapshots. + Since restic 0.16.0, if rclone encountered an error while listing files, this + could in rare circumstances cause restic to assume that there are no files. + Although unlikely, this situation could result in data loss if it were to happen + right when the `prune` command is listing existing snapshots. Error handling has now been improved to detect and work around this case. @@ -160,9 +873,10 @@ restic users. The changes are ordered by importance. * Bugfix #4624: Correct `restore` progress information if an error occurs - If an error occurred while restoring a snapshot, this could cause the `restore` progress bar to - show incorrect information. In addition, if a data file could not be loaded completely, then - errors would also be reported for some already restored files. + If an error occurred while restoring a snapshot, this could cause the `restore` + progress bar to show incorrect information. In addition, if a data file could + not be loaded completely, then errors would also be reported for some already + restored files. Error reporting of the `restore` command has now been made more accurate. @@ -171,11 +885,12 @@ restic users. The changes are ordered by importance. * Bugfix #4626: Improve reliability of restoring large files - In some cases restic failed to restore large files that frequently contain the same file chunk. - In combination with certain backends, this could result in network connection timeouts that - caused incomplete restores. + In some cases restic failed to restore large files that frequently contain the + same file chunk. In combination with certain backends, this could result in + network connection timeouts that caused incomplete restores. - Restic now includes special handling for such file chunks to ensure reliable restores. + Restic now includes special handling for such file chunks to ensure reliable + restores. https://github.com/restic/restic/pull/4626 https://forum.restic.net/t/errors-restoring-with-restic-on-windows-server-s3/6943 @@ -194,16 +909,18 @@ restic users. The changes are ordered by importance. * Bugfix #4540: Restore ARMv5 support for ARM binaries - The official release binaries for restic 0.16.1 were accidentally built to require ARMv7. The - build process is now updated to restore support for ARMv5. + The official release binaries for restic 0.16.1 were accidentally built to + require ARMv7. The build process is now updated to restore support for ARMv5. - Please note that restic 0.17.0 will drop support for ARMv5 and require at least ARMv6. + Please note that restic 0.17.0 will drop support for ARMv5 and require at least + ARMv6. https://github.com/restic/restic/issues/4540 * Bugfix #4545: Repair documentation build on Read the Docs - For restic 0.16.1, no documentation was available at https://restic.readthedocs.io/ . + For restic 0.16.1, no documentation was available at + https://restic.readthedocs.io/ . The documentation build process is now updated to work again. 
@@ -230,65 +947,67 @@ restic users. The changes are ordered by importance. * Bugfix #4513: Make `key list` command honor `--no-lock` - The `key list` command now supports the `--no-lock` options. This allows determining which - keys a repo can be accessed by without the need for having write access (e.g., read-only sftp - access, filesystem snapshot). + The `key list` command now supports the `--no-lock` options. This allows + determining which keys a repo can be accessed by without the need for having + write access (e.g., read-only sftp access, filesystem snapshot). https://github.com/restic/restic/issues/4513 https://github.com/restic/restic/pull/4514 * Bugfix #4516: Do not try to load password on command line autocomplete - The command line autocompletion previously tried to load the repository password. This could - cause the autocompletion not to work. Now, this step gets skipped. + The command line autocompletion previously tried to load the repository + password. This could cause the autocompletion not to work. Now, this step gets + skipped. https://github.com/restic/restic/issues/4516 https://github.com/restic/restic/pull/4526 * Bugfix #4523: Update zstd library to fix possible data corruption at max. compression - In restic 0.16.0, backups where the compression level was set to `max` (using `--compression - max`) could in rare and very specific circumstances result in data corruption due to a bug in the - library used for compressing data. + In restic 0.16.0, backups where the compression level was set to `max` (using + `--compression max`) could in rare and very specific circumstances result in + data corruption due to a bug in the library used for compressing data. - Restic now uses the latest version of the library used to compress data, which includes a fix for - this issue. Please note that the `auto` compression level (which restic uses by default) was - never affected, and even if you used `max` compression, chances of being affected by this issue - were very small. + Restic now uses the latest version of the library used to compress data, which + includes a fix for this issue. Please note that the `auto` compression level + (which restic uses by default) was never affected, and even if you used `max` + compression, chances of being affected by this issue were very small. - To check a repository for any corruption, run `restic check --read-data`. This will download - and verify the whole repository and can be used at any time to completely verify the integrity of - a repository. If the `check` command detects anomalies, follow the suggested steps. + To check a repository for any corruption, run `restic check --read-data`. This + will download and verify the whole repository and can be used at any time to + completely verify the integrity of a repository. If the `check` command detects + anomalies, follow the suggested steps. - To simplify any needed repository repair and minimize data loss, there is also a new and - experimental `repair packs` command that salvages all valid data from the affected pack files - (see `restic help repair packs` for more information). + To simplify any needed repository repair and minimize data loss, there is also a + new and experimental `repair packs` command that salvages all valid data from + the affected pack files (see `restic help repair packs` for more information). 
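A hedged sketch of that workflow, assuming `repair packs` accepts the pack IDs reported by `check` as arguments (the ID below is a placeholder):

```
restic check --read-data
# salvage the valid data from a pack file that check reported as damaged
restic repair packs 78c2824a
```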
https://github.com/restic/restic/issues/4523 https://github.com/restic/restic/pull/4530 * Change #4532: Update dependencies and require Go 1.19 or newer - We have updated all dependencies. Since some libraries require newer Go standard library - features, support for Go 1.18 has been dropped, which means that restic now requires at least Go - 1.19 to build. + We have updated all dependencies. Since some libraries require newer Go standard + library features, support for Go 1.18 has been dropped, which means that restic + now requires at least Go 1.19 to build. https://github.com/restic/restic/pull/4532 https://github.com/restic/restic/pull/4533 * Enhancement #229: Show progress bar while loading the index - Restic did not provide any feedback while loading index files. Now, there is a progress bar that - shows the index loading progress. + Restic did not provide any feedback while loading index files. Now, there is a + progress bar that shows the index loading progress. https://github.com/restic/restic/issues/229 https://github.com/restic/restic/pull/4419 * Enhancement #4128: Automatically set `GOMAXPROCS` in resource-constrained containers - When running restic in a Linux container with CPU-usage limits, restic now automatically - adjusts `GOMAXPROCS`. This helps to reduce the memory consumption on hosts with many CPU - cores. + When running restic in a Linux container with CPU-usage limits, restic now + automatically adjusts `GOMAXPROCS`. This helps to reduce the memory consumption + on hosts with many CPU cores. https://github.com/restic/restic/issues/4128 https://github.com/restic/restic/pull/4485 @@ -296,32 +1015,33 @@ restic users. The changes are ordered by importance. * Enhancement #4480: Allow setting REST password and username via environment variables - Previously, it was only possible to specify the REST-server username and password in the - repository URL, or by using the `--repository-file` option. This meant it was not possible to - use authentication in contexts where the repository URL is stored in publicly accessible way. + Previously, it was only possible to specify the REST-server username and + password in the repository URL, or by using the `--repository-file` option. This + meant it was not possible to use authentication in contexts where the repository + URL is stored in publicly accessible way. - Restic now allows setting the username and password using the `RESTIC_REST_USERNAME` and - `RESTIC_REST_PASSWORD` variables. + Restic now allows setting the username and password using the + `RESTIC_REST_USERNAME` and `RESTIC_REST_PASSWORD` variables. https://github.com/restic/restic/pull/4480 * Enhancement #4511: Include inode numbers in JSON output for `find` and `ls` commands - Restic used to omit the inode numbers in the JSON messages emitted for nodes by the `ls` command - as well as for matches by the `find` command. It now includes those values whenever they are - available. + Restic used to omit the inode numbers in the JSON messages emitted for nodes by + the `ls` command as well as for matches by the `find` command. It now includes + those values whenever they are available. 
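A hedged sketch of consuming these values with `jq` (mentioned elsewhere in this changelog), assuming `ls --json` emits one JSON object per line carrying the `struct_type`, `inode` and `path` fields:

```
restic ls latest --json | jq -r 'select(.struct_type == "node") | "\(.inode)\t\(.path)"'
```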
https://github.com/restic/restic/pull/4511 * Enhancement #4519: Add config option to set SFTP command arguments - When using the `sftp` backend, scenarios where a custom identity file was needed for the SSH - connection, required the full command to be specified: `-o sftp.command='ssh - user@host:port -i /ssh/my_private_key -s sftp'` + When using the `sftp` backend, scenarios where a custom identity file was needed + for the SSH connection, required the full command to be specified: `-o + sftp.command='ssh user@host:port -i /ssh/my_private_key -s sftp'` - Now, the `-o sftp.args=...` option can be passed to restic to specify custom arguments for the - SSH command executed by the SFTP backend. This simplifies the above example to `-o - sftp.args='-i /ssh/my_private_key'`. + Now, the `-o sftp.args=...` option can be passed to restic to specify custom + arguments for the SSH command executed by the SFTP backend. This simplifies the + above example to `-o sftp.args='-i /ssh/my_private_key'`. https://github.com/restic/restic/issues/4241 https://github.com/restic/restic/pull/4519 @@ -367,31 +1087,32 @@ restic users. The changes are ordered by importance. * Bugfix #2565: Support "unlimited" in `forget --keep-*` options - Restic would previously forget snapshots that should have been kept when a negative value was - passed to the `--keep-*` options. Negative values are now forbidden. To keep all snapshots, - the special value `unlimited` is now supported. For example, `--keep-monthly unlimited` - will keep all monthly snapshots. + Restic would previously forget snapshots that should have been kept when a + negative value was passed to the `--keep-*` options. Negative values are now + forbidden. To keep all snapshots, the special value `unlimited` is now + supported. For example, `--keep-monthly unlimited` will keep all monthly + snapshots. https://github.com/restic/restic/issues/2565 https://github.com/restic/restic/pull/4234 * Bugfix #3311: Support non-UTF8 paths as symlink target - Earlier restic versions did not correctly `backup` and `restore` symlinks that contain a - non-UTF8 target. Note that this only affected systems that still use a non-Unicode encoding - for filesystem paths. + Earlier restic versions did not correctly `backup` and `restore` symlinks that + contain a non-UTF8 target. Note that this only affected systems that still use a + non-Unicode encoding for filesystem paths. - The repository format is now extended to add support for such symlinks. Please note that - snapshots must have been created with at least restic version 0.16.0 for `restore` to - correctly handle non-UTF8 symlink targets when restoring them. + The repository format is now extended to add support for such symlinks. Please + note that snapshots must have been created with at least restic version 0.16.0 + for `restore` to correctly handle non-UTF8 symlink targets when restoring them. https://github.com/restic/restic/issues/3311 https://github.com/restic/restic/pull/3802 * Bugfix #4199: Avoid lock refresh issues on slow network connections - On network connections with a low upload speed, backups and other operations could fail with - the error message `Fatal: failed to refresh lock in time`. + On network connections with a low upload speed, backups and other operations + could fail with the error message `Fatal: failed to refresh lock in time`. This has now been fixed by reworking the lock refresh handling. @@ -400,21 +1121,21 @@ restic users. The changes are ordered by importance. 
* Bugfix #4274: Improve lock refresh handling after standby - If the restic process was stopped or the host running restic entered standby during a long - running operation such as a backup, this previously resulted in the operation failing with - `Fatal: failed to refresh lock in time`. + If the restic process was stopped or the host running restic entered standby + during a long running operation such as a backup, this previously resulted in + the operation failing with `Fatal: failed to refresh lock in time`. - This has now been fixed such that restic first checks whether it is safe to continue the current - operation and only throws an error if not. + This has now been fixed such that restic first checks whether it is safe to + continue the current operation and only throws an error if not. https://github.com/restic/restic/issues/4274 https://github.com/restic/restic/pull/4374 * Bugfix #4319: Correctly clean up status bar output of the `backup` command - Due to a regression in restic 0.15.2, the status bar of the `backup` command could leave some - output behind. This happened if filenames were printed that are wider than the current - terminal width. This has now been fixed. + Due to a regression in restic 0.15.2, the status bar of the `backup` command + could leave some output behind. This happened if filenames were printed that are + wider than the current terminal width. This has now been fixed. https://github.com/restic/restic/issues/4319 https://github.com/restic/restic/pull/4318 @@ -425,25 +1146,26 @@ restic users. The changes are ordered by importance. * Bugfix #4400: Ignore missing folders in `rest` backend - If a repository accessed via the REST backend was missing folders, then restic would fail with - an error while trying to list the data in the repository. This has been now fixed. + If a repository accessed via the REST backend was missing folders, then restic + would fail with an error while trying to list the data in the repository. This + has been now fixed. https://github.com/restic/rest-server/issues/235 https://github.com/restic/restic/pull/4400 * Change #4176: Fix JSON message type of `scan_finished` for the `backup` command - Restic incorrectly set the `message_type` of the `scan_finished` message to `status` - instead of `verbose_status`. This has now been corrected so that the messages report the - correct type. + Restic incorrectly set the `message_type` of the `scan_finished` message to + `status` instead of `verbose_status`. This has now been corrected so that the + messages report the correct type. https://github.com/restic/restic/pull/4176 * Change #4201: Require Go 1.20 for Solaris builds - Building restic on Solaris now requires Go 1.20, as the library used to access Azure uses the - mmap syscall, which is only available on Solaris starting from Go 1.20. All other platforms - however continue to build with Go 1.18. + Building restic on Solaris now requires Go 1.20, as the library used to access + Azure uses the mmap syscall, which is only available on Solaris starting from Go + 1.20. All other platforms however continue to build with Go 1.18. https://github.com/restic/restic/pull/4201 @@ -464,8 +1186,8 @@ restic users. The changes are ordered by importance. * Enhancement #719: Add `--retry-lock` option - This option allows specifying a duration for which restic will wait if the repository is - already locked. + This option allows specifying a duration for which restic will wait if the + repository is already locked. 
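A minimal illustrative sketch (the duration and path are arbitrary placeholders):

```
# wait up to 10 minutes for a concurrent operation to release its lock
restic backup --retry-lock 10m /home/user/work
```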
https://github.com/restic/restic/issues/719 https://github.com/restic/restic/pull/2214 @@ -473,24 +1195,25 @@ restic users. The changes are ordered by importance. * Enhancement #1495: Sort snapshots by timestamp in `restic find` - The `find` command used to print snapshots in an arbitrary order. Restic now prints snapshots - sorted by timestamp. + The `find` command used to print snapshots in an arbitrary order. Restic now + prints snapshots sorted by timestamp. https://github.com/restic/restic/issues/1495 https://github.com/restic/restic/pull/4409 * Enhancement #1759: Add `repair index` and `repair snapshots` commands - The `rebuild-index` command has been renamed to `repair index`. The old name will still work, - but is deprecated. + The `rebuild-index` command has been renamed to `repair index`. The old name + will still work, but is deprecated. - When a snapshot was damaged, the only option up to now was to completely forget the snapshot, - even if only some unimportant files in it were damaged and other files were still fine. + When a snapshot was damaged, the only option up to now was to completely forget + the snapshot, even if only some unimportant files in it were damaged and other + files were still fine. - Restic now has a `repair snapshots` command, which can salvage any non-damaged files and parts - of files in the snapshots by removing damaged directories and missing file contents. Please - note that the damaged data may still be lost and see the "Troubleshooting" section in the - documentation for more details. + Restic now has a `repair snapshots` command, which can salvage any non-damaged + files and parts of files in the snapshots by removing damaged directories and + missing file contents. Please note that the damaged data may still be lost and + see the "Troubleshooting" section in the documentation for more details. https://github.com/restic/restic/issues/1759 https://github.com/restic/restic/issues/1714 @@ -502,19 +1225,20 @@ restic users. The changes are ordered by importance. * Enhancement #1926: Allow certificate paths to be passed through environment variables - Restic will now read paths to certificates from the environment variables `RESTIC_CACERT` or - `RESTIC_TLS_CLIENT_CERT` if `--cacert` or `--tls-client-cert` are not specified. + Restic will now read paths to certificates from the environment variables + `RESTIC_CACERT` or `RESTIC_TLS_CLIENT_CERT` if `--cacert` or `--tls-client-cert` + are not specified. https://github.com/restic/restic/issues/1926 https://github.com/restic/restic/pull/4384 * Enhancement #2359: Provide multi-platform Docker images - The official Docker images are now built for the architectures linux/386, linux/amd64, - linux/arm and linux/arm64. + The official Docker images are now built for the architectures linux/386, + linux/amd64, linux/arm and linux/arm64. - As an alternative to the Docker Hub, the Docker images are also available on ghcr.io, the GitHub - Container Registry. + As an alternative to the Docker Hub, the Docker images are also available on + ghcr.io, the GitHub Container Registry. https://github.com/restic/restic/issues/2359 https://github.com/restic/restic/issues/4269 @@ -524,25 +1248,26 @@ restic users. The changes are ordered by importance. The `azure` backend previously only supported storages using the global domain `core.windows.net`. This meant that backups to other domains such as Azure China - (`core.chinacloudapi.cn`) or Azure Germany (`core.cloudapi.de`) were not supported. 
- Restic now allows overriding the global domain using the environment variable - `AZURE_ENDPOINT_SUFFIX`. + (`core.chinacloudapi.cn`) or Azure Germany (`core.cloudapi.de`) were not + supported. Restic now allows overriding the global domain using the environment + variable `AZURE_ENDPOINT_SUFFIX`. https://github.com/restic/restic/issues/2468 https://github.com/restic/restic/pull/4387 * Enhancement #2679: Reduce file fragmentation for local backend - Before this change, local backend files could become fragmented. Now restic will try to - preallocate space for pack files to avoid their fragmentation. + Before this change, local backend files could become fragmented. Now restic will + try to preallocate space for pack files to avoid their fragmentation. https://github.com/restic/restic/issues/2679 https://github.com/restic/restic/pull/3261 * Enhancement #3328: Reduce memory usage by up to 25% - The in-memory index has been optimized to be more garbage collection friendly. Restic now - defaults to `GOGC=50` to run the Go garbage collector more frequently. + The in-memory index has been optimized to be more garbage collection friendly. + Restic now defaults to `GOGC=50` to run the Go garbage collector more + frequently. https://github.com/restic/restic/issues/3328 https://github.com/restic/restic/pull/4352 @@ -550,21 +1275,21 @@ restic users. The changes are ordered by importance. * Enhancement #3397: Improve accuracy of ETA displayed during backup - Restic's `backup` command displayed an ETA that did not adapt when the rate of progress made - during the backup changed during the course of the backup. + Restic's `backup` command displayed an ETA that did not adapt when the rate of + progress made during the backup changed during the course of the backup. - Restic now uses recent progress when computing the ETA. It is important to realize that the - estimate may still be wrong, because restic cannot predict the future, but the hope is that the - ETA will be more accurate in most cases. + Restic now uses recent progress when computing the ETA. It is important to + realize that the estimate may still be wrong, because restic cannot predict the + future, but the hope is that the ETA will be more accurate in most cases. https://github.com/restic/restic/issues/3397 https://github.com/restic/restic/pull/3563 * Enhancement #3624: Keep oldest snapshot when there are not enough snapshots - The `forget` command now additionally preserves the oldest snapshot if fewer snapshots than - allowed by the `--keep-*` parameters would otherwise be kept. This maximizes the amount of - history kept within the specified limits. + The `forget` command now additionally preserves the oldest snapshot if fewer + snapshots than allowed by the `--keep-*` parameters would otherwise be kept. + This maximizes the amount of history kept within the specified limits. https://github.com/restic/restic/issues/3624 https://github.com/restic/restic/pull/4366 @@ -572,99 +1297,106 @@ restic users. The changes are ordered by importance. * Enhancement #3698: Add support for Managed / Workload Identity to `azure` backend - Restic now additionally supports authenticating to Azure using Workload Identity or Managed - Identity credentials, which are automatically injected in several environments such as a - managed Kubernetes cluster. 
+ Restic now additionally supports authenticating to Azure using Workload Identity + or Managed Identity credentials, which are automatically injected in several + environments such as a managed Kubernetes cluster. https://github.com/restic/restic/issues/3698 https://github.com/restic/restic/pull/4029 * Enhancement #3871: Support `:` syntax to select subfolders - Commands like `diff` or `restore` always worked with the full snapshot. This did not allow - comparing only a specific subfolder or only restoring that folder (`restore --include - subfolder` filters the restored files, but still creates the directories included in - `subfolder`). + Commands like `diff` or `restore` always worked with the full snapshot. This did + not allow comparing only a specific subfolder or only restoring that folder + (`restore --include subfolder` filters the restored files, but still creates the + directories included in `subfolder`). - The commands `diff`, `dump`, `ls` and `restore` now support the `:` - syntax, where `snapshot` is the ID of a snapshot (or the string `latest`) and `subfolder` is a - path within the snapshot. The commands will then only work with the specified path of the - snapshot. The `subfolder` must be a path to a folder as returned by `ls`. Two examples: + The commands `diff`, `dump`, `ls` and `restore` now support the + `:` syntax, where `snapshot` is the ID of a snapshot (or + the string `latest`) and `subfolder` is a path within the snapshot. The commands + will then only work with the specified path of the snapshot. The `subfolder` + must be a path to a folder as returned by `ls`. Two examples: `restic restore -t target latest:/some/path` `restic diff 12345678:/some/path 90abcef:/some/path` - For debugging purposes, the `cat` command now supports `cat tree :` to - return the directory metadata for the given subfolder. + For debugging purposes, the `cat` command now supports `cat tree + :` to return the directory metadata for the given + subfolder. https://github.com/restic/restic/issues/3871 https://github.com/restic/restic/pull/4334 * Enhancement #3941: Support `--group-by` for backup parent selection - Previously, the `backup` command by default selected the parent snapshot based on the - hostname and the backup paths. When the backup path list changed, the `backup` command was - unable to determine a suitable parent snapshot and had to read all files again. + Previously, the `backup` command by default selected the parent snapshot based + on the hostname and the backup paths. When the backup path list changed, the + `backup` command was unable to determine a suitable parent snapshot and had to + read all files again. - The new `--group-by` option for the `backup` command allows filtering snapshots for the - parent selection by `host`, `paths` and `tags`. It defaults to `host,paths` which selects the - latest snapshot with hostname and paths matching those of the backup run. This matches the - behavior of prior restic versions. + The new `--group-by` option for the `backup` command allows filtering snapshots + for the parent selection by `host`, `paths` and `tags`. It defaults to + `host,paths` which selects the latest snapshot with hostname and paths matching + those of the backup run. This matches the behavior of prior restic versions. - The new `--group-by` option should be set to the same value as passed to `forget --group-by`. + The new `--group-by` option should be set to the same value as passed to `forget + --group-by`. 
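A short illustrative sketch of keeping the two commands grouped the same way (the tag and path are placeholders):

```
restic backup --group-by host,tags --tag projects /srv/projects
restic forget --group-by host,tags --keep-daily 7 --keep-weekly 4
```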
https://github.com/restic/restic/issues/3941 https://github.com/restic/restic/pull/4081 * Enhancement #4130: Cancel current command if cache becomes unusable - If the cache directory was removed or ran out of space while restic was running, this would - previously cause further caching attempts to fail and thereby drastically slow down the - command execution. Now, the currently running command is instead canceled. + If the cache directory was removed or ran out of space while restic was running, + this would previously cause further caching attempts to fail and thereby + drastically slow down the command execution. Now, the currently running command + is instead canceled. https://github.com/restic/restic/issues/4130 https://github.com/restic/restic/pull/4166 * Enhancement #4159: Add `--human-readable` option to `ls` and `find` commands - Previously, when using the `-l` option with the `ls` and `find` commands, the displayed size - was always in bytes, without an option for a more human readable format such as MiB or GiB. + Previously, when using the `-l` option with the `ls` and `find` commands, the + displayed size was always in bytes, without an option for a more human readable + format such as MiB or GiB. - The new `--human-readable` option will convert longer size values into more human friendly - values with an appropriate suffix depending on the output size. For example, a size of - `14680064` will be shown as `14.000 MiB`. + The new `--human-readable` option will convert longer size values into more + human friendly values with an appropriate suffix depending on the output size. + For example, a size of `14680064` will be shown as `14.000 MiB`. https://github.com/restic/restic/issues/4159 https://github.com/restic/restic/pull/4351 * Enhancement #4188: Include restic version in snapshot metadata - The restic version used to backup a snapshot is now included in its metadata and shown when - inspecting a snapshot using `restic cat snapshot ` or `restic snapshots - --json`. + The restic version used to backup a snapshot is now included in its metadata and + shown when inspecting a snapshot using `restic cat snapshot ` or + `restic snapshots --json`. https://github.com/restic/restic/issues/4188 https://github.com/restic/restic/pull/4378 * Enhancement #4220: Add `jq` binary to Docker image - The Docker image now contains `jq`, which can be useful to process JSON data output by restic. + The Docker image now contains `jq`, which can be useful to process JSON data + output by restic. https://github.com/restic/restic/pull/4220 * Enhancement #4226: Allow specifying region of new buckets in the `gs` backend - Previously, buckets used by the Google Cloud Storage backend would always get created in the - "us" region. It is now possible to specify the region where a bucket should be created by using - the `-o gs.region=us` option. + Previously, buckets used by the Google Cloud Storage backend would always get + created in the "us" region. It is now possible to specify the region where a + bucket should be created by using the `-o gs.region=us` option. https://github.com/restic/restic/pull/4226 * Enhancement #4375: Add support for extended attributes on symlinks - Restic now supports extended attributes on symlinks when backing up, restoring, or - FUSE-mounting snapshots. This includes, for example, the `security.selinux` xattr on Linux - distributions that use SELinux. + Restic now supports extended attributes on symlinks when backing up, restoring, + or FUSE-mounting snapshots. 
This includes, for example, the `security.selinux` + xattr on Linux distributions that use SELinux. https://github.com/restic/restic/issues/4375 https://github.com/restic/restic/pull/4379 @@ -693,12 +1425,12 @@ restic users. The changes are ordered by importance. * Bugfix #2260: Sanitize filenames printed by `backup` during processing - The `backup` command would previously not sanitize the filenames it printed during - processing, potentially causing newlines or terminal control characters to mangle the - status output or even change the state of a terminal. + The `backup` command would previously not sanitize the filenames it printed + during processing, potentially causing newlines or terminal control characters + to mangle the status output or even change the state of a terminal. - Filenames are now checked and quoted if they contain non-printable or non-Unicode - characters. + Filenames are now checked and quoted if they contain non-printable or + non-Unicode characters. https://github.com/restic/restic/issues/2260 https://github.com/restic/restic/issues/4191 @@ -707,44 +1439,47 @@ restic users. The changes are ordered by importance. * Bugfix #4211: Make `dump` interpret `--host` and `--path` correctly A regression in restic 0.15.0 caused `dump` to confuse its `--host=` and - `--path=` options: it looked for snapshots with paths called `` from hosts - called ``. It now treats the options as intended. + `--path=` options: it looked for snapshots with paths called `` from + hosts called ``. It now treats the options as intended. https://github.com/restic/restic/issues/4211 https://github.com/restic/restic/pull/4212 * Bugfix #4239: Correct number of blocks reported in mount point - Restic mount points reported an incorrect number of 512-byte (POSIX standard) blocks for - files and links due to a rounding bug. In particular, empty files were reported as taking one - block instead of zero. + Restic mount points reported an incorrect number of 512-byte (POSIX standard) + blocks for files and links due to a rounding bug. In particular, empty files + were reported as taking one block instead of zero. - The rounding is now fixed: the number of blocks reported is the file size (or link target size) - divided by 512 and rounded up to a whole number. + The rounding is now fixed: the number of blocks reported is the file size (or + link target size) divided by 512 and rounded up to a whole number. https://github.com/restic/restic/issues/4239 https://github.com/restic/restic/pull/4240 * Bugfix #4253: Minimize risk of spurious filesystem loops with `mount` - When a backup contains a directory that has the same name as its parent, say `a/b/b`, and the GNU - `find` command was run on this backup in a restic mount, `find` would refuse to traverse the - lowest `b` directory, instead printing `File system loop detected`. This was due to the way the - restic mount command generates inode numbers for directories in the mount point. + When a backup contains a directory that has the same name as its parent, say + `a/b/b`, and the GNU `find` command was run on this backup in a restic mount, + `find` would refuse to traverse the lowest `b` directory, instead printing `File + system loop detected`. This was due to the way the restic mount command + generates inode numbers for directories in the mount point. - The rule for generating these inode numbers was changed in 0.15.0. It has now been changed again - to avoid this issue. 
A perfect rule does not exist, but the probability of this behavior - occurring is now extremely small. + The rule for generating these inode numbers was changed in 0.15.0. It has now + been changed again to avoid this issue. A perfect rule does not exist, but the + probability of this behavior occurring is now extremely small. - When it does occur, the mount point is not broken, and scripts that traverse the mount point - should work as long as they don't rely on inode numbers for detecting filesystem loops. + When it does occur, the mount point is not broken, and scripts that traverse the + mount point should work as long as they don't rely on inode numbers for + detecting filesystem loops. https://github.com/restic/restic/issues/4253 https://github.com/restic/restic/pull/4255 * Enhancement #4180: Add release binaries for riscv64 architecture on Linux - Builds for the `riscv64` architecture on Linux are now included in the release binaries. + Builds for the `riscv64` architecture on Linux are now included in the release + binaries. https://github.com/restic/restic/pull/4180 @@ -771,8 +1506,8 @@ restic users. The changes are ordered by importance. * Bugfix #3750: Remove `b2_download_file_by_name: 404` warning from B2 backend - In some cases the B2 backend could print `b2_download_file_by_name: 404: : b2.b2err` - warnings. These are only debug messages and can be safely ignored. + In some cases the B2 backend could print `b2_download_file_by_name: 404: : + b2.b2err` warnings. These are only debug messages and can be safely ignored. Restic now uses an updated library for accessing B2, which removes the warning. @@ -782,19 +1517,19 @@ restic users. The changes are ordered by importance. * Bugfix #4147: Make `prune --quiet` not print progress bar - A regression in restic 0.15.0 caused `prune --quiet` to show a progress bar while deciding how - to process each pack files. This has now been fixed. + A regression in restic 0.15.0 caused `prune --quiet` to show a progress bar + while deciding how to process each pack files. This has now been fixed. https://github.com/restic/restic/issues/4147 https://github.com/restic/restic/pull/4153 * Bugfix #4163: Make `self-update --output` work with new filename on Windows - Since restic 0.14.0 the `self-update` command did not work when a custom output filename was - specified via the `--output` option. This has now been fixed. + Since restic 0.14.0 the `self-update` command did not work when a custom output + filename was specified via the `--output` option. This has now been fixed. - As a workaround, either use an older restic version to run the self-update or create an empty - file with the output filename before updating e.g. using CMD: + As a workaround, either use an older restic version to run the self-update or + create an empty file with the output filename before updating e.g. using CMD: `type nul > new-file.exe` `restic self-update --output new-file.exe` @@ -803,24 +1538,27 @@ restic users. The changes are ordered by importance. * Bugfix #4167: Add missing ETA in `backup` progress bar - A regression in restic 0.15.0 caused the ETA to be missing from the progress bar displayed by the - `backup` command. This has now been fixed. + A regression in restic 0.15.0 caused the ETA to be missing from the progress bar + displayed by the `backup` command. This has now been fixed. https://github.com/restic/restic/pull/4167 * Enhancement #4143: Ignore empty lock files - With restic 0.15.0 the checks for stale locks became much stricter than before. 
In particular, - empty or unreadable locks were no longer silently ignored. This made restic to complain with - `Load(, 0, 0) returned error, retrying after 552.330144ms: - load(): invalid data returned` and fail in the end. + With restic 0.15.0 the checks for stale locks became much stricter than before. + In particular, empty or unreadable locks were no longer silently ignored. This + made restic to complain with `Load(, 0, 0) returned error, + retrying after 552.330144ms: load(): invalid data returned` and + fail in the end. - The error message is now clarified and the implementation changed to ignore empty lock files - which are sometimes created as the result of a failed uploads on some backends. + The error message is now clarified and the implementation changed to ignore + empty lock files which are sometimes created as the result of a failed uploads + on some backends. - Please note that unreadable lock files still have to cleaned up manually. To do so, you can run - `restic unlock --remove-all` which removes all existing lock files. But first make sure that - no other restic process is currently using the repository. + Please note that unreadable lock files still have to cleaned up manually. To do + so, you can run `restic unlock --remove-all` which removes all existing lock + files. But first make sure that no other restic process is currently using the + repository. https://github.com/restic/restic/issues/4143 https://github.com/restic/restic/pull/4152 @@ -876,63 +1614,65 @@ restic users. The changes are ordered by importance. * Bugfix #2015: Make `mount` return exit code 0 after receiving Ctrl-C / SIGINT - To stop the `mount` command, a user has to press Ctrl-C or send a SIGINT signal to restic. This - used to cause restic to exit with a non-zero exit code. + To stop the `mount` command, a user has to press Ctrl-C or send a SIGINT signal + to restic. This used to cause restic to exit with a non-zero exit code. - The exit code has now been changed to zero as the above is the expected way to stop the `mount` - command and should therefore be considered successful. + The exit code has now been changed to zero as the above is the expected way to + stop the `mount` command and should therefore be considered successful. https://github.com/restic/restic/issues/2015 https://github.com/restic/restic/pull/3894 * Bugfix #2578: Make `restore` replace existing symlinks - When restoring a symlink, restic used to report an error if the target path already existed. - This has now been fixed such that the potentially existing target path is first removed before - the symlink is restored. + When restoring a symlink, restic used to report an error if the target path + already existed. This has now been fixed such that the potentially existing + target path is first removed before the symlink is restored. https://github.com/restic/restic/issues/2578 https://github.com/restic/restic/pull/3780 * Bugfix #2591: Don't read password from stdin for `backup --stdin` - The `backup` command when used with `--stdin` previously tried to read first the password, - then the data to be backed up from standard input. This meant it would often confuse part of the - data for the password. + The `backup` command when used with `--stdin` previously tried to read first the + password, then the data to be backed up from standard input. This meant it would + often confuse part of the data for the password. 
- From now on, it will instead exit with the message `Fatal: cannot read both password and data - from stdin` unless the password is passed in some other way (such as - `--restic-password-file`, `RESTIC_PASSWORD`, etc). + From now on, it will instead exit with the message `Fatal: cannot read both + password and data from stdin` unless the password is passed in some other way + (such as `--restic-password-file`, `RESTIC_PASSWORD`, etc). - To enter the password interactively a password command has to be used. For example on Linux, - `mysqldump somedatabase | restic backup --stdin --password-command='sh -c - "systemd-ask-password < /dev/tty"'` securely reads the password from the terminal. + To enter the password interactively a password command has to be used. For + example on Linux, `mysqldump somedatabase | restic backup --stdin + --password-command='sh -c "systemd-ask-password < /dev/tty"'` securely reads the + password from the terminal. https://github.com/restic/restic/issues/2591 https://github.com/restic/restic/pull/4011 * Bugfix #3161: Delete files on Backblaze B2 more reliably - Restic used to only delete the latest version of files stored in B2. In most cases this worked - well as there was only a single version of the file. However, due to retries while uploading it is - possible for multiple file versions to be stored at B2. This could lead to various problems for - files that should have been deleted but still existed. + Restic used to only delete the latest version of files stored in B2. In most + cases this worked well as there was only a single version of the file. However, + due to retries while uploading it is possible for multiple file versions to be + stored at B2. This could lead to various problems for files that should have + been deleted but still existed. - The implementation has now been changed to delete all versions of files, which doubles the - amount of Class B transactions necessary to delete files, but assures that no file versions are - left behind. + The implementation has now been changed to delete all versions of files, which + doubles the amount of Class B transactions necessary to delete files, but + assures that no file versions are left behind. https://github.com/restic/restic/issues/3161 https://github.com/restic/restic/pull/3885 * Bugfix #3336: Make SFTP backend report no space left on device - Backing up to an SFTP backend would spew repeated SSH_FX_FAILURE messages when the remote disk - was full. Restic now reports "sftp: no space left on device" and exits immediately when it - detects this condition. + Backing up to an SFTP backend would spew repeated SSH_FX_FAILURE messages when + the remote disk was full. Restic now reports "sftp: no space left on device" and + exits immediately when it detects this condition. - A fix for this issue was implemented in restic 0.12.1, but unfortunately the fix itself - contained a bug that prevented it from taking effect. + A fix for this issue was implemented in restic 0.12.1, but unfortunately the fix + itself contained a bug that prevented it from taking effect. https://github.com/restic/restic/issues/3336 https://github.com/restic/restic/pull/3345 @@ -940,9 +1680,10 @@ restic users. The changes are ordered by importance. * Bugfix #3567: Improve handling of interrupted syscalls in `mount` command - Accessing restic's FUSE mount could result in "input/output" errors when using programs in - which syscalls can be interrupted. This is for example the case for Go programs. 
This has now - been fixed by improved error handling of interrupted syscalls. + Accessing restic's FUSE mount could result in "input/output" errors when using + programs in which syscalls can be interrupted. This is for example the case for + Go programs. This has now been fixed by improved error handling of interrupted + syscalls. https://github.com/restic/restic/issues/3567 https://github.com/restic/restic/issues/3694 @@ -950,50 +1691,53 @@ restic users. The changes are ordered by importance. * Bugfix #3897: Fix stuck `copy` command when `-o .connections=1` - When running the `copy` command with `-o .connections=1` the command would be - infinitely stuck. This has now been fixed. + When running the `copy` command with `-o .connections=1` the command + would be infinitely stuck. This has now been fixed. https://github.com/restic/restic/issues/3897 https://github.com/restic/restic/pull/3898 * Bugfix #3918: Correct prune statistics for partially compressed repositories - In a partially compressed repository, one data blob can exist both in an uncompressed and a - compressed version. This caused the `prune` statistics to become inaccurate and e.g. report a - too high value for the unused size, such as "unused size after prune: 16777215.991 TiB". This - has now been fixed. + In a partially compressed repository, one data blob can exist both in an + uncompressed and a compressed version. This caused the `prune` statistics to + become inaccurate and e.g. report a too high value for the unused size, such as + "unused size after prune: 16777215.991 TiB". This has now been fixed. https://github.com/restic/restic/issues/3918 https://github.com/restic/restic/pull/3980 * Bugfix #3951: Make `ls` return exit code 1 if snapshot cannot be loaded - The `ls` command used to show a warning and return exit code 0 when failing to load a snapshot. - This has now been fixed such that it instead returns exit code 1 (still showing a warning). + The `ls` command used to show a warning and return exit code 0 when failing to + load a snapshot. This has now been fixed such that it instead returns exit code + 1 (still showing a warning). https://github.com/restic/restic/pull/3951 * Bugfix #4003: Make `backup` no longer hang on Solaris when seeing a FIFO file - The `backup` command used to hang on Solaris whenever it encountered a FIFO file (named pipe), - due to a bug in the handling of extended attributes. This bug has now been fixed. + The `backup` command used to hang on Solaris whenever it encountered a FIFO file + (named pipe), due to a bug in the handling of extended attributes. This bug has + now been fixed. https://github.com/restic/restic/issues/4003 https://github.com/restic/restic/pull/4053 * Bugfix #4016: Support ExFAT-formatted local backends on macOS Ventura - ExFAT-formatted disks could not be used as local backends starting from macOS Ventura. Restic - commands would fail with an "inappropriate ioctl for device" error. This has now been fixed. + ExFAT-formatted disks could not be used as local backends starting from macOS + Ventura. Restic commands would fail with an "inappropriate ioctl for device" + error. This has now been fixed. 
https://github.com/restic/restic/issues/4016 https://github.com/restic/restic/pull/4021 * Bugfix #4085: Make `init` ignore "Access Denied" errors when creating S3 buckets - In restic 0.9.0 through 0.13.0, the `init` command ignored some permission errors from S3 - backends when trying to check for bucket existence, so that manually created buckets with - custom permissions could be used for backups. + In restic 0.9.0 through 0.13.0, the `init` command ignored some permission + errors from S3 backends when trying to check for bucket existence, so that + manually created buckets with custom permissions could be used for backups. This feature became broken in 0.14.0, but has now been restored again. @@ -1002,20 +1746,21 @@ restic users. The changes are ordered by importance. * Bugfix #4100: Make `self-update` enabled by default only in release builds - The `self-update` command was previously included by default in all builds of restic as - opposed to only in official release builds, even if the `selfupdate` tag was not explicitly - enabled when building. + The `self-update` command was previously included by default in all builds of + restic as opposed to only in official release builds, even if the `selfupdate` + tag was not explicitly enabled when building. - This has now been corrected, and the `self-update` command is only available if restic was - built with `-tags selfupdate` (as done for official release builds by `build.go`). + This has now been corrected, and the `self-update` command is only available if + restic was built with `-tags selfupdate` (as done for official release builds by + `build.go`). https://github.com/restic/restic/pull/4100 * Bugfix #4103: Don't generate negative UIDs and GIDs in tar files from `dump` - When using a 32-bit build of restic, the `dump` command could in some cases create tar files - containing negative UIDs and GIDs, which cannot be read by GNU tar. This corner case especially - applies to backups from stdin on Windows. + When using a 32-bit build of restic, the `dump` command could in some cases + create tar files containing negative UIDs and GIDs, which cannot be read by GNU + tar. This corner case especially applies to backups from stdin on Windows. This is now fixed such that `dump` creates valid tar files in these cases too. @@ -1024,48 +1769,50 @@ restic users. The changes are ordered by importance. * Change #2724: Include full snapshot ID in JSON output of `backup` - We have changed the JSON output of the backup command to include the full snapshot ID instead of - just a shortened version, as the latter can be ambiguous in some rare cases. To derive the short - ID, please truncate the full ID down to eight characters. + We have changed the JSON output of the backup command to include the full + snapshot ID instead of just a shortened version, as the latter can be ambiguous + in some rare cases. To derive the short ID, please truncate the full ID down to + eight characters. https://github.com/restic/restic/issues/2724 https://github.com/restic/restic/pull/3993 * Change #3929: Make `unlock` display message only when locks were actually removed - The `unlock` command used to print the "successfully removed locks" message whenever it was - run, regardless of lock files having being removed or not. + The `unlock` command used to print the "successfully removed locks" message + whenever it was run, regardless of lock files having being removed or not. 
- This has now been changed such that it only prints the message if any lock files were actually - removed. In addition, it also reports the number of removed lock files. + This has now been changed such that it only prints the message if any lock files + were actually removed. In addition, it also reports the number of removed lock + files. https://github.com/restic/restic/issues/3929 https://github.com/restic/restic/pull/3935 * Change #4033: Don't print skipped snapshots by default in `copy` command - The `copy` command used to print each snapshot that was skipped because it already existed in - the target repository. The amount of this output could practically bury the list of snapshots - that were actually copied. + The `copy` command used to print each snapshot that was skipped because it + already existed in the target repository. The amount of this output could + practically bury the list of snapshots that were actually copied. - From now on, the skipped snapshots are by default not printed at all, but this can be re-enabled - by increasing the verbosity level of the command. + From now on, the skipped snapshots are by default not printed at all, but this + can be re-enabled by increasing the verbosity level of the command. https://github.com/restic/restic/issues/4033 https://github.com/restic/restic/pull/4066 * Change #4041: Update dependencies and require Go 1.18 or newer - Most dependencies have been updated. Since some libraries require newer language features, - support for Go 1.15-1.17 has been dropped, which means that restic now requires at least Go 1.18 - to build. + Most dependencies have been updated. Since some libraries require newer language + features, support for Go 1.15-1.17 has been dropped, which means that restic now + requires at least Go 1.18 to build. https://github.com/restic/restic/pull/4041 * Enhancement #14: Implement `rewrite` command - Restic now has a `rewrite` command which allows to rewrite existing snapshots to remove - unwanted files. + Restic now has a `rewrite` command which allows to rewrite existing snapshots to + remove unwanted files. https://github.com/restic/restic/issues/14 https://github.com/restic/restic/pull/2731 @@ -1073,15 +1820,15 @@ restic users. The changes are ordered by importance. * Enhancement #79: Restore files with long runs of zeros as sparse files - When using `restore --sparse`, the restorer may now write files containing long runs of zeros - as sparse files (also called files with holes), where the zeros are not actually written to - disk. + When using `restore --sparse`, the restorer may now write files containing long + runs of zeros as sparse files (also called files with holes), where the zeros + are not actually written to disk. - How much space is saved by writing sparse files depends on the operating system, file system and - the distribution of zeros in the file. + How much space is saved by writing sparse files depends on the operating system, + file system and the distribution of zeros in the file. - During backup restic still reads the whole file including sparse regions, but with optimized - processing speed of sparse regions. + During backup restic still reads the whole file including sparse regions, but + with optimized processing speed of sparse regions. https://github.com/restic/restic/issues/79 https://github.com/restic/restic/issues/3903 @@ -1091,9 +1838,9 @@ restic users. The changes are ordered by importance. 
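The sparse restore enhancement above (#79) rests on a simple idea: when the data to restore contains long runs of zeros, seek over them instead of writing them, so the filesystem can represent them as holes. The Go sketch below illustrates that general technique only; it is not restic's restorer code, and the `writeSparse` helper and the block size are made up for the example.

```
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// writeSparse copies src into a new file at path, seeking over blocks that
// consist entirely of zeros instead of writing them. On filesystems that
// support holes this yields a sparse file; the final Truncate makes sure a
// trailing hole is still reflected in the file size.
func writeSparse(path string, src io.Reader, blockSize int) error {
	dst, err := os.Create(path)
	if err != nil {
		return err
	}
	defer dst.Close()

	buf := make([]byte, blockSize)
	zeros := make([]byte, blockSize)
	var size int64

	for {
		n, err := io.ReadFull(src, buf)
		if n > 0 {
			if bytes.Equal(buf[:n], zeros[:n]) {
				// A block of zeros: advance the write offset without writing.
				if _, err := dst.Seek(int64(n), io.SeekCurrent); err != nil {
					return err
				}
			} else if _, err := dst.Write(buf[:n]); err != nil {
				return err
			}
			size += int64(n)
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break
		}
		if err != nil {
			return err
		}
	}
	return dst.Truncate(size)
}

func main() {
	// 1 MiB of data that is zero except for a short run in the middle.
	data := make([]byte, 1<<20)
	copy(data[512*1024:], []byte("a short run of non-zero bytes"))

	if err := writeSparse("sparse-example.bin", bytes.NewReader(data), 64*1024); err != nil {
		panic(err)
	}
	fmt.Println("wrote sparse-example.bin")
}
```

On filesystems without hole support the skipped regions are simply filled with zeros by the operating system, so the approach degrades gracefully.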
* Enhancement #1078: Support restoring symbolic links on Windows - The `restore` command now supports restoring symbolic links on Windows. Because of Windows - specific restrictions this is only possible when running restic with the - `SeCreateSymbolicLinkPrivilege` privilege or as an administrator. + The `restore` command now supports restoring symbolic links on Windows. Because + of Windows specific restrictions this is only possible when running restic with + the `SeCreateSymbolicLinkPrivilege` privilege or as an administrator. https://github.com/restic/restic/issues/1078 https://github.com/restic/restic/issues/2699 @@ -1101,14 +1848,14 @@ restic users. The changes are ordered by importance. * Enhancement #1734: Inform about successful retries after errors - When a recoverable error is encountered, restic shows a warning message saying that it's - retrying, e.g.: + When a recoverable error is encountered, restic shows a warning message saying + that it's retrying, e.g.: `Save() returned error, retrying after 357.131936ms: ...` - This message can be confusing in that it never clearly states whether the retry is successful or - not. This has now been fixed such that restic follows up with a message confirming a successful - retry, e.g.: + This message can be confusing in that it never clearly states whether the retry + is successful or not. This has now been fixed such that restic follows up with a + message confirming a successful retry, e.g.: `Save() operation successful after 1 retries` @@ -1117,12 +1864,12 @@ restic users. The changes are ordered by importance. * Enhancement #1866: Improve handling of directories with duplicate entries - If for some reason a directory contains a duplicate entry, the `backup` command would - previously fail with a `node "path/to/file" already present` or `nodes are not ordered got - "path/to/file", last "path/to/file"` error. + If for some reason a directory contains a duplicate entry, the `backup` command + would previously fail with a `node "path/to/file" already present` or `nodes are + not ordered got "path/to/file", last "path/to/file"` error. - The error handling has been improved to only report a warning in this case. Make sure to check - that the filesystem in question is not damaged if you see this! + The error handling has been improved to only report a warning in this case. Make + sure to check that the filesystem in question is not damaged if you see this! https://github.com/restic/restic/issues/1866 https://github.com/restic/restic/issues/3937 @@ -1130,29 +1877,31 @@ restic users. The changes are ordered by importance. * Enhancement #2134: Support B2 API keys restricted to hiding but not deleting files - When the B2 backend does not have the necessary permissions to permanently delete files, it now - automatically falls back to hiding files. This allows using restic with an application key - which is not allowed to delete files. This can prevent an attacker from deleting backups with - such an API key. + When the B2 backend does not have the necessary permissions to permanently + delete files, it now automatically falls back to hiding files. This allows using + restic with an application key which is not allowed to delete files. This can + prevent an attacker from deleting backups with such an API key. - To use this feature create an application key without the `deleteFiles` capability. It is - recommended to restrict the key to just one bucket. 
For example using the `b2` command line - tool: + To use this feature create an application key without the `deleteFiles` + capability. It is recommended to restrict the key to just one bucket. For + example using the `b2` command line tool: `b2 create-key --bucket listBuckets,readFiles,writeFiles,listFiles` - Alternatively, you can use the S3 backend to access B2, as described in the documentation. In - this mode, files are also only hidden instead of being deleted permanently. + Alternatively, you can use the S3 backend to access B2, as described in the + documentation. In this mode, files are also only hidden instead of being deleted + permanently. https://github.com/restic/restic/issues/2134 https://github.com/restic/restic/pull/2398 * Enhancement #2152: Make `init` open only one connection for the SFTP backend - The `init` command using the SFTP backend used to connect twice to the repository. This could be - inconvenient if the user must enter a password, or cause `init` to fail if the server does not - correctly close the first SFTP connection. + The `init` command using the SFTP backend used to connect twice to the + repository. This could be inconvenient if the user must enter a password, or + cause `init` to fail if the server does not correctly close the first SFTP + connection. This has now been fixed by reusing the first/initial SFTP connection opened. @@ -1161,40 +1910,44 @@ restic users. The changes are ordered by importance. * Enhancement #2533: Handle cache corruption on disk and in downloads - In rare situations, like for example after a system crash, the data stored in the cache might be - corrupted. This could cause restic to fail and required manually deleting the cache. + In rare situations, like for example after a system crash, the data stored in + the cache might be corrupted. This could cause restic to fail and required + manually deleting the cache. - Restic now automatically removes broken data from the cache, allowing it to recover from such a - situation without user intervention. In addition, restic retries downloads which return - corrupt data in order to also handle temporary download problems. + Restic now automatically removes broken data from the cache, allowing it to + recover from such a situation without user intervention. In addition, restic + retries downloads which return corrupt data in order to also handle temporary + download problems. https://github.com/restic/restic/issues/2533 https://github.com/restic/restic/pull/3521 * Enhancement #2715: Stricter repository lock handling - Previously, restic commands kept running even if they failed to refresh their locks in time. - This could be a problem e.g. in case the client system running a backup entered the standby power - mode while the backup was still in progress (which would prevent the client from refreshing its - lock), and after a short delay another host successfully runs `unlock` and `prune` on the - repository, which would remove all data added by the in-progress backup. If the backup client - later continues its backup, even though its lock had expired in the meantime, this would lead to - an incomplete snapshot. - - To address this, lock handling is now much stricter. Commands requiring a lock are canceled if - the lock is not refreshed successfully in time. In addition, if a lock file is not readable - restic will not allow starting a command. It may be necessary to remove invalid lock files - manually or use `unlock --remove-all`. 
Please make sure that no other restic processes are - running concurrently before doing this, however. + Previously, restic commands kept running even if they failed to refresh their + locks in time. This could be a problem e.g. in case the client system running a + backup entered the standby power mode while the backup was still in progress + (which would prevent the client from refreshing its lock), and after a short + delay another host successfully runs `unlock` and `prune` on the repository, + which would remove all data added by the in-progress backup. If the backup + client later continues its backup, even though its lock had expired in the + meantime, this would lead to an incomplete snapshot. + + To address this, lock handling is now much stricter. Commands requiring a lock + are canceled if the lock is not refreshed successfully in time. In addition, if + a lock file is not readable restic will not allow starting a command. It may be + necessary to remove invalid lock files manually or use `unlock --remove-all`. + Please make sure that no other restic processes are running concurrently before + doing this, however. https://github.com/restic/restic/issues/2715 https://github.com/restic/restic/pull/3569 * Enhancement #2750: Make backup file read concurrency configurable - The `backup` command now supports a `--read-concurrency` option which allows tuning restic - for very fast storage like NVMe disks by controlling the number of concurrent file reads during - the backup process. + The `backup` command now supports a `--read-concurrency` option which allows + tuning restic for very fast storage like NVMe disks by controlling the number of + concurrent file reads during the backup process. https://github.com/restic/restic/pull/2750 @@ -1209,75 +1962,78 @@ restic users. The changes are ordered by importance. * Enhancement #3096: Make `mount` command support macOS using macFUSE 4.x - Restic now uses a different FUSE library for mounting snapshots and making them available as a - FUSE filesystem using the `mount` command. This adds support for macFUSE 4.x which can be used - to make this work on recent macOS versions. + Restic now uses a different FUSE library for mounting snapshots and making them + available as a FUSE filesystem using the `mount` command. This adds support for + macFUSE 4.x which can be used to make this work on recent macOS versions. https://github.com/restic/restic/issues/3096 https://github.com/restic/restic/pull/4024 * Enhancement #3124: Support JSON output for the `init` command - The `init` command used to ignore the `--json` option, but now outputs a JSON message if the - repository was created successfully. + The `init` command used to ignore the `--json` option, but now outputs a JSON + message if the repository was created successfully. https://github.com/restic/restic/issues/3124 https://github.com/restic/restic/pull/3132 * Enhancement #3899: Optimize prune memory usage - The `prune` command needs large amounts of memory in order to determine what to keep and what to - remove. This is now optimized to use up to 30% less memory. + The `prune` command needs large amounts of memory in order to determine what to + keep and what to remove. This is now optimized to use up to 30% less memory. 
https://github.com/restic/restic/pull/3899 * Enhancement #3905: Improve speed of parent snapshot detection in `backup` command - Backing up a large number of files using `--files-from-verbatim` or `--files-from-raw` - options could require a long time to find the parent snapshot. This has been improved. + Backing up a large number of files using `--files-from-verbatim` or + `--files-from-raw` options could require a long time to find the parent + snapshot. This has been improved. https://github.com/restic/restic/pull/3905 * Enhancement #3915: Add compression statistics to the `stats` command - When executed with `--mode raw-data` on a repository that supports compression, the `stats` - command now calculates and displays, for the selected repository or snapshots: the - uncompressed size of the data; the compression progress (percentage of data that has been - compressed); the compression ratio of the compressed data; the total space saving. + When executed with `--mode raw-data` on a repository that supports compression, + the `stats` command now calculates and displays, for the selected repository or + snapshots: the uncompressed size of the data; the compression progress + (percentage of data that has been compressed); the compression ratio of the + compressed data; the total space saving. - It also takes into account both the compressed and uncompressed data if the repository is only - partially compressed. + It also takes into account both the compressed and uncompressed data if the + repository is only partially compressed. https://github.com/restic/restic/pull/3915 * Enhancement #3925: Provide command completion for PowerShell - Restic already provided generation of completion files for bash, fish and zsh. Now powershell - is supported, too. + Restic already provided generation of completion files for bash, fish and zsh. + Now powershell is supported, too. https://github.com/restic/restic/pull/3925/files * Enhancement #3931: Allow `backup` file tree scanner to be disabled - The `backup` command walks the file tree in a separate scanner process to find the total size and - file/directory count, and uses this to provide an ETA. This can slow down backups, especially - of network filesystems. + The `backup` command walks the file tree in a separate scanner process to find + the total size and file/directory count, and uses this to provide an ETA. This + can slow down backups, especially of network filesystems. - The command now has a new option `--no-scan` which can be used to disable this scanning in order - to speed up backups when needed. + The command now has a new option `--no-scan` which can be used to disable this + scanning in order to speed up backups when needed. https://github.com/restic/restic/pull/3931 * Enhancement #3932: Improve handling of ErrDot errors in rclone and sftp backends - Since Go 1.19, restic can no longer implicitly run relative executables which are found in the - current directory (e.g. `rclone` if found in `.`). This is a security feature of Go to prevent - against running unintended and possibly harmful executables. + Since Go 1.19, restic can no longer implicitly run relative executables which + are found in the current directory (e.g. `rclone` if found in `.`). This is a + security feature of Go to prevent against running unintended and possibly + harmful executables. - The error message for this was just "cannot run executable found relative to current - directory". 
This has now been improved to yield a more specific error message, informing the - user how to explicitly allow running the executable using the `-o rclone.program` and `-o - sftp.command` extended options with `./`. + The error message for this was just "cannot run executable found relative to + current directory". This has now been improved to yield a more specific error + message, informing the user how to explicitly allow running the executable using + the `-o rclone.program` and `-o sftp.command` extended options with `./`. https://github.com/restic/restic/issues/3932 https://pkg.go.dev/os/exec#hdr-Executables_in_the_current_directory @@ -1285,20 +2041,21 @@ restic users. The changes are ordered by importance. * Enhancement #3943: Ignore additional/unknown files in repository - If a restic repository had additional files in it (not created by restic), commands like `find` - and `restore` could become confused and fail with an `multiple IDs with prefix "12345678" - found` error. These commands now ignore such additional files. + If a restic repository had additional files in it (not created by restic), + commands like `find` and `restore` could become confused and fail with an + `multiple IDs with prefix "12345678" found` error. These commands now ignore + such additional files. https://github.com/restic/restic/pull/3943 https://forum.restic.net/t/which-protocol-should-i-choose-for-remote-linux-backups/5446/17 * Enhancement #3955: Improve `backup` performance for small files - When backing up small files restic was slower than it could be. In particular this affected - backups using maximum compression. + When backing up small files restic was slower than it could be. In particular + this affected backups using maximum compression. - This has been fixed by reworking the internal parallelism of the backup command, making it back - up small files around two times faster. + This has been fixed by reworking the internal parallelism of the backup command, + making it back up small files around two times faster. https://github.com/restic/restic/pull/3955 @@ -1347,22 +2104,23 @@ restic users. The changes are ordered by importance. * Bugfix #2248: Support `self-update` on Windows - Restic `self-update` would fail in situations where the operating system locks running - binaries, including Windows. The new behavior works around this by renaming the running file - and swapping the updated file in place. + Restic `self-update` would fail in situations where the operating system locks + running binaries, including Windows. The new behavior works around this by + renaming the running file and swapping the updated file in place. https://github.com/restic/restic/issues/2248 https://github.com/restic/restic/pull/3675 * Bugfix #3428: List snapshots in backend at most once to resolve snapshot IDs - Many commands support specifying a list of snapshot IDs which are then used to determine the - snapshots to be processed by the command. To resolve snapshot IDs or `latest`, and check that - these exist, restic previously listed all snapshots stored in the repository. Depending on - the backend this could be a slow and/or expensive operation. + Many commands support specifying a list of snapshot IDs which are then used to + determine the snapshots to be processed by the command. To resolve snapshot IDs + or `latest`, and check that these exist, restic previously listed all snapshots + stored in the repository. Depending on the backend this could be a slow and/or + expensive operation. 
- Restic now lists the snapshots only once and remembers the result in order to resolve all - further snapshot IDs swiftly. + Restic now lists the snapshots only once and remembers the result in order to + resolve all further snapshot IDs swiftly. https://github.com/restic/restic/issues/3428 https://github.com/restic/restic/pull/3570 @@ -1370,27 +2128,28 @@ restic users. The changes are ordered by importance. * Bugfix #3432: Fix rare 'not found in repository' error for `copy` command - In rare cases `copy` (and other commands) would report that `LoadTree(...)` returned an `id - [...] not found in repository` error. This could be caused by a backup or copy command running - concurrently. The error was only temporary; running the failed restic command a second time as - a workaround did resolve the error. + In rare cases `copy` (and other commands) would report that `LoadTree(...)` + returned an `id [...] not found in repository` error. This could be caused by a + backup or copy command running concurrently. The error was only temporary; + running the failed restic command a second time as a workaround did resolve the + error. - This issue has now been fixed by correcting the order in which restic reads data from the - repository. It is now guaranteed that restic only loads snapshots for which all necessary data - is already available. + This issue has now been fixed by correcting the order in which restic reads data + from the repository. It is now guaranteed that restic only loads snapshots for + which all necessary data is already available. https://github.com/restic/restic/issues/3432 https://github.com/restic/restic/pull/3570 * Bugfix #3681: Fix rclone (shimmed by Scoop) and sftp not working on Windows - In #3602 a fix was introduced to address the problem of `rclone` prematurely exiting when - Ctrl+C is pressed on Windows. The solution was to create the subprocess with its console - detached from the restic console. + In #3602 a fix was introduced to address the problem of `rclone` prematurely + exiting when Ctrl+C is pressed on Windows. The solution was to create the + subprocess with its console detached from the restic console. - However, this solution failed when using `rclone` installed by Scoop or using `sftp` with a - passphrase-protected private key. We've now fixed this by using a different approach to - prevent Ctrl-C from passing down too early. + However, this solution failed when using `rclone` installed by Scoop or using + `sftp` with a passphrase-protected private key. We've now fixed this by using a + different approach to prevent Ctrl-C from passing down too early. https://github.com/restic/restic/issues/3681 https://github.com/restic/restic/issues/3692 @@ -1398,28 +2157,28 @@ restic users. The changes are ordered by importance. * Bugfix #3685: The `diff` command incorrectly listed some files as added - There was a bug in the `diff` command, causing it to always show files in a removed directory as - added. This has now been fixed. + There was a bug in the `diff` command, causing it to always show files in a + removed directory as added. This has now been fixed. https://github.com/restic/restic/issues/3685 https://github.com/restic/restic/pull/3686 * Bugfix #3716: Print "wrong password" to stderr instead of stdout - If an invalid password was entered, the error message was printed on stdout and not on stderr as - intended. This has now been fixed. + If an invalid password was entered, the error message was printed on stdout and + not on stderr as intended. 
This has now been fixed. https://github.com/restic/restic/pull/3716 https://forum.restic.net/t/4965 * Bugfix #3720: Directory sync errors for repositories accessed via SMB - On Linux and macOS, accessing a repository via a SMB/CIFS mount resulted in restic failing to - save the lock file, yielding the following errors: + On Linux and macOS, accessing a repository via a SMB/CIFS mount resulted in + restic failing to save the lock file, yielding the following errors: - Save() returned error, retrying after 552.330144ms: sync /repo/locks: - no such file or directory Save() returned error, retrying after - 552.330144ms: sync /repo/locks: invalid argument + Save() returned error, retrying after 552.330144ms: sync + /repo/locks: no such file or directory Save() returned error, + retrying after 552.330144ms: sync /repo/locks: invalid argument This has now been fixed by ignoring the relevant error codes. @@ -1429,22 +2188,23 @@ restic users. The changes are ordered by importance. * Bugfix #3736: The `stats` command miscalculated restore size for multiple snapshots - Since restic 0.10.0 the restore size calculated by the `stats` command for multiple snapshots - was too low. The hardlink detection was accidentally applied across multiple snapshots and - thus ignored many files. This has now been fixed. + Since restic 0.10.0 the restore size calculated by the `stats` command for + multiple snapshots was too low. The hardlink detection was accidentally applied + across multiple snapshots and thus ignored many files. This has now been fixed. https://github.com/restic/restic/issues/3736 https://github.com/restic/restic/pull/3740 * Bugfix #3772: Correctly rebuild index for legacy repositories - After running `rebuild-index` on a legacy repository containing mixed pack files (that is, - pack files which store both metadata and file data), `check` printed warnings like `pack - 12345678 contained in several indexes: ...`. This warning was not critical, but has now - nonetheless been fixed by properly handling mixed pack files while rebuilding the index. + After running `rebuild-index` on a legacy repository containing mixed pack files + (that is, pack files which store both metadata and file data), `check` printed + warnings like `pack 12345678 contained in several indexes: ...`. This warning + was not critical, but has now nonetheless been fixed by properly handling mixed + pack files while rebuilding the index. - Running `prune` for such legacy repositories will also fix the warning by reorganizing the - pack files which caused it. + Running `prune` for such legacy repositories will also fix the warning by + reorganizing the pack files which caused it. https://github.com/restic/restic/pull/3772 https://github.com/restic/restic/pull/3884 @@ -1452,18 +2212,20 @@ restic users. The changes are ordered by importance. * Bugfix #3776: Limit number of key files tested while opening a repository - Previously, restic tested the password against every key in the repository when opening a - repository. The more keys there were in the repository, the slower this operation became. + Previously, restic tested the password against every key in the repository when + opening a repository. The more keys there were in the repository, the slower + this operation became. - Restic now tests the password against up to 20 key files in the repository. Alternatively, you - can use the `--key-hint=` option to specify a specific key file to use instead. + Restic now tests the password against up to 20 key files in the repository. 
+ Alternatively, you can use the `--key-hint=` option to specify a + specific key file to use instead. https://github.com/restic/restic/pull/3776 * Bugfix #3861: Yield error on invalid policy to `forget` - The `forget` command previously silently ignored invalid/unsupported units in the duration - options, such as e.g. `--keep-within-daily 2w`. + The `forget` command previously silently ignored invalid/unsupported units in + the duration options, such as e.g. `--keep-within-daily 2w`. Specifying an invalid/unsupported duration unit now results in an error. @@ -1472,71 +2234,78 @@ restic users. The changes are ordered by importance. * Change #1842: Support debug log creation in release builds - Creating a debug log was only possible in debug builds which required users to manually build - restic. We changed the release builds to allow creating debug logs by simply setting the - environment variable `DEBUG_LOG=logname.log`. + Creating a debug log was only possible in debug builds which required users to + manually build restic. We changed the release builds to allow creating debug + logs by simply setting the environment variable `DEBUG_LOG=logname.log`. https://github.com/restic/restic/issues/1842 https://github.com/restic/restic/pull/3826 * Change #3295: Deprecate `check --check-unused` and add further checks - Since restic 0.12.0, it is expected to still have unused blobs after running `prune`. This made - the `--check-unused` option of the `check` command rather useless and tended to confuse - users. This option has been deprecated and is now ignored. + Since restic 0.12.0, it is expected to still have unused blobs after running + `prune`. This made the `--check-unused` option of the `check` command rather + useless and tended to confuse users. This option has been deprecated and is now + ignored. - The `check` command now also warns if a repository is using either the legacy S3 layout or mixed - pack files with both tree and data blobs. The latter is known to cause performance problems. + The `check` command now also warns if a repository is using either the legacy S3 + layout or mixed pack files with both tree and data blobs. The latter is known to + cause performance problems. https://github.com/restic/restic/issues/3295 https://github.com/restic/restic/pull/3730 * Change #3680: Update dependencies and require Go 1.15 or newer - We've updated most dependencies. Since some libraries require newer language features we're - dropping support for Go 1.14, which means that restic now requires at least Go 1.15 to build. + We've updated most dependencies. Since some libraries require newer language + features we're dropping support for Go 1.14, which means that restic now + requires at least Go 1.15 to build. https://github.com/restic/restic/issues/3680 https://github.com/restic/restic/issues/3883 * Change #3742: Replace `--repo2` option used by `init`/`copy` with `--from-repo` - The `init` and `copy` commands can read data from another repository. However, confusingly - `--repo2` referred to the repository *from* which the `init` command copies parameters, but - for the `copy` command `--repo2` referred to the copy *destination*. + The `init` and `copy` commands can read data from another repository. However, + confusingly `--repo2` referred to the repository *from* which the `init` command + copies parameters, but for the `copy` command `--repo2` referred to the copy + *destination*. 
- We've introduced a new option, `--from-repo`, which always refers to the source repository - for both commands. The old parameter names have been deprecated but still work. To create a new - repository and copy all snapshots to it, the commands are now as follows: + We've introduced a new option, `--from-repo`, which always refers to the source + repository for both commands. The old parameter names have been deprecated but + still work. To create a new repository and copy all snapshots to it, the + commands are now as follows: - ``` restic -r /srv/restic-repo-copy init --from-repo /srv/restic-repo - --copy-chunker-params restic -r /srv/restic-repo-copy copy --from-repo - /srv/restic-repo ``` + ``` + restic -r /srv/restic-repo-copy init --from-repo /srv/restic-repo --copy-chunker-params + restic -r /srv/restic-repo-copy copy --from-repo /srv/restic-repo + ``` https://github.com/restic/restic/pull/3742 https://forum.restic.net/t/5017 * Enhancement #21: Add compression support - We've added compression support to the restic repository format. To create a repository using - the new format run `init --repository-version 2`. Please note that the repository cannot be - read by restic versions prior to 0.14.0. + We've added compression support to the restic repository format. To create a + repository using the new format run `init --repository-version 2`. Please note + that the repository cannot be read by restic versions prior to 0.14.0. - You can configure whether data is compressed with the option `--compression`. It can be set to - `auto` (the default, which will compress very fast), `max` (which will trade backup speed and - CPU usage for better compression), or `off` (which disables compression). Each setting is - only applied for the current run of restic and does *not* apply to future runs. The option can - also be set via the environment variable `RESTIC_COMPRESSION`. + You can configure whether data is compressed with the option `--compression`. It + can be set to `auto` (the default, which will compress very fast), `max` (which + will trade backup speed and CPU usage for better compression), or `off` (which + disables compression). Each setting is only applied for the current run of + restic and does *not* apply to future runs. The option can also be set via the + environment variable `RESTIC_COMPRESSION`. - To upgrade in place run `migrate upgrade_repo_v2` followed by `prune`. See the documentation - for more details. The migration checks the repository integrity and upgrades the repository - format, but will not change any data. Afterwards, prune will rewrite the metadata to make use of - compression. + To upgrade in place run `migrate upgrade_repo_v2` followed by `prune`. See the + documentation for more details. The migration checks the repository integrity + and upgrades the repository format, but will not change any data. Afterwards, + prune will rewrite the metadata to make use of compression. - As an alternative you can use the `copy` command to migrate snapshots; First create a new - repository using `init --repository-version 2 --copy-chunker-params --repo2 - path/to/old/repo`, and then use the `copy` command to copy all snapshots to the new - repository. + As an alternative you can use the `copy` command to migrate snapshots; First + create a new repository using `init --repository-version 2 --copy-chunker-params + --repo2 path/to/old/repo`, and then use the `copy` command to copy all snapshots + to the new repository. 
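To make the speed/ratio tradeoff behind the `--compression` settings described above concrete, here is a small, hypothetical Go benchmark using the `github.com/klauspost/compress/zstd` encoder. The mapping of restic's `auto` and `max` settings onto the two zstd levels used here is an assumption for illustration only; the levels restic actually uses may differ.

```
package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/klauspost/compress/zstd"
)

// compressWith encodes data at the given zstd level and reports the
// compressed size together with the time the encoding took.
func compressWith(data []byte, level zstd.EncoderLevel) (int, time.Duration, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level))
	if err != nil {
		return 0, 0, err
	}
	defer enc.Close()

	start := time.Now()
	out := enc.EncodeAll(data, nil)
	return len(out), time.Since(start), nil
}

func main() {
	// Synthetic, fairly compressible sample input.
	data := bytes.Repeat([]byte("restic repository metadata sample "), 1<<15)

	// Assumption for illustration only: a fast level standing in for `auto`
	// and a strong level standing in for `max`.
	for _, level := range []zstd.EncoderLevel{zstd.SpeedDefault, zstd.SpeedBestCompression} {
		size, elapsed, err := compressWith(data, level)
		if err != nil {
			panic(err)
		}
		fmt.Printf("level %v: %d -> %d bytes in %v\n", level, len(data), size, elapsed)
	}
}
```

On compressible input the stronger level generally produces a smaller result at the cost of more CPU time, which is exactly the tradeoff the `--compression` option exposes.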
https://github.com/restic/restic/issues/21 https://github.com/restic/restic/issues/3779 @@ -1546,25 +2315,28 @@ restic users. The changes are ordered by importance. * Enhancement #1153: Support pruning even when the disk is full - When running out of disk space it was no longer possible to add or remove data from a repository. - To help with recovering from such a deadlock, the prune command now supports an - `--unsafe-recover-no-free-space` option to recover from these situations. Make sure to - read the documentation first! + When running out of disk space it was no longer possible to add or remove data + from a repository. To help with recovering from such a deadlock, the prune + command now supports an `--unsafe-recover-no-free-space` option to recover from + these situations. Make sure to read the documentation first! https://github.com/restic/restic/issues/1153 https://github.com/restic/restic/pull/3481 * Enhancement #2162: Adaptive IO concurrency based on backend connections - Many commands used hard-coded limits for the number of concurrent operations. This prevented - speed improvements by increasing the number of connections used by a backend. + Many commands used hard-coded limits for the number of concurrent operations. + This prevented speed improvements by increasing the number of connections used + by a backend. - These limits have now been replaced by using the configured number of backend connections - instead, which can be controlled using the `-o .connections=5` option. - Commands will then automatically scale their parallelism accordingly. + These limits have now been replaced by using the configured number of backend + connections instead, which can be controlled using the `-o + .connections=5` option. Commands will then automatically scale + their parallelism accordingly. - To limit the number of CPU cores used by restic, you can set the environment variable - `GOMAXPROCS` accordingly. For example to use a single CPU core, use `GOMAXPROCS=1`. + To limit the number of CPU cores used by restic, you can set the environment + variable `GOMAXPROCS` accordingly. For example to use a single CPU core, use + `GOMAXPROCS=1`. https://github.com/restic/restic/issues/2162 https://github.com/restic/restic/issues/1467 @@ -1572,45 +2344,47 @@ restic users. The changes are ordered by importance. * Enhancement #2291: Allow pack size customization - Restic now uses a target pack size of 16 MiB by default. This can be customized using the - `--pack-size size` option. Supported pack sizes range between 4 and 128 MiB. + Restic now uses a target pack size of 16 MiB by default. This can be customized + using the `--pack-size size` option. Supported pack sizes range between 4 and + 128 MiB. - It is possible to migrate an existing repository to _larger_ pack files using `prune - --repack-small`. This will rewrite every pack file which is significantly smaller than the - target size. + It is possible to migrate an existing repository to _larger_ pack files using + `prune --repack-small`. This will rewrite every pack file which is significantly + smaller than the target size. https://github.com/restic/restic/issues/2291 https://github.com/restic/restic/pull/3731 * Enhancement #2295: Allow use of SAS token to authenticate to Azure - Previously restic only supported AccountKeys to authenticate to Azure storage accounts, - which necessitates giving a significant amount of access. 
+ Previously restic only supported AccountKeys to authenticate to Azure storage + accounts, which necessitates giving a significant amount of access. - We added support for Azure SAS tokens which are a more fine-grained and time-limited manner of - granting access. Set the `AZURE_ACCOUNT_NAME` and `AZURE_ACCOUNT_SAS` environment - variables to use a SAS token for authentication. Note that if `AZURE_ACCOUNT_KEY` is set, it - will take precedence. + We added support for Azure SAS tokens which are a more fine-grained and + time-limited manner of granting access. Set the `AZURE_ACCOUNT_NAME` and + `AZURE_ACCOUNT_SAS` environment variables to use a SAS token for authentication. + Note that if `AZURE_ACCOUNT_KEY` is set, it will take precedence. https://github.com/restic/restic/issues/2295 https://github.com/restic/restic/pull/3661 * Enhancement #2351: Use config file permissions to control file group access - Previously files in a local/SFTP repository would always end up with very restrictive access - permissions, allowing access only to the owner. This prevented a number of valid use-cases - involving groups and ACLs. + Previously files in a local/SFTP repository would always end up with very + restrictive access permissions, allowing access only to the owner. This + prevented a number of valid use-cases involving groups and ACLs. - We now use the permissions of the config file in the repository to decide whether group access - should be given to newly created repository files or not. We arrange for repository files to be - created group readable exactly when the repository config file is group readable. + We now use the permissions of the config file in the repository to decide + whether group access should be given to newly created repository files or not. + We arrange for repository files to be created group readable exactly when the + repository config file is group readable. - To opt-in to group readable repositories, a simple `chmod -R g+r` or equivalent on the config - file can be used. For repositories that should be writable by group members a tad more setup is - required, see the docs. + To opt-in to group readable repositories, a simple `chmod -R g+r` or equivalent + on the config file can be used. For repositories that should be writable by + group members a tad more setup is required, see the docs. - Posix ACLs can also be used now that the group permissions being forced to zero no longer masks - the effect of ACL entries. + Posix ACLs can also be used now that the group permissions being forced to zero + no longer masks the effect of ACL entries. https://github.com/restic/restic/issues/2351 https://github.com/restic/restic/pull/3419 @@ -1618,27 +2392,29 @@ restic users. The changes are ordered by importance. * Enhancement #2696: Improve backup speed with many small files - We have restructured the backup pipeline to continue reading files while all upload - connections are busy. This allows the backup to already prepare the next data file such that the - upload can continue as soon as a connection becomes available. This can especially improve the - backup performance for high latency backends. + We have restructured the backup pipeline to continue reading files while all + upload connections are busy. This allows the backup to already prepare the next + data file such that the upload can continue as soon as a connection becomes + available. This can especially improve the backup performance for high latency + backends. 
- The upload concurrency is now controlled using the `-o .connections=5` - option. + The upload concurrency is now controlled using the `-o + .connections=5` option. https://github.com/restic/restic/issues/2696 https://github.com/restic/restic/pull/3489 * Enhancement #2907: Make snapshot directory structure of `mount` command customizable - We've added the possibility to customize the snapshot directory structure of the `mount` - command using templates passed to the `--snapshot-template` option. The formatting of - snapshots' timestamps is now controlled using `--time-template` and supports - subdirectories to for example group snapshots by year. Please see `restic help mount` for - further details. + We've added the possibility to customize the snapshot directory structure of the + `mount` command using templates passed to the `--snapshot-template` option. The + formatting of snapshots' timestamps is now controlled using `--time-template` + and supports subdirectories to for example group snapshots by year. Please see + `restic help mount` for further details. - Characters in tag names which are not allowed in a filename are replaced by underscores `_`. For - example a tag `foo/bar` will result in a directory name of `foo_bar`. + Characters in tag names which are not allowed in a filename are replaced by + underscores `_`. For example a tag `foo/bar` will result in a directory name of + `foo_bar`. https://github.com/restic/restic/issues/2907 https://github.com/restic/restic/pull/2913 @@ -1646,8 +2422,9 @@ restic users. The changes are ordered by importance. * Enhancement #2923: Improve speed of `copy` command - The `copy` command could require a long time to copy snapshots for non-local backends. This has - been improved to provide a throughput comparable to the `restore` command. + The `copy` command could require a long time to copy snapshots for non-local + backends. This has been improved to provide a throughput comparable to the + `restore` command. Additionally, `copy` now displays a progress bar. @@ -1656,21 +2433,23 @@ restic users. The changes are ordered by importance. * Enhancement #3114: Optimize handling of duplicate blobs in `prune` - Restic `prune` always used to repack all data files containing duplicate blobs. This - effectively removed all duplicates during prune. However, as a consequence all these data - files were repacked even if the unused repository space threshold could be reached with less - work. + Restic `prune` always used to repack all data files containing duplicate blobs. + This effectively removed all duplicates during prune. However, as a consequence + all these data files were repacked even if the unused repository space threshold + could be reached with less work. - This is now changed and `prune` works nice and fast even when there are lots of duplicate blobs. + This is now changed and `prune` works nice and fast even when there are lots of + duplicate blobs. https://github.com/restic/restic/issues/3114 https://github.com/restic/restic/pull/3290 * Enhancement #3465: Improve handling of temporary files on Windows - In some cases restic failed to delete temporary files, causing the current command to fail. - This has now been fixed by ensuring that Windows automatically deletes the file. In addition, - temporary files are only written to disk when necessary, reducing disk writes. + In some cases restic failed to delete temporary files, causing the current + command to fail. 
This has now been fixed by ensuring that Windows automatically + deletes the file. In addition, temporary files are only written to disk when + necessary, reducing disk writes. https://github.com/restic/restic/issues/3465 https://github.com/restic/restic/issues/1551 @@ -1678,22 +2457,23 @@ restic users. The changes are ordered by importance. * Enhancement #3475: Allow limiting IO concurrency for local and SFTP backend - Restic did not support limiting the IO concurrency / number of connections for accessing - repositories stored using the local or SFTP backends. The number of connections is now limited - as for other backends, and can be configured via the `-o local.connections=2` and `-o - sftp.connections=5` options. This ensures that restic does not overwhelm the backend with - concurrent IO operations. + Restic did not support limiting the IO concurrency / number of connections for + accessing repositories stored using the local or SFTP backends. The number of + connections is now limited as for other backends, and can be configured via the + `-o local.connections=2` and `-o sftp.connections=5` options. This ensures that + restic does not overwhelm the backend with concurrent IO operations. https://github.com/restic/restic/pull/3475 * Enhancement #3484: Stream data in `check` and `prune` commands - The commands `check --read-data` and `prune` previously downloaded data files into - temporary files which could end up being written to disk. This could cause a large amount of data - being written to disk. + The commands `check --read-data` and `prune` previously downloaded data files + into temporary files which could end up being written to disk. This could cause + a large amount of data being written to disk. - The pack files are now instead streamed, which removes the need for temporary files. Please - note that *uploads* during `backup` and `prune` still require temporary files. + The pack files are now instead streamed, which removes the need for temporary + files. Please note that *uploads* during `backup` and `prune` still require + temporary files. https://github.com/restic/restic/issues/3710 https://github.com/restic/restic/pull/3484 @@ -1702,19 +2482,19 @@ restic users. The changes are ordered by importance. * Enhancement #3709: Validate exclude patterns before backing up Exclude patterns provided via `--exclude`, `--iexclude`, `--exclude-file` or - `--iexclude-file` previously weren't validated. As a consequence, invalid patterns - resulted in files that were meant to be excluded being backed up. + `--iexclude-file` previously weren't validated. As a consequence, invalid + patterns resulted in files that were meant to be excluded being backed up. - Restic now validates all patterns before running the backup and aborts with a fatal error if an - invalid pattern is detected. + Restic now validates all patterns before running the backup and aborts with a + fatal error if an invalid pattern is detected. https://github.com/restic/restic/issues/3709 https://github.com/restic/restic/pull/3734 * Enhancement #3729: Display full IDs in `check` warnings - When running commands to inspect or repair a damaged repository, it is often necessary to - supply the full IDs of objects stored in the repository. + When running commands to inspect or repair a damaged repository, it is often + necessary to supply the full IDs of objects stored in the repository. The output of `check` now includes full IDs instead of their shortened variant. @@ -1722,28 +2502,29 @@ restic users. 
The changes are ordered by importance. * Enhancement #3773: Optimize memory usage for directories with many files - Backing up a directory with hundreds of thousands or more files caused restic to require large - amounts of memory. We've now optimized the `backup` command such that it requires up to 30% less - memory. + Backing up a directory with hundreds of thousands or more files caused restic to + require large amounts of memory. We've now optimized the `backup` command such + that it requires up to 30% less memory. https://github.com/restic/restic/pull/3773 * Enhancement #3819: Validate include/exclude patterns before restoring Patterns provided to `restore` via `--exclude`, `--iexclude`, `--include` and - `--iinclude` weren't validated before running the restore. Invalid patterns would result in - error messages being printed repeatedly, and possibly unwanted files being restored. + `--iinclude` weren't validated before running the restore. Invalid patterns + would result in error messages being printed repeatedly, and possibly unwanted + files being restored. - Restic now validates all patterns before running the restore, and aborts with a fatal error if - an invalid pattern is detected. + Restic now validates all patterns before running the restore, and aborts with a + fatal error if an invalid pattern is detected. https://github.com/restic/restic/pull/3819 * Enhancement #3837: Improve SFTP repository initialization over slow links - The `init` command, when used on an SFTP backend, now sends multiple `mkdir` commands to the - backend concurrently. This reduces the waiting times when creating a repository over a very - slow connection. + The `init` command, when used on an SFTP backend, now sends multiple `mkdir` + commands to the backend concurrently. This reduces the waiting times when + creating a repository over a very slow connection. https://github.com/restic/restic/issues/3837 https://github.com/restic/restic/pull/3840 @@ -1794,9 +2575,9 @@ restic users. The changes are ordered by importance. * Bugfix #1106: Never lock repository for `list locks` - The `list locks` command previously locked to the repository by default. This had the problem - that it wouldn't work for an exclusively locked repository and that the command would also - display its own lock file which can be confusing. + The `list locks` command previously locked to the repository by default. This + had the problem that it wouldn't work for an exclusively locked repository and + that the command would also display its own lock file which can be confusing. Now, the `list locks` command never locks the repository. @@ -1805,22 +2586,24 @@ restic users. The changes are ordered by importance. * Bugfix #2345: Make cache crash-resistant and usable by multiple concurrent processes - The restic cache directory (`RESTIC_CACHE_DIR`) could end up in a broken state in the event of - restic (or the OS) crashing. This is now less likely to occur as files are downloaded to a - temporary location before being moved to their proper location. + The restic cache directory (`RESTIC_CACHE_DIR`) could end up in a broken state + in the event of restic (or the OS) crashing. This is now less likely to occur as + files are downloaded to a temporary location before being moved to their proper + location. - This also allows multiple concurrent restic processes to operate on a single repository - without conflicts. Previously, concurrent operations could cause segfaults because the - processes saw each other's partially downloaded files. 
+ This also allows multiple concurrent restic processes to operate on a single + repository without conflicts. Previously, concurrent operations could cause + segfaults because the processes saw each other's partially downloaded files. https://github.com/restic/restic/issues/2345 https://github.com/restic/restic/pull/2838 * Bugfix #2452: Improve error handling of repository locking - Previously, when the lock refresh failed to delete the old lock file, it forgot about the newly - created one. Instead it continued trying to delete the old (usually no longer existing) lock - file and thus over time lots of lock files accumulated. This has now been fixed. + Previously, when the lock refresh failed to delete the old lock file, it forgot + about the newly created one. Instead it continued trying to delete the old + (usually no longer existing) lock file and thus over time lots of lock files + accumulated. This has now been fixed. https://github.com/restic/restic/issues/2452 https://github.com/restic/restic/issues/2473 @@ -1829,43 +2612,45 @@ restic users. The changes are ordered by importance. * Bugfix #2738: Don't print progress for `backup --json --quiet` - Unlike the text output, the `--json` output format still printed progress information even in - `--quiet` mode. This has now been fixed by always disabling the progress output in quiet mode. + Unlike the text output, the `--json` output format still printed progress + information even in `--quiet` mode. This has now been fixed by always disabling + the progress output in quiet mode. https://github.com/restic/restic/issues/2738 https://github.com/restic/restic/pull/3264 * Bugfix #3382: Make `check` command honor `RESTIC_CACHE_DIR` environment variable - Previously, the `check` command didn't honor the `RESTIC_CACHE_DIR` environment variable, - which caused problems in certain system/usage configurations. This has now been fixed. + Previously, the `check` command didn't honor the `RESTIC_CACHE_DIR` environment + variable, which caused problems in certain system/usage configurations. This has + now been fixed. https://github.com/restic/restic/issues/3382 https://github.com/restic/restic/pull/3474 * Bugfix #3488: `rebuild-index` failed if an index file was damaged - Previously, the `rebuild-index` command would fail with an error if an index file was damaged - or truncated. This has now been fixed. + Previously, the `rebuild-index` command would fail with an error if an index + file was damaged or truncated. This has now been fixed. - On older restic versions, a (slow) workaround is to use `rebuild-index --read-all-packs` or - to manually delete the damaged index. + On older restic versions, a (slow) workaround is to use `rebuild-index + --read-all-packs` or to manually delete the damaged index. https://github.com/restic/restic/pull/3488 * Bugfix #3518: Make `copy` command honor `--no-lock` for source repository - The `copy` command previously did not respect the `--no-lock` option for the source - repository, causing failures with read-only storage backends. This has now been fixed such - that the option is now respected. + The `copy` command previously did not respect the `--no-lock` option for the + source repository, causing failures with read-only storage backends. This has + now been fixed such that the option is now respected. 
https://github.com/restic/restic/issues/3518 https://github.com/restic/restic/pull/3589 * Bugfix #3556: Fix hang with Backblaze B2 on SSL certificate authority error - Previously, if a request failed with an SSL unknown certificate authority error, the B2 - backend retried indefinitely and restic would appear to hang. + Previously, if a request failed with an SSL unknown certificate authority error, + the B2 backend retried indefinitely and restic would appear to hang. This has now been fixed and restic instead fails with an error message. @@ -1875,95 +2660,103 @@ restic users. The changes are ordered by importance. * Bugfix #3591: Fix handling of `prune --max-repack-size=0` - Restic ignored the `--max-repack-size` option when passing a value of 0. This has now been - fixed. + Restic ignored the `--max-repack-size` option when passing a value of 0. This + has now been fixed. - As a workaround, `--max-repack-size=1` can be used with older versions of restic. + As a workaround, `--max-repack-size=1` can be used with older versions of + restic. https://github.com/restic/restic/pull/3591 * Bugfix #3601: Fix rclone backend prematurely exiting when receiving SIGINT on Windows - Previously, pressing Ctrl+C in a Windows console where restic was running with rclone as the - backend would cause rclone to exit prematurely due to getting a `SIGINT` signal at the same time - as restic. Restic would then wait for a long time for time with "unexpected EOF" and "rclone - stdio connection already closed" errors. + Previously, pressing Ctrl+C in a Windows console where restic was running with + rclone as the backend would cause rclone to exit prematurely due to getting a + `SIGINT` signal at the same time as restic. Restic would then wait for a long + time with "unexpected EOF" and "rclone stdio connection already closed" + errors. - This has now been fixed by restic starting the rclone process detached from the console restic - runs in (similar to starting processes in a new process group on Linux), which enables restic to - gracefully clean up rclone (which now never gets the `SIGINT`). + This has now been fixed by restic starting the rclone process detached from the + console restic runs in (similar to starting processes in a new process group on + Linux), which enables restic to gracefully clean up rclone (which now never gets + the `SIGINT`). https://github.com/restic/restic/issues/3601 https://github.com/restic/restic/pull/3602 * Bugfix #3619: Avoid choosing parent snapshots newer than time of new snapshot - The `backup` command, when a `--parent` was not provided, previously chose the most recent - matching snapshot as the parent snapshot. However, this didn't make sense when the user passed - `--time` to create a new snapshot older than the most recent snapshot. + The `backup` command, when a `--parent` was not provided, previously chose the + most recent matching snapshot as the parent snapshot. However, this didn't make + sense when the user passed `--time` to create a new snapshot older than the most + recent snapshot. - Instead, `backup` now chooses the most recent snapshot which is not newer than the - snapshot-being-created's timestamp, to avoid any time travel. + Instead, `backup` now chooses the most recent snapshot which is not newer than + the snapshot-being-created's timestamp, to avoid any time travel.
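A minimal sketch of the scenario described in #3619, assuming the usual `--time` format `YYYY-MM-DD HH:MM:SS` (the path is only an example):

    restic backup --time "2015-02-12 13:00:00" /home/user

With this change, restic picks the most recent existing snapshot that is not newer than the given timestamp as the parent, rather than blindly using the newest one.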
https://github.com/restic/restic/pull/3619 * Bugfix #3667: The `mount` command now reports symlinks sizes - Symlinks used to have size zero in restic mountpoints, confusing some third-party tools. They - now have a size equal to the byte length of their target path, as required by POSIX. + Symlinks used to have size zero in restic mountpoints, confusing some + third-party tools. They now have a size equal to the byte length of their target + path, as required by POSIX. https://github.com/restic/restic/issues/3667 https://github.com/restic/restic/pull/3668 * Change #3519: Require Go 1.14 or newer - Restic now requires Go 1.14 to build. This allows it to use new standard library features - instead of an external dependency. + Restic now requires Go 1.14 to build. This allows it to use new standard library + features instead of an external dependency. https://github.com/restic/restic/issues/3519 * Change #3641: Ignore parent snapshot for `backup --stdin` - Restic uses a parent snapshot to speed up directory scanning when performing backups, but this - only wasted time and memory when the backup source is stdin (using the `--stdin` option of the - `backup` command), since no directory scanning is performed in this case. + Restic uses a parent snapshot to speed up directory scanning when performing + backups, but this only wasted time and memory when the backup source is stdin + (using the `--stdin` option of the `backup` command), since no directory + scanning is performed in this case. - Snapshots made with `backup --stdin` no longer have a parent snapshot, which allows restic to - skip some startup operations and saves a bit of resources. + Snapshots made with `backup --stdin` no longer have a parent snapshot, which + allows restic to skip some startup operations and saves a bit of resources. - The `--parent` option is still available for `backup --stdin`, but is now ignored. + The `--parent` option is still available for `backup --stdin`, but is now + ignored. https://github.com/restic/restic/issues/3641 https://github.com/restic/restic/pull/3645 * Enhancement #233: Support negative include/exclude patterns - If a pattern starts with an exclamation mark and it matches a file that was previously matched by - a regular pattern, the match is cancelled. Notably, this can be used with `--exclude-file` to - cancel the exclusion of some files. + If a pattern starts with an exclamation mark and it matches a file that was + previously matched by a regular pattern, the match is cancelled. Notably, this + can be used with `--exclude-file` to cancel the exclusion of some files. - It works similarly to `.gitignore`, with the same limitation; Once a directory is excluded, it - is not possible to include files inside the directory. + It works similarly to `.gitignore`, with the same limitation; Once a directory + is excluded, it is not possible to include files inside the directory. Example of use as an exclude pattern for the `backup` command: $HOME/**/* !$HOME/Documents !$HOME/code !$HOME/.emacs.d !$HOME/games # [...] - node_modules *~ *.o *.lo *.pyc # [...] $HOME/code/linux/* !$HOME/code/linux/.git # [...] + node_modules *~ *.o *.lo *.pyc # [...] $HOME/code/linux/* !$HOME/code/linux/.git + # [...] https://github.com/restic/restic/issues/233 https://github.com/restic/restic/pull/2311 * Enhancement #1542: Add `--dry-run`/`-n` option to `backup` command - Testing exclude filters and other configuration options was error prone as wrong filters - could cause files to be uploaded unintentionally. 
It was also not possible to estimate - beforehand how much data would be uploaded. + Testing exclude filters and other configuration options was error prone as wrong + filters could cause files to be uploaded unintentionally. It was also not + possible to estimate beforehand how much data would be uploaded. - The `backup` command now has a `--dry-run`/`-n` option, which performs all the normal steps of - a backup without actually writing anything to the repository. + The `backup` command now has a `--dry-run`/`-n` option, which performs all the + normal steps of a backup without actually writing anything to the repository. - Passing -vv will log information about files that would be added, allowing for verification of - source and exclusion options before running the real backup. + Passing -vv will log information about files that would be added, allowing for + verification of source and exclusion options before running the real backup. https://github.com/restic/restic/issues/1542 https://github.com/restic/restic/pull/2308 @@ -1972,14 +2765,14 @@ restic users. The changes are ordered by importance. * Enhancement #2202: Add upload checksum for Azure, GS, S3 and Swift backends - Previously only the B2 and partially the Swift backends verified the integrity of uploaded - (encrypted) files. The verification works by informing the backend about the expected hash of - the uploaded file. The backend then verifies the upload and thereby rules out any data - corruption during upload. + Previously only the B2 and partially the Swift backends verified the integrity + of uploaded (encrypted) files. The verification works by informing the backend + about the expected hash of the uploaded file. The backend then verifies the + upload and thereby rules out any data corruption during upload. - We have now added upload checksums for the Azure, GS, S3 and Swift backends, which besides - integrity checking for uploads also means that restic can now be used to store backups in S3 - buckets which have Object Lock enabled. + We have now added upload checksums for the Azure, GS, S3 and Swift backends, + which besides integrity checking for uploads also means that restic can now be + used to store backups in S3 buckets which have Object Lock enabled. https://github.com/restic/restic/issues/2202 https://github.com/restic/restic/issues/2700 @@ -1988,65 +2781,68 @@ restic users. The changes are ordered by importance. * Enhancement #2388: Add warning for S3 if partial credentials are provided - Previously restic did not notify about incomplete credentials when using the S3 backend, - instead just reporting access denied. + Previously restic did not notify about incomplete credentials when using the S3 + backend, instead just reporting access denied. - Restic now checks that both the AWS key ID and secret environment variables are set before - connecting to the remote server, and reports an error if not. + Restic now checks that both the AWS key ID and secret environment variables are + set before connecting to the remote server, and reports an error if not. https://github.com/restic/restic/issues/2388 https://github.com/restic/restic/pull/3532 * Enhancement #2508: Support JSON output and quiet mode for the `diff` command - The `diff` command now supports outputting machine-readable output in JSON format. To enable - this, pass the `--json` option to the command. To only print the summary and suppress detailed - output, pass the `--quiet` option. 
+ The `diff` command now supports outputting machine-readable output in JSON + format. To enable this, pass the `--json` option to the command. To only print + the summary and suppress detailed output, pass the `--quiet` option. https://github.com/restic/restic/issues/2508 https://github.com/restic/restic/pull/3592 * Enhancement #2594: Speed up the `restore --verify` command - The `--verify` option lets the `restore` command verify the file content after it has restored - a snapshot. The performance of this operation has now been improved by up to a factor of two. + The `--verify` option lets the `restore` command verify the file content after + it has restored a snapshot. The performance of this operation has now been + improved by up to a factor of two. https://github.com/restic/restic/pull/2594 * Enhancement #2656: Add flag to disable TLS verification for self-signed certificates - There is now an `--insecure-tls` global option in restic, which disables TLS verification for - self-signed certificates in order to support some development workflows. + There is now an `--insecure-tls` global option in restic, which disables TLS + verification for self-signed certificates in order to support some development + workflows. https://github.com/restic/restic/issues/2656 https://github.com/restic/restic/pull/2657 * Enhancement #2816: The `backup` command no longer updates file access times on Linux - When reading files during backup, restic used to cause the operating system to update the - files' access times. Note that this did not apply to filesystems with disabled file access - times. + When reading files during backup, restic used to cause the operating system to + update the files' access times. Note that this did not apply to filesystems with + disabled file access times. - Restic now instructs the operating system not to update the file access time, if the user - running restic is the file owner or has root permissions. + Restic now instructs the operating system not to update the file access time, if + the user running restic is the file owner or has root permissions. https://github.com/restic/restic/pull/2816 * Enhancement #2880: Make `recover` collect only unreferenced trees - Previously, the `recover` command used to generate a snapshot containing *all* root trees, - even those which were already referenced by a snapshot. + Previously, the `recover` command used to generate a snapshot containing *all* + root trees, even those which were already referenced by a snapshot. - This has been improved such that it now only processes trees not already referenced by any - snapshot. + This has been improved such that it now only processes trees not already + referenced by any snapshot. https://github.com/restic/restic/pull/2880 * Enhancement #3003: Atomic uploads for the SFTP backend - The SFTP backend did not upload files atomically. An interrupted upload could leave an - incomplete file behind which could prevent restic from accessing the repository. This has now - been fixed and uploads in the SFTP backend are done atomically. + The SFTP backend did not upload files atomically. An interrupted upload could + leave an incomplete file behind which could prevent restic from accessing the + repository. This has now been fixed and uploads in the SFTP backend are done + atomically. https://github.com/restic/restic/issues/3003 https://github.com/restic/restic/pull/3524 @@ -2060,25 +2856,27 @@ restic users. The changes are ordered by importance. 
* Enhancement #3429: Verify that new or modified keys are stored correctly - When adding a new key or changing the password of a key, restic used to just create the new key (and - remove the old one, when changing the password). There was no verification that the new key was - stored correctly and works properly. As the repository cannot be decrypted without a valid key - file, this could in rare cases cause the repository to become inaccessible. + When adding a new key or changing the password of a key, restic used to just + create the new key (and remove the old one, when changing the password). There + was no verification that the new key was stored correctly and works properly. As + the repository cannot be decrypted without a valid key file, this could in rare + cases cause the repository to become inaccessible. - Restic now checks that new key files actually work before continuing. This can protect against - some (rare) cases of hardware or storage problems. + Restic now checks that new key files actually work before continuing. This can + protect against some (rare) cases of hardware or storage problems. https://github.com/restic/restic/pull/3429 * Enhancement #3436: Improve local backend's resilience to (system) crashes - Restic now ensures that files stored using the `local` backend are created atomically (that - is, files are either stored completely or not at all). This ensures that no incomplete files are - left behind even if restic is terminated while writing a file. + Restic now ensures that files stored using the `local` backend are created + atomically (that is, files are either stored completely or not at all). This + ensures that no incomplete files are left behind even if restic is terminated + while writing a file. - In addition, restic now tries to ensure that the directory in the repository which contains a - newly uploaded file is also written to disk. This can prevent missing files if the system - crashes or the disk is not properly unmounted. + In addition, restic now tries to ensure that the directory in the repository + which contains a newly uploaded file is also written to disk. This can prevent + missing files if the system crashes or the disk is not properly unmounted. https://github.com/restic/restic/pull/3436 @@ -2086,54 +2884,56 @@ restic users. The changes are ordered by importance. Restic used to silently ignore the `--no-lock` option of the `forget` command. - It now skips creation of lock file in case both `--dry-run` and `--no-lock` are specified. If - `--no-lock` option is specified without `--dry-run`, restic prints a warning message to - stderr. + It now skips creation of lock file in case both `--dry-run` and `--no-lock` are + specified. If `--no-lock` option is specified without `--dry-run`, restic prints + a warning message to stderr. https://github.com/restic/restic/issues/3464 https://github.com/restic/restic/pull/3623 * Enhancement #3490: Support random subset by size in `check --read-data-subset` - The `--read-data-subset` option of the `check` command now supports a third way of specifying - the subset to check, namely `nS` where `n` is a size in bytes with suffix `S` as k/K, m/M, g/G or - t/T. + The `--read-data-subset` option of the `check` command now supports a third way + of specifying the subset to check, namely `nS` where `n` is a size in bytes with + suffix `S` as k/K, m/M, g/G or t/T. 
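A short, illustrative invocation of the new size-based subset selection, assuming roughly 2 GiB of pack data should be sampled:

    restic check --read-data-subset=2G

The previously available forms (`n/t` buckets and percentages such as `2.5%`) continue to work as before.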
https://github.com/restic/restic/issues/3490 https://github.com/restic/restic/pull/3548 * Enhancement #3508: Cache blobs read by the `dump` command - When dumping a file using the `dump` command, restic did not cache blobs in any way, so even - consecutive runs of the same blob were loaded from the repository again and again, slowing down - the dump. + When dumping a file using the `dump` command, restic did not cache blobs in any + way, so even consecutive runs of the same blob were loaded from the repository + again and again, slowing down the dump. - Now, the caching mechanism already used by the `fuse` command is also used by the `dump` - command. This makes dumping much faster, especially for sparse files. + Now, the caching mechanism already used by the `fuse` command is also used by + the `dump` command. This makes dumping much faster, especially for sparse files. https://github.com/restic/restic/pull/3508 * Enhancement #3511: Support configurable timeout for the rclone backend - A slow rclone backend could cause restic to time out while waiting for the repository to open. - Restic now offers an `-o rclone.timeout` option to make this timeout configurable. + A slow rclone backend could cause restic to time out while waiting for the + repository to open. Restic now offers an `-o rclone.timeout` option to make this + timeout configurable. https://github.com/restic/restic/issues/3511 https://github.com/restic/restic/pull/3514 * Enhancement #3541: Improve handling of temporary B2 delete errors - Deleting files on B2 could sometimes fail temporarily, which required restic to retry the - delete operation. In some cases the file was deleted nevertheless, causing the retries and - ultimately the restic command to fail. This has now been fixed. + Deleting files on B2 could sometimes fail temporarily, which required restic to + retry the delete operation. In some cases the file was deleted nevertheless, + causing the retries and ultimately the restic command to fail. This has now been + fixed. https://github.com/restic/restic/issues/3541 https://github.com/restic/restic/pull/3544 * Enhancement #3542: Add file mode in symbolic notation to `ls --json` - The `ls --json` command now provides the file mode in symbolic notation (using the - `permissions` key), aligned with `find --json`. + The `ls --json` command now provides the file mode in symbolic notation (using + the `permissions` key), aligned with `find --json`. https://github.com/restic/restic/issues/3542 https://github.com/restic/restic/pull/3573 @@ -2141,11 +2941,12 @@ restic users. The changes are ordered by importance. * Enhancement #3593: Improve `copy` performance by parallelizing IO - Restic copy previously only used a single thread for copying blobs between repositories, - which resulted in limited performance when copying small blobs to/from a high latency backend - (i.e. any remote backend, especially b2). + Restic copy previously only used a single thread for copying blobs between + repositories, which resulted in limited performance when copying small blobs + to/from a high latency backend (i.e. any remote backend, especially b2). - Copying will now use 8 parallel threads to increase the throughput of the copy operation. + Copying will now use 8 parallel threads to increase the throughput of the copy + operation. https://github.com/restic/restic/pull/3593 @@ -2183,9 +2984,9 @@ restic users. The changes are ordered by importance. 
* Bugfix #2742: Improve error handling for rclone and REST backend over HTTP2 - When retrieving data from the rclone / REST backend while also using HTTP2 restic did not detect - when no data was returned at all. This could cause for example the `check` command to report the - following error: + When retrieving data from the rclone / REST backend while also using HTTP2 + restic did not detect when no data was returned at all. This could cause for + example the `check` command to report the following error: Pack ID does not match, want [...], got e3b0c442 @@ -2197,98 +2998,105 @@ restic users. The changes are ordered by importance. * Bugfix #3111: Fix terminal output redirection for PowerShell - When redirecting the output of restic using PowerShell on Windows, the output contained - terminal escape characters. This has been fixed by properly detecting the terminal type. + When redirecting the output of restic using PowerShell on Windows, the output + contained terminal escape characters. This has been fixed by properly detecting + the terminal type. - In addition, the mintty terminal now shows progress output for the backup command. + In addition, the mintty terminal now shows progress output for the backup + command. https://github.com/restic/restic/issues/3111 https://github.com/restic/restic/pull/3325 * Bugfix #3184: `backup --quiet` no longer prints status information - A regression in the latest restic version caused the output of `backup --quiet` to contain - large amounts of backup progress information when run using an interactive terminal. This is - fixed now. + A regression in the latest restic version caused the output of `backup --quiet` + to contain large amounts of backup progress information when run using an + interactive terminal. This is fixed now. - A workaround for this bug is to run restic as follows: `restic backup --quiet [..] | cat -`. + A workaround for this bug is to run restic as follows: `restic backup --quiet + [..] | cat -`. https://github.com/restic/restic/issues/3184 https://github.com/restic/restic/pull/3186 * Bugfix #3214: Treat an empty password as a fatal error for repository init - When attempting to initialize a new repository, if an empty password was supplied, the - repository would be created but the init command would return an error with a stack trace. Now, - if an empty password is provided, it is treated as a fatal error, and no repository is created. + When attempting to initialize a new repository, if an empty password was + supplied, the repository would be created but the init command would return an + error with a stack trace. Now, if an empty password is provided, it is treated + as a fatal error, and no repository is created. https://github.com/restic/restic/issues/3214 https://github.com/restic/restic/pull/3283 * Bugfix #3267: `copy` failed to copy snapshots in rare cases - The `copy` command could in rare cases fail with the error message `SaveTree(...) returned - unexpected id ...`. This has been fixed. + The `copy` command could in rare cases fail with the error message + `SaveTree(...) returned unexpected id ...`. This has been fixed. - On Linux/BSDs, the error could be caused by backing up symlinks with non-UTF-8 target paths. - Note that, due to limitations in the repository format, these are not stored properly and - should be avoided if possible. + On Linux/BSDs, the error could be caused by backing up symlinks with non-UTF-8 + target paths. 
Note that, due to limitations in the repository format, these are + not stored properly and should be avoided if possible. https://github.com/restic/restic/issues/3267 https://github.com/restic/restic/pull/3310 * Bugfix #3296: Fix crash of `check --read-data-subset=x%` run for an empty repository - The command `restic check --read-data-subset=x%` crashed when run for an empty repository. - This has been fixed. + The command `restic check --read-data-subset=x%` crashed when run for an empty + repository. This has been fixed. https://github.com/restic/restic/issues/3296 https://github.com/restic/restic/pull/3309 * Bugfix #3302: Fix `fdopendir: not a directory` error for local backend - The `check`, `list packs`, `prune` and `rebuild-index` commands failed for the local backend - when the `data` folder in the repository contained files. This has been fixed. + The `check`, `list packs`, `prune` and `rebuild-index` commands failed for the + local backend when the `data` folder in the repository contained files. This has + been fixed. https://github.com/restic/restic/issues/3302 https://github.com/restic/restic/pull/3308 * Bugfix #3305: Fix possibly missing backup summary of JSON output in case of error - When using `--json` output it happened from time to time that the summary output was missing in - case an error occurred. This has been fixed. + When using `--json` output it happened from time to time that the summary output + was missing in case an error occurred. This has been fixed. https://github.com/restic/restic/pull/3305 * Bugfix #3334: Print `created new cache` message only on a terminal - The message `created new cache` was printed even when the output wasn't a terminal. That broke - piping `restic dump` output to tar or zip if cache directory didn't exist. The message is now - only printed on a terminal. + The message `created new cache` was printed even when the output wasn't a + terminal. That broke piping `restic dump` output to tar or zip if cache + directory didn't exist. The message is now only printed on a terminal. https://github.com/restic/restic/issues/3334 https://github.com/restic/restic/pull/3343 * Bugfix #3380: Fix crash of `backup --exclude='**'` - The exclude filter `**`, which excludes all files, caused restic to crash. This has been - corrected. + The exclude filter `**`, which excludes all files, caused restic to crash. This + has been corrected. https://github.com/restic/restic/issues/3380 https://github.com/restic/restic/pull/3393 * Bugfix #3439: Correctly handle download errors during `restore` - Due to a regression in restic 0.12.0, the `restore` command in some cases did not retry download - errors and only printed a warning. This has been fixed by retrying incomplete data downloads. + Due to a regression in restic 0.12.0, the `restore` command in some cases did + not retry download errors and only printed a warning. This has been fixed by + retrying incomplete data downloads. https://github.com/restic/restic/issues/3439 https://github.com/restic/restic/pull/3449 * Change #3247: Empty files now have size of 0 in `ls --json` output - The `ls --json` command used to omit the sizes of empty files in its output. It now reports a size - of zero explicitly for regular files, while omitting the size field for all other types. + The `ls --json` command used to omit the sizes of empty files in its output. It + now reports a size of zero explicitly for regular files, while omitting the size + field for all other types. 
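An abridged, hypothetical `ls --json` line for an empty regular file illustrating the new behavior (additional fields are omitted here and vary by restic version):

    {"name":"empty","type":"file","path":"/home/user/empty","size":0}

For directories, symlinks and other non-regular entries the `size` field is simply left out.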
https://github.com/restic/restic/issues/3247 https://github.com/restic/restic/pull/3257 @@ -2302,9 +3110,9 @@ restic users. The changes are ordered by importance. * Enhancement #3167: Allow specifying limit of `snapshots` list - The `--last` option allowed limiting the output of the `snapshots` command to the latest - snapshot for each host. The new `--latest n` option allows limiting the output to the latest `n` - snapshots. + The `--last` option allowed limiting the output of the `snapshots` command to + the latest snapshot for each host. The new `--latest n` option allows limiting + the output to the latest `n` snapshots. This change deprecates the option `--last` in favour of `--latest 1`. @@ -2312,13 +3120,15 @@ restic users. The changes are ordered by importance. * Enhancement #3293: Add `--repository-file2` option to `init` and `copy` command - The `init` and `copy` command can now be used with the `--repository-file2` option or the - `$RESTIC_REPOSITORY_FILE2` environment variable. These to options are in addition to the - `--repo2` flag and allow you to read the destination repository from a file. + The `init` and `copy` command can now be used with the `--repository-file2` + option or the `$RESTIC_REPOSITORY_FILE2` environment variable. These two options + are in addition to the `--repo2` flag and allow you to read the destination + repository from a file. - Using both `--repository-file` and `--repo2` options resulted in an error for the `copy` or - `init` command. The handling of this combination of options has been fixed. A workaround for - this issue is to only use `--repo` or `-r` and `--repo2` for `init` or `copy`. + Using both `--repository-file` and `--repo2` options resulted in an error for + the `copy` or `init` command. The handling of this combination of options has + been fixed. A workaround for this issue is to only use `--repo` or `-r` and + `--repo2` for `init` or `copy`. https://github.com/restic/restic/issues/3293 https://github.com/restic/restic/pull/3294 @@ -2331,9 +3141,9 @@ restic users. The changes are ordered by importance. * Enhancement #3336: SFTP backend now checks for disk space - Backing up over SFTP previously spewed multiple generic "failure" messages when the remote - disk was full. It now checks for disk space before writing a file and fails immediately with a "no - space left on device" message. + Backing up over SFTP previously spewed multiple generic "failure" messages when + the remote disk was full. It now checks for disk space before writing a file and + fails immediately with a "no space left on device" message. https://github.com/restic/restic/issues/3336 https://github.com/restic/restic/pull/3345 @@ -2347,15 +3157,17 @@ restic users. The changes are ordered by importance. * Enhancement #3414: Add `--keep-within-hourly` option to restic forget - The `forget` command allowed keeping a given number of hourly backups or to keep all backups - within a given interval, but it was not possible to specify keeping hourly backups within a - given interval. + The `forget` command allowed keeping a given number of hourly backups or to keep + all backups within a given interval, but it was not possible to specify keeping + hourly backups within a given interval. - The new `--keep-within-hourly` option now offers this functionality. Similar options for - daily/weekly/monthly/yearly are also implemented, the new options are: + The new `--keep-within-hourly` option now offers this functionality.
Similar + options for daily/weekly/monthly/yearly are also implemented, the new options + are: - --keep-within-hourly <1y2m3d4h> --keep-within-daily <1y2m3d4h> --keep-within-weekly - <1y2m3d4h> --keep-within-monthly <1y2m3d4h> --keep-within-yearly <1y2m3d4h> + --keep-within-hourly <1y2m3d4h> --keep-within-daily <1y2m3d4h> + --keep-within-weekly <1y2m3d4h> --keep-within-monthly <1y2m3d4h> + --keep-within-yearly <1y2m3d4h> https://github.com/restic/restic/issues/3414 https://github.com/restic/restic/pull/3416 @@ -2363,30 +3175,32 @@ restic users. The changes are ordered by importance. * Enhancement #3426: Optimize read performance of mount command - Reading large files in a mounted repository may be up to five times faster. This improvement - primarily applies to repositories stored at a backend that can be accessed with low latency, - like e.g. the local backend. + Reading large files in a mounted repository may be up to five times faster. This + improvement primarily applies to repositories stored at a backend that can be + accessed with low latency, like e.g. the local backend. https://github.com/restic/restic/pull/3426 * Enhancement #3427: `find --pack` fallback to index if data file is missing - When investigating a repository with missing data files, it might be useful to determine - affected snapshots before running `rebuild-index`. Previously, `find --pack pack-id` - returned no data as it required accessing the data file. Now, if the necessary data is still - available in the repository index, it gets retrieved from there. + When investigating a repository with missing data files, it might be useful to + determine affected snapshots before running `rebuild-index`. Previously, `find + --pack pack-id` returned no data as it required accessing the data file. Now, if + the necessary data is still available in the repository index, it gets retrieved + from there. - The command now also supports looking up multiple pack files in a single `find` run. + The command now also supports looking up multiple pack files in a single `find` + run. https://github.com/restic/restic/pull/3427 https://forum.restic.net/t/missing-packs-not-found/2600 * Enhancement #3456: Support filtering and specifying untagged snapshots - It was previously not possible to specify an empty tag with the `--tag` and `--keep-tag` - options. This has now been fixed, such that `--tag ''` and `--keep-tag ''` now matches - snapshots without tags. This allows e.g. the `snapshots` and `forget` commands to only - operate on untagged snapshots. + It was previously not possible to specify an empty tag with the `--tag` and + `--keep-tag` options. This has now been fixed, such that `--tag ''` and + `--keep-tag ''` now matches snapshots without tags. This allows e.g. the + `snapshots` and `forget` commands to only operate on untagged snapshots. https://github.com/restic/restic/issues/3456 https://github.com/restic/restic/pull/3457 @@ -2410,7 +3224,7 @@ restic users. The changes are ordered by importance. 
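Relating to the untagged-snapshot filtering added by #3456 above, a small usage sketch (policy flags are illustrative only):

    restic snapshots --tag ''
    restic forget --keep-tag '' --dry-run

The first command lists only snapshots without any tags; the second previews a `forget` run that would keep exactly those untagged snapshots.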
* Fix #3151: Don't create invalid snapshots when `backup` is interrupted * Fix #3152: Do not hang until foregrounded when completed in background * Fix #3166: Improve error handling in the `restore` command - * Fix #3232: Correct statistics for overlapping targets + * Fix #3232: Correct statistics for overlapping backup sources * Fix #3249: Improve error handling in `gs` backend * Chg #3095: Deleting files on Google Drive now moves them to the trash * Enh #909: Back up mountpoints as empty directories @@ -2438,28 +3252,28 @@ restic users. The changes are ordered by importance. * Bugfix #1681: Make `mount` not create missing mount point directory - When specifying a non-existent directory as mount point for the `mount` command, restic used - to create the specified directory automatically. + When specifying a non-existent directory as mount point for the `mount` command, + restic used to create the specified directory automatically. - This has now changed such that restic instead gives an error when the specified directory for - the mount point does not exist. + This has now changed such that restic instead gives an error when the specified + directory for the mount point does not exist. https://github.com/restic/restic/issues/1681 https://github.com/restic/restic/pull/3008 * Bugfix #1800: Ignore `no data available` filesystem error during backup - Restic was unable to backup files on some filesystems, for example certain configurations of - CIFS on Linux which return a `no data available` error when reading extended attributes. These - errors are now ignored. + Restic was unable to backup files on some filesystems, for example certain + configurations of CIFS on Linux which return a `no data available` error when + reading extended attributes. These errors are now ignored. https://github.com/restic/restic/issues/1800 https://github.com/restic/restic/pull/3034 * Bugfix #2563: Report the correct owner of directories in FUSE mounts - Restic 0.10.0 changed the FUSE mount to always report the current user as the owner of - directories within the FUSE mount, which is incorrect. + Restic 0.10.0 changed the FUSE mount to always report the current user as the + owner of directories within the FUSE mount, which is incorrect. This is now changed back to reporting the correct owner of a directory. @@ -2468,30 +3282,31 @@ restic users. The changes are ordered by importance. * Bugfix #2688: Make `backup` and `tag` commands separate tags by comma - Running `restic backup --tag foo,bar` previously created snapshots with one single tag - containing a comma (`foo,bar`) instead of two tags (`foo`, `bar`). + Running `restic backup --tag foo,bar` previously created snapshots with one + single tag containing a comma (`foo,bar`) instead of two tags (`foo`, `bar`). - Similarly, the `tag` command's `--set`, `--add` and `--remove` options would treat - `foo,bar` as one tag instead of two tags. This was inconsistent with other commands and often - unexpected when one intended `foo,bar` to mean two tags. + Similarly, the `tag` command's `--set`, `--add` and `--remove` options would + treat `foo,bar` as one tag instead of two tags. This was inconsistent with other + commands and often unexpected when one intended `foo,bar` to mean two tags. - To be consistent in all commands, restic now interprets `foo,bar` to mean two separate tags - (`foo` and `bar`) instead of one tag (`foo,bar`) everywhere, including in the `backup` and - `tag` commands. 
+ To be consistent in all commands, restic now interprets `foo,bar` to mean two + separate tags (`foo` and `bar`) instead of one tag (`foo,bar`) everywhere, + including in the `backup` and `tag` commands. - NOTE: This change might result in unexpected behavior in cases where you use the `forget` - command and filter on tags like `foo,bar`. Snapshots previously backed up with `--tag - foo,bar` will still not match that filter, but snapshots saved from now on will match that - filter. + NOTE: This change might result in unexpected behavior in cases where you use the + `forget` command and filter on tags like `foo,bar`. Snapshots previously backed + up with `--tag foo,bar` will still not match that filter, but snapshots saved + from now on will match that filter. - To replace `foo,bar` tags with `foo` and `bar` tags in old snapshots, you can first generate a - list of the relevant snapshots using a command like: + To replace `foo,bar` tags with `foo` and `bar` tags in old snapshots, you can + first generate a list of the relevant snapshots using a command like: - Restic snapshots --json --quiet | jq '.[] | select(contains({tags: ["foo,bar"]})) | .id' + Restic snapshots --json --quiet | jq '.[] | select(contains({tags: + ["foo,bar"]})) | .id' - And then use `restic tag --set foo --set bar snapshotID [...]` to set the new tags. Please adjust - the commands to include real tag names and any additional tags, as well as the list of snapshots - to process. + And then use `restic tag --set foo --set bar snapshotID [...]` to set the new + tags. Please adjust the commands to include real tag names and any additional + tags, as well as the list of snapshots to process. https://github.com/restic/restic/issues/2688 https://github.com/restic/restic/pull/2690 @@ -2505,14 +3320,14 @@ restic users. The changes are ordered by importance. * Bugfix #3014: Fix sporadic stream reset between rclone and restic - Sometimes when using restic with the `rclone` backend, an error message similar to the - following would be printed: + Sometimes when using restic with the `rclone` backend, an error message similar + to the following would be printed: Didn't finish writing GET request (wrote 0/xxx): http2: stream closed - It was found that this was caused by restic closing the connection to rclone to soon when - downloading data. A workaround has been added which waits for the end of the download before - closing the connection. + It was found that this was caused by restic closing the connection to rclone too + soon when downloading data. A workaround has been added which waits for the end + of the download before closing the connection. https://github.com/rclone/rclone/issues/2598 https://github.com/restic/restic/pull/3014 @@ -2530,125 +3345,130 @@ restic users. The changes are ordered by importance. * Bugfix #3100: Do not require gs bucket permissions when running `init` - Restic used to require bucket level permissions for the `gs` backend in order to initialize a - restic repository. + Restic used to require bucket level permissions for the `gs` backend in order to + initialize a restic repository. - It now allows a `gs` service account to initialize a repository if the bucket does exist and the - service account has permissions to write/read to that bucket. + It now allows a `gs` service account to initialize a repository if the bucket + does exist and the service account has permissions to write/read to that bucket.
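A rough sketch of the `gs` initialization described in #3100; bucket name, project ID and the credentials file path are placeholders:

    export GOOGLE_PROJECT_ID=123456789012
    export GOOGLE_APPLICATION_CREDENTIALS=$HOME/gs-service-account.json
    restic -r gs:existing-bucket:/ init

The service account now only needs read/write access to the pre-existing bucket instead of bucket-level (creation) permissions.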
https://github.com/restic/restic/issues/3100 * Bugfix #3111: Correctly detect output redirection for `backup` command on Windows - On Windows, since restic 0.10.0 the `backup` command did not properly detect when the output - was redirected to a file. This caused restic to output terminal control characters. This has - been fixed by correcting the terminal detection. + On Windows, since restic 0.10.0 the `backup` command did not properly detect + when the output was redirected to a file. This caused restic to output terminal + control characters. This has been fixed by correcting the terminal detection. https://github.com/restic/restic/issues/3111 https://github.com/restic/restic/pull/3150 * Bugfix #3151: Don't create invalid snapshots when `backup` is interrupted - When canceling a backup run at a certain moment it was possible that restic created a snapshot - with an invalid "null" tree. This caused `check` and other operations to fail. The `backup` - command now properly handles interruptions and never saves a snapshot when interrupted. + When canceling a backup run at a certain moment it was possible that restic + created a snapshot with an invalid "null" tree. This caused `check` and other + operations to fail. The `backup` command now properly handles interruptions and + never saves a snapshot when interrupted. https://github.com/restic/restic/issues/3151 https://github.com/restic/restic/pull/3164 * Bugfix #3152: Do not hang until foregrounded when completed in background - On Linux, when running in the background restic failed to stop the terminal output of the - `backup` command after it had completed. This caused restic to hang until moved to the - foreground. This has now been fixed. + On Linux, when running in the background restic failed to stop the terminal + output of the `backup` command after it had completed. This caused restic to + hang until moved to the foreground. This has now been fixed. https://github.com/restic/restic/pull/3152 https://forum.restic.net/t/restic-alpine-container-cron-hangs-epoll-pwait/3334 * Bugfix #3166: Improve error handling in the `restore` command - The `restore` command used to not print errors while downloading file contents from the - repository. It also incorrectly exited with a zero error code even when there were errors - during the restore process. This has all been fixed and `restore` now returns with a non-zero - exit code when there's an error. + The `restore` command used to not print errors while downloading file contents + from the repository. It also incorrectly exited with a zero error code even when + there were errors during the restore process. This has all been fixed and + `restore` now returns with a non-zero exit code when there's an error. https://github.com/restic/restic/issues/3166 https://github.com/restic/restic/pull/3207 * Bugfix #3232: Correct statistics for overlapping backup sources - A user reported that restic's statistics and progress information during backup was not - correctly calculated when the backup sources (files/dirs to save) overlap. For example, - consider a directory `foo` which contains (among others) a file `foo/bar`. When `restic - backup foo foo/bar` was run, restic counted the size of the file `foo/bar` twice, so the - completeness percentage as well as the number of files was wrong. This is now corrected. + A user reported that restic's statistics and progress information during backup + was not correctly calculated when the backup sources (files/dirs to save) + overlap. 
For example, consider a directory `foo` which contains (among others) a + file `foo/bar`. When `restic backup foo foo/bar` was run, restic counted the + size of the file `foo/bar` twice, so the completeness percentage as well as the + number of files was wrong. This is now corrected. https://github.com/restic/restic/issues/3232 https://github.com/restic/restic/pull/3243 * Bugfix #3249: Improve error handling in `gs` backend - The `gs` backend did not notice when the last step of completing a file upload failed. Under rare - circumstances, this could cause missing files in the backup repository. This has now been - fixed. + The `gs` backend did not notice when the last step of completing a file upload + failed. Under rare circumstances, this could cause missing files in the backup + repository. This has now been fixed. https://github.com/restic/restic/pull/3249 * Change #3095: Deleting files on Google Drive now moves them to the trash - When deleting files on Google Drive via the `rclone` backend, restic used to bypass the trash - folder required that one used the `-o rclone.args` option to enable usage of the trash folder. - This ensured that deleted files in Google Drive were not kept indefinitely in the trash folder. - However, since Google Drive's trash retention policy changed to deleting trashed files after - 30 days, this is no longer needed. + When deleting files on Google Drive via the `rclone` backend, restic used to + bypass the trash folder required that one used the `-o rclone.args` option to + enable usage of the trash folder. This ensured that deleted files in Google + Drive were not kept indefinitely in the trash folder. However, since Google + Drive's trash retention policy changed to deleting trashed files after 30 days, + this is no longer needed. - Restic now leaves it up to rclone and its configuration to use or not use the trash folder when - deleting files. The default is to use the trash folder, as of rclone 1.53.2. To re-enable the - restic 0.11 behavior, set the `RCLONE_DRIVE_USE_TRASH` environment variable or change the - rclone configuration. See the rclone documentation for more details. + Restic now leaves it up to rclone and its configuration to use or not use the + trash folder when deleting files. The default is to use the trash folder, as of + rclone 1.53.2. To re-enable the restic 0.11 behavior, set the + `RCLONE_DRIVE_USE_TRASH` environment variable or change the rclone + configuration. See the rclone documentation for more details. https://github.com/restic/restic/issues/3095 https://github.com/restic/restic/pull/3102 * Enhancement #909: Back up mountpoints as empty directories - When the `--one-file-system` option is specified to `restic backup`, it ignores all file - systems mounted below one of the target directories. This means that when a snapshot is - restored, users needed to manually recreate the mountpoint directories. + When the `--one-file-system` option is specified to `restic backup`, it ignores + all file systems mounted below one of the target directories. This means that + when a snapshot is restored, users needed to manually recreate the mountpoint + directories. - Restic now backs up mountpoints as empty directories and therefore implements the same - approach as `tar`. + Restic now backs up mountpoints as empty directories and therefore implements + the same approach as `tar`. 
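A brief example of the mountpoint handling described in #909, assuming `/boot` is a separate filesystem (paths are illustrative):

    restic backup --one-file-system /

The resulting snapshot records `/boot` as an empty directory rather than skipping it entirely, so restoring the snapshot recreates the mountpoint without manual intervention.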
https://github.com/restic/restic/issues/909 https://github.com/restic/restic/pull/3119 * Enhancement #2186: Allow specifying percentage in `check --read-data-subset` - We've enhanced the `check` command's `--read-data-subset` option to also accept a - percentage (e.g. `2.5%` or `10%`). This will check the given percentage of pack files (which - are randomly selected on each run). + We've enhanced the `check` command's `--read-data-subset` option to also accept + a percentage (e.g. `2.5%` or `10%`). This will check the given percentage of + pack files (which are randomly selected on each run). https://github.com/restic/restic/issues/2186 https://github.com/restic/restic/pull/3038 * Enhancement #2433: Make the `dump` command support `zip` format - Previously, restic could dump the contents of a whole folder structure only in the `tar` - format. The `dump` command now has a new flag to change output format to `zip`. Just pass - `--archive zip` as an option to `restic dump`. + Previously, restic could dump the contents of a whole folder structure only in + the `tar` format. The `dump` command now has a new flag to change output format + to `zip`. Just pass `--archive zip` as an option to `restic dump`. https://github.com/restic/restic/pull/2433 https://github.com/restic/restic/pull/3081 * Enhancement #2453: Report permanent/fatal backend errors earlier - When encountering errors in reading from or writing to storage backends, restic retries the - failing operation up to nine times (for a total of ten attempts). It used to retry all backend - operations, but now detects some permanent error conditions so that it can report fatal errors - earlier. + When encountering errors in reading from or writing to storage backends, restic + retries the failing operation up to nine times (for a total of ten attempts). It + used to retry all backend operations, but now detects some permanent error + conditions so that it can report fatal errors earlier. - Permanent failures include local disks being full, SSH connections dropping and permission - errors. + Permanent failures include local disks being full, SSH connections dropping and + permission errors. https://github.com/restic/restic/issues/2453 https://github.com/restic/restic/issues/3180 @@ -2657,23 +3477,26 @@ restic users. The changes are ordered by importance. * Enhancement #2495: Add option to let `backup` trust mtime without checking ctime - The `backup` command used to require that both `ctime` and `mtime` of a file matched with a - previously backed up version to determine that the file was unchanged. In other words, if - either `ctime` or `mtime` of the file had changed, it would be considered changed and restic - would read the file's content again to back up the relevant (changed) parts of it. - - The new option `--ignore-ctime` makes restic look at `mtime` only, such that `ctime` changes - for a file does not cause restic to read the file's contents again. - - The check for both `ctime` and `mtime` was introduced in restic 0.9.6 to make backups more - reliable in the face of programs that reset `mtime` (some Unix archivers do that), but it turned - out to often be expensive because it made restic read file contents even if only the metadata - (owner, permissions) of a file had changed. The new `--ignore-ctime` option lets the user - restore the 0.9.5 behavior when needed. 
The existing `--ignore-inode` option already turned + The `backup` command used to require that both `ctime` and `mtime` of a file + matched with a previously backed up version to determine that the file was + unchanged. In other words, if either `ctime` or `mtime` of the file had changed, + it would be considered changed and restic would read the file's content again to + back up the relevant (changed) parts of it. + + The new option `--ignore-ctime` makes restic look at `mtime` only, such that + `ctime` changes for a file does not cause restic to read the file's contents + again. + + The check for both `ctime` and `mtime` was introduced in restic 0.9.6 to make + backups more reliable in the face of programs that reset `mtime` (some Unix + archivers do that), but it turned out to often be expensive because it made + restic read file contents even if only the metadata (owner, permissions) of a + file had changed. The new `--ignore-ctime` option lets the user restore the + 0.9.5 behavior when needed. The existing `--ignore-inode` option already turned off this behavior, but also removed a different check. - Please note that changes in files' metadata are still recorded, regardless of the command line - options provided to the backup command. + Please note that changes in files' metadata are still recorded, regardless of + the command line options provided to the backup command. https://github.com/restic/restic/issues/2495 https://github.com/restic/restic/issues/2558 @@ -2682,20 +3505,21 @@ restic users. The changes are ordered by importance. * Enhancement #2528: Add Alibaba/Aliyun OSS support in the `s3` backend - A new extended option `s3.bucket-lookup` has been added to support Alibaba/Aliyun OSS in the - `s3` backend. The option can be set to one of the following values: + A new extended option `s3.bucket-lookup` has been added to support + Alibaba/Aliyun OSS in the `s3` backend. The option can be set to one of the + following values: - - `auto` - Existing behaviour - `dns` - Use DNS style bucket access - `path` - Use path style - bucket access + - `auto` - Existing behaviour - `dns` - Use DNS style bucket access - `path` - + Use path style bucket access - To make the `s3` backend work with Alibaba/Aliyun OSS you must set `s3.bucket-lookup` to `dns` - and set the `s3.region` parameter. For example: + To make the `s3` backend work with Alibaba/Aliyun OSS you must set + `s3.bucket-lookup` to `dns` and set the `s3.region` parameter. For example: Restic -o s3.bucket-lookup=dns -o s3.region=oss-eu-west-1 -r s3:https://oss-eu-west-1.aliyuncs.com/bucketname init - Note that `s3.region` must be set, otherwise the MinIO SDK tries to look it up and it seems that - Alibaba doesn't support that properly. + Note that `s3.region` must be set, otherwise the MinIO SDK tries to look it up + and it seems that Alibaba doesn't support that properly. https://github.com/restic/restic/issues/2528 https://github.com/restic/restic/pull/2535 @@ -2704,14 +3528,14 @@ restic users. The changes are ordered by importance. The `backup`, `check` and `prune` commands never printed any progress reports on non-interactive terminals. This behavior is now configurable using the - `RESTIC_PROGRESS_FPS` environment variable. Use for example a value of `1` for an update - every second, or `0.01666` for an update every minute. + `RESTIC_PROGRESS_FPS` environment variable. Use for example a value of `1` for + an update every second, or `0.01666` for an update every minute. 
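+
+ For illustration only (the backup path is a placeholder), a progress update
+ roughly once per second on a non-interactive terminal could be requested with:
+
+     RESTIC_PROGRESS_FPS=1 restic backup /srv
+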
- The `backup` command now also prints the current progress when restic receives a `SIGUSR1` - signal. + The `backup` command now also prints the current progress when restic receives a + `SIGUSR1` signal. - Setting the `RESTIC_PROGRESS_FPS` environment variable or sending a `SIGUSR1` signal - prints a status report even when `--quiet` was specified. + Setting the `RESTIC_PROGRESS_FPS` environment variable or sending a `SIGUSR1` + signal prints a status report even when `--quiet` was specified. https://github.com/restic/restic/issues/2706 https://github.com/restic/restic/issues/3194 @@ -2719,21 +3543,22 @@ restic users. The changes are ordered by importance. * Enhancement #2718: Improve `prune` performance and make it more customizable - The `prune` command is now much faster. This is especially the case for remote repositories or - repositories with not much data to remove. Also the memory usage of the `prune` command is now - reduced. + The `prune` command is now much faster. This is especially the case for remote + repositories or repositories with not much data to remove. Also the memory usage + of the `prune` command is now reduced. - Restic used to rebuild the index from scratch after pruning. This could lead to missing packs in - the index in some cases for eventually consistent backends such as e.g. AWS S3. This behavior is - now changed and the index rebuilding uses the information already known by `prune`. + Restic used to rebuild the index from scratch after pruning. This could lead to + missing packs in the index in some cases for eventually consistent backends such + as e.g. AWS S3. This behavior is now changed and the index rebuilding uses the + information already known by `prune`. - By default, the `prune` command no longer removes all unused data. This behavior can be - fine-tuned by new options, like the acceptable amount of unused space or the maximum size of - data to reorganize. For more details, please see + By default, the `prune` command no longer removes all unused data. This behavior + can be fine-tuned by new options, like the acceptable amount of unused space or + the maximum size of data to reorganize. For more details, please see https://restic.readthedocs.io/en/stable/060_forget.html . - Moreover, `prune` now accepts the `--dry-run` option and also running `forget --dry-run - --prune` will show what `prune` would do. + Moreover, `prune` now accepts the `--dry-run` option and also running `forget + --dry-run --prune` will show what `prune` would do. This enhancement also fixes several open issues, e.g.: - https://github.com/restic/restic/issues/1140 - @@ -2748,68 +3573,74 @@ restic users. The changes are ordered by importance. * Enhancement #2941: Speed up the repacking step of the `prune` command - The repack step of the `prune` command, which moves still used file parts into new pack files - such that the old ones can be garbage collected later on, now processes multiple pack files in - parallel. This is especially beneficial for high latency backends or when using a fast network - connection. + The repack step of the `prune` command, which moves still used file parts into + new pack files such that the old ones can be garbage collected later on, now + processes multiple pack files in parallel. This is especially beneficial for + high latency backends or when using a fast network connection. 
https://github.com/restic/restic/pull/2941 * Enhancement #2944: Add `backup` options `--files-from-{verbatim,raw}` - The new `backup` options `--files-from-verbatim` and `--files-from-raw` read a list of - files to back up from a file. Unlike the existing `--files-from` option, these options do not - interpret the listed filenames as glob patterns; instead, whitespace in filenames is - preserved as-is and no pattern expansion is done. Please see the documentation for specifics. + The new `backup` options `--files-from-verbatim` and `--files-from-raw` read a + list of files to back up from a file. Unlike the existing `--files-from` option, + these options do not interpret the listed filenames as glob patterns; instead, + whitespace in filenames is preserved as-is and no pattern expansion is done. + Please see the documentation for specifics. - These new options are highly recommended over `--files-from`, when using a script to generate - the list of files to back up. + These new options are highly recommended over `--files-from`, when using a + script to generate the list of files to back up. https://github.com/restic/restic/issues/2944 https://github.com/restic/restic/issues/3013 * Enhancement #3006: Speed up the `rebuild-index` command - We've optimized the `rebuild-index` command. Now, existing index entries are used to - minimize the number of pack files that must be read. This speeds up the index rebuild a lot. + We've optimized the `rebuild-index` command. Now, existing index entries are + used to minimize the number of pack files that must be read. This speeds up the + index rebuild a lot. - Additionally, the option `--read-all-packs` has been added, implementing the previous - behavior. + Additionally, the option `--read-all-packs` has been added, implementing the + previous behavior. https://github.com/restic/restic/pull/3006 https://github.com/restic/restic/issue/2547 * Enhancement #3048: Add more checks for index and pack files in the `check` command - The `check` command run with the `--read-data` or `--read-data-subset` options used to only - verify only the pack file content - it did not check if the blobs within the pack are correctly - contained in the index. + The `check` command run with the `--read-data` or `--read-data-subset` options + used to only verify only the pack file content - it did not check if the blobs + within the pack are correctly contained in the index. A check for the latter is now in place, which can print the following error: Blob ID is not contained in index or position is incorrect - Another test is also added, which compares pack file sizes computed from the index and the pack - header with the actual file size. This test is able to detect truncated pack files. + Another test is also added, which compares pack file sizes computed from the + index and the pack header with the actual file size. This test is able to detect + truncated pack files. - If the index is not correct, it can be rebuilt by using the `rebuild-index` command. + If the index is not correct, it can be rebuilt by using the `rebuild-index` + command. - Having added these tests, `restic check` is now able to detect non-existing blobs which are - wrongly referenced in the index. This situation could have lead to missing data. + Having added these tests, `restic check` is now able to detect non-existing + blobs which are wrongly referenced in the index. This situation could have lead + to missing data. 
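+
+ A hedged sketch of the workflow described above (repository settings are
+ assumed to come from the environment): run the extended checks and, only if
+ they report index problems, rebuild the index afterwards:
+
+     restic check --read-data
+     restic rebuild-index
+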
https://github.com/restic/restic/pull/3048 https://github.com/restic/restic/pull/3082 * Enhancement #3083: Allow usage of deprecated S3 `ListObjects` API - Some S3 API implementations, e.g. Ceph before version 14.2.5, have a broken `ListObjectsV2` - implementation which causes problems for restic when using their API endpoints. When a broken - server implementation is used, restic prints errors similar to the following: + Some S3 API implementations, e.g. Ceph before version 14.2.5, have a broken + `ListObjectsV2` implementation which causes problems for restic when using their + API endpoints. When a broken server implementation is used, restic prints errors + similar to the following: List() returned error: Truncated response should have continuation token set - As a temporary workaround, restic now allows using the older `ListObjects` endpoint by - setting the `s3.list-objects-v1` extended option, for instance: + As a temporary workaround, restic now allows using the older `ListObjects` + endpoint by setting the `s3.list-objects-v1` extended option, for instance: Restic -o s3.list-objects-v1=true snapshots @@ -2820,28 +3651,30 @@ restic users. The changes are ordered by importance. * Enhancement #3099: Reduce memory usage of `check` command - The `check` command now requires less memory if it is run without the `--check-unused` option. + The `check` command now requires less memory if it is run without the + `--check-unused` option. https://github.com/restic/restic/pull/3099 * Enhancement #3106: Parallelize scan of snapshot content in `copy` and `prune` - The `copy` and `prune` commands used to traverse the directories of snapshots one by one to find - used data. This snapshot traversal is now parallized which can speed up this step several - times. + The `copy` and `prune` commands used to traverse the directories of snapshots + one by one to find used data. This snapshot traversal is now parallelized which + can speed up this step several times. - In addition the `check` command now reports how many snapshots have already been processed. + In addition the `check` command now reports how many snapshots have already been + processed. https://github.com/restic/restic/pull/3106 * Enhancement #3130: Parallelize reading of locks and snapshots - Restic used to read snapshots sequentially. For repositories containing many snapshots this - slowed down commands which have to read all snapshots. + Restic used to read snapshots sequentially. For repositories containing many + snapshots this slowed down commands which have to read all snapshots. - Now the reading of snapshots is parallelized. This speeds up for example `prune`, `backup` and - other commands that search for snapshots with certain properties or which have to find the - `latest` snapshot. + Now the reading of snapshots is parallelized. This speeds up for example + `prune`, `backup` and other commands that search for snapshots with certain + properties or which have to find the `latest` snapshot. The speed up also applies to locks stored in the backup repository. @@ -2850,37 +3683,39 @@ restic users. The changes are ordered by importance. 
* Enhancement #3147: Support additional environment variables for Swift authentication - The `swift` backend now supports the following additional environment variables for passing - authentication details to restic: `OS_USER_ID`, `OS_USER_DOMAIN_ID`, + The `swift` backend now supports the following additional environment variables + for passing authentication details to restic: `OS_USER_ID`, `OS_USER_DOMAIN_ID`, `OS_PROJECT_DOMAIN_ID` and `OS_TRUST_ID` - Depending on the `openrc` configuration file these might be required when the user and project - domains differ from one another. + Depending on the `openrc` configuration file these might be required when the + user and project domains differ from one another. https://github.com/restic/restic/issues/3147 https://github.com/restic/restic/pull/3158 * Enhancement #3191: Add release binaries for MIPS architectures - We've added a few new architectures for Linux to the release binaries: `mips`, `mipsle`, - `mips64`, and `mip64le`. MIPS is mostly used for low-end embedded systems. + We've added a few new architectures for Linux to the release binaries: `mips`, + `mipsle`, `mips64`, and `mip64le`. MIPS is mostly used for low-end embedded + systems. https://github.com/restic/restic/issues/3191 https://github.com/restic/restic/pull/3208 * Enhancement #3250: Add several more error checks - We've added a lot more error checks in places where errors were previously ignored (as hinted by - the static analysis program `errcheck` via `golangci-lint`). + We've added a lot more error checks in places where errors were previously + ignored (as hinted by the static analysis program `errcheck` via + `golangci-lint`). https://github.com/restic/restic/pull/3250 * Enhancement #3254: Enable HTTP/2 for backend connections - Go's HTTP library usually automatically chooses between HTTP/1.x and HTTP/2 depending on - what the server supports. But for compatibility this mechanism is disabled if DialContext is - used (which is the case for restic). This change allows restic's HTTP client to negotiate - HTTP/2 if supported by the server. + Go's HTTP library usually automatically chooses between HTTP/1.x and HTTP/2 + depending on what the server supports. But for compatibility this mechanism is + disabled if DialContext is used (which is the case for restic). This change + allows restic's HTTP client to negotiate HTTP/2 if supported by the server. https://github.com/restic/restic/pull/3254 @@ -2911,11 +3746,11 @@ restic users. The changes are ordered by importance. * Bugfix #1212: Restore timestamps and permissions on intermediate directories - When using the `--include` option of the restore command, restic restored timestamps and - permissions only on directories selected by the include pattern. Intermediate directories, - which are necessary to restore files located in sub- directories, were created with default - permissions. We've fixed the restore command to restore timestamps and permissions for these - directories as well. + When using the `--include` option of the restore command, restic restored + timestamps and permissions only on directories selected by the include pattern. + Intermediate directories, which are necessary to restore files located in sub- + directories, were created with default permissions. We've fixed the restore + command to restore timestamps and permissions for these directories as well. https://github.com/restic/restic/issues/1212 https://github.com/restic/restic/issues/1402 @@ -2923,13 +3758,14 @@ restic users. 
The changes are ordered by importance. * Bugfix #1756: Mark repository files as read-only when using the local backend - Files stored in a local repository were marked as writeable on the filesystem for non-Windows - systems, which did not prevent accidental file modifications outside of restic. In addition, - the local backend did not work with certain filesystems and network mounts which do not permit - modifications of file permissions. + Files stored in a local repository were marked as writable on the filesystem for + non-Windows systems, which did not prevent accidental file modifications outside + of restic. In addition, the local backend did not work with certain filesystems + and network mounts which do not permit modifications of file permissions. - Restic now marks files stored in a local repository as read-only on the filesystem on - non-Windows systems. The error handling is improved to support more filesystems. + Restic now marks files stored in a local repository as read-only on the + filesystem on non-Windows systems. The error handling is improved to support + more filesystems. https://github.com/restic/restic/issues/1756 https://github.com/restic/restic/issues/2157 @@ -2937,8 +3773,9 @@ restic users. The changes are ordered by importance. * Bugfix #2241: Hide password in REST backend repository URLs - When using a password in the REST backend repository URL, the password could in some cases be - included in the output from restic, e.g. when initializing a repo or during an error. + When using a password in the REST backend repository URL, the password could in + some cases be included in the output from restic, e.g. when initializing a repo + or during an error. The password is now replaced with "***" where applicable. @@ -2947,10 +3784,11 @@ restic users. The changes are ordered by importance. * Bugfix #2319: Correctly dump directories into tar files - The dump command previously wrote directories in a tar file in a way which can cause - compatibility problems. This caused, for example, 7zip on Windows to not open tar files - containing directories. In addition it was not possible to dump directories with extended - attributes. These compatibility problems are now corrected. + The dump command previously wrote directories in a tar file in a way which can + cause compatibility problems. This caused, for example, 7zip on Windows to not + open tar files containing directories. In addition it was not possible to dump + directories with extended attributes. These compatibility problems are now + corrected. In addition, a tar file now includes the name of the owner and group of a file. @@ -2959,17 +3797,18 @@ restic users. The changes are ordered by importance. * Bugfix #2491: Don't require `self-update --output` placeholder file - `restic self-update --output /path/to/new-restic` used to require that new-restic was an - existing file, to be overwritten. Now it's possible to download an updated restic binary to a - new path, without first having to create a placeholder file. + `restic self-update --output /path/to/new-restic` used to require that + new-restic was an existing file, to be overwritten. Now it's possible to + download an updated restic binary to a new path, without first having to create + a placeholder file. 
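+
+ For example (the output path is only a placeholder and no longer needs to
+ exist beforehand):
+
+     restic self-update --output /usr/local/bin/restic
+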
https://github.com/restic/restic/issues/2491 https://github.com/restic/restic/pull/2937 * Bugfix #2834: Fix rare cases of backup command hanging forever - We've fixed an issue with the backup progress reporting which could cause restic to hang - forever right before finishing a backup. + We've fixed an issue with the backup progress reporting which could cause restic + to hang forever right before finishing a backup. https://github.com/restic/restic/issues/2834 https://github.com/restic/restic/pull/2963 @@ -2983,47 +3822,50 @@ restic users. The changes are ordered by importance. * Bugfix #2942: Make --exclude-larger-than handle disappearing files - There was a small bug in the backup command's --exclude-larger-than option where files that - disappeared between scanning and actually backing them up to the repository caused a panic. - This is now fixed. + There was a small bug in the backup command's --exclude-larger-than option where + files that disappeared between scanning and actually backing them up to the + repository caused a panic. This is now fixed. https://github.com/restic/restic/issues/2942 * Bugfix #2951: Restic generate, help and self-update no longer check passwords - The commands `restic cache`, `generate`, `help` and `self-update` don't need passwords, but - they previously did run the RESTIC_PASSWORD_COMMAND (if set in the environment), prompting - users to authenticate for no reason. They now skip running the password command. + The commands `restic cache`, `generate`, `help` and `self-update` don't need + passwords, but they previously did run the RESTIC_PASSWORD_COMMAND (if set in + the environment), prompting users to authenticate for no reason. They now skip + running the password command. https://github.com/restic/restic/issues/2951 https://github.com/restic/restic/pull/2987 * Bugfix #2979: Make snapshots --json output [] instead of null when no snapshots - Restic previously output `null` instead of `[]` for the `--json snapshots` command, when - there were no snapshots in the repository. This caused some minor problems when parsing the - output, but is now fixed such that `[]` is output when the list of snapshots is empty. + Restic previously output `null` instead of `[]` for the `--json snapshots` + command, when there were no snapshots in the repository. This caused some minor + problems when parsing the output, but is now fixed such that `[]` is output when + the list of snapshots is empty. https://github.com/restic/restic/issues/2979 https://github.com/restic/restic/pull/2984 * Enhancement #340: Add support for Volume Shadow Copy Service (VSS) on Windows - Volume Shadow Copy Service allows read access to files that are locked by another process using - an exclusive lock through a filesystem snapshot. Restic was unable to backup those files - before. This update enables backing up these files. + Volume Shadow Copy Service allows read access to files that are locked by + another process using an exclusive lock through a filesystem snapshot. Restic + was unable to backup those files before. This update enables backing up these + files. - This needs to be enabled explicitely using the --use-fs-snapshot option of the backup - command. + This needs to be enabled explicitly using the --use-fs-snapshot option of the + backup command. https://github.com/restic/restic/issues/340 https://github.com/restic/restic/pull/2274 * Enhancement #1458: New option --repository-file - We've added a new command-line option --repository-file as an alternative to -r. 
This allows - to read the repository URL from a file in order to prevent certain types of information leaks, - especially for URLs containing credentials. + We've added a new command-line option --repository-file as an alternative to -r. + This allows to read the repository URL from a file in order to prevent certain + types of information leaks, especially for URLs containing credentials. https://github.com/restic/restic/issues/1458 https://github.com/restic/restic/issues/2900 @@ -3031,27 +3873,29 @@ restic users. The changes are ordered by importance. * Enhancement #2849: Authenticate to Google Cloud Storage with access token - When using the GCS backend, it is now possible to authenticate with OAuth2 access tokens - instead of a credentials file by setting the GOOGLE_ACCESS_TOKEN environment variable. + When using the GCS backend, it is now possible to authenticate with OAuth2 + access tokens instead of a credentials file by setting the GOOGLE_ACCESS_TOKEN + environment variable. https://github.com/restic/restic/pull/2849 * Enhancement #2969: Optimize check for unchanged files during backup - During a backup restic skips processing files which have not changed since the last backup run. - Previously this required opening each file once which can be slow on network filesystems. The - backup command now checks for file changes before opening a file. This considerably reduces - the time to create a backup on network filesystems. + During a backup restic skips processing files which have not changed since the + last backup run. Previously this required opening each file once which can be + slow on network filesystems. The backup command now checks for file changes + before opening a file. This considerably reduces the time to create a backup on + network filesystems. https://github.com/restic/restic/issues/2969 https://github.com/restic/restic/pull/2970 * Enhancement #2978: Warn if parent snapshot cannot be loaded during backup - During a backup restic uses the parent snapshot to check whether a file was changed and has to be - backed up again. For this check the backup has to read the directories contained in the old - snapshot. If a tree blob cannot be loaded, restic now warns about this problem with the backup - repository. + During a backup restic uses the parent snapshot to check whether a file was + changed and has to be backed up again. For this check the backup has to read the + directories contained in the old snapshot. If a tree blob cannot be loaded, + restic now warns about this problem with the backup repository. https://github.com/restic/restic/pull/2978 @@ -3111,15 +3955,16 @@ restic users. The changes are ordered by importance. * Bugfix #1863: Report correct number of directories processed by backup - The directory statistics calculation was fixed to report the actual number of processed - directories instead of always zero. + The directory statistics calculation was fixed to report the actual number of + processed directories instead of always zero. https://github.com/restic/restic/issues/1863 * Bugfix #2254: Fix tar issues when dumping `/` - We've fixed an issue with dumping either `/` or files on the first sublevel e.g. `/foo` to tar. - This also fixes tar dumping issues on Windows where this issue could also happen. + We've fixed an issue with dumping either `/` or files on the first sublevel e.g. + `/foo` to tar. This also fixes tar dumping issues on Windows where this issue + could also happen. 
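+
+ As an illustrative invocation (the snapshot ID and output file name are
+ placeholders), dumping the root of the latest snapshot into a tar archive
+ could look like:
+
+     restic dump latest / > latest.tar
+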
https://github.com/restic/restic/issues/2254 https://github.com/restic/restic/issues/2357 @@ -3127,59 +3972,63 @@ restic users. The changes are ordered by importance. * Bugfix #2281: Handle format verbs like '%' properly in `find` output - The JSON or "normal" output of the `find` command can now deal with file names that contain - substrings which the Golang `fmt` package considers "format verbs" like `%s`. + The JSON or "normal" output of the `find` command can now deal with file names + that contain substrings which the Golang `fmt` package considers "format verbs" + like `%s`. https://github.com/restic/restic/issues/2281 * Bugfix #2298: Do not hang when run as a background job - Restic did hang on exit while restoring the terminal configuration when it was started as a - background job, for example using `restic ... &`. This has been fixed by only restoring the - terminal configuration when restic is interrupted while reading a password from the - terminal. + Restic did hang on exit while restoring the terminal configuration when it was + started as a background job, for example using `restic ... &`. This has been + fixed by only restoring the terminal configuration when restic is interrupted + while reading a password from the terminal. https://github.com/restic/restic/issues/2298 * Bugfix #2389: Fix mangled json output of backup command - We've fixed a race condition in the json output of the backup command that could cause multiple - lines to get mixed up. We've also ensured that the backup summary is printed last. + We've fixed a race condition in the json output of the backup command that could + cause multiple lines to get mixed up. We've also ensured that the backup summary + is printed last. https://github.com/restic/restic/issues/2389 https://github.com/restic/restic/pull/2545 * Bugfix #2390: Refresh lock timestamp - Long-running operations did not refresh lock timestamp, resulting in locks becoming stale. - This is now fixed. + Long-running operations did not refresh lock timestamp, resulting in locks + becoming stale. This is now fixed. https://github.com/restic/restic/issues/2390 * Bugfix #2429: Backup --json reports total_bytes_processed as 0 - We've fixed the json output of total_bytes_processed. The non-json output was already fixed - with pull request #2138 but left the json output untouched. + We've fixed the json output of total_bytes_processed. The non-json output was + already fixed with pull request #2138 but left the json output untouched. https://github.com/restic/restic/issues/2429 * Bugfix #2469: Fix incorrect bytes stats in `diff` command - In some cases, the wrong number of bytes (e.g. 16777215.998 TiB) were reported by the `diff` - command. This is now fixed. + In some cases, the wrong number of bytes (e.g. 16777215.998 TiB) were reported + by the `diff` command. This is now fixed. https://github.com/restic/restic/issues/2469 * Bugfix #2518: Do not crash with Synology NAS sftp server - It was found that when restic is used to store data on an sftp server on a Synology NAS with a - relative path (one which does not start with a slash), it may go into an endless loop trying to - create directories on the server. We've fixed this bug by using a function in the sftp library - instead of our own implementation. + It was found that when restic is used to store data on an sftp server on a + Synology NAS with a relative path (one which does not start with a slash), it + may go into an endless loop trying to create directories on the server. 
We've + fixed this bug by using a function in the sftp library instead of our own + implementation. - The bug was discovered because the Synology sftp server behaves erratic with non-absolute - path (e.g. `home/restic-repo`). This can be resolved by just using an absolute path instead - (`/home/restic-repo`). We've also added a paragraph in the FAQ. + The bug was discovered because the Synology sftp server behaves erratic with + non-absolute path (e.g. `home/restic-repo`). This can be resolved by just using + an absolute path instead (`/home/restic-repo`). We've also added a paragraph in + the FAQ. https://github.com/restic/restic/issues/2518 https://github.com/restic/restic/issues/2363 @@ -3187,84 +4036,90 @@ restic users. The changes are ordered by importance. * Bugfix #2531: Fix incorrect size calculation in `stats --mode restore-size` - The restore-size mode of stats was counting hard-linked files as if they were independent. + The restore-size mode of stats was counting hard-linked files as if they were + independent. https://github.com/restic/restic/issues/2531 * Bugfix #2537: Fix incorrect file counts in `stats --mode restore-size` - The restore-size mode of stats was failing to count empty directories and some files with hard - links. + The restore-size mode of stats was failing to count empty directories and some + files with hard links. https://github.com/restic/restic/issues/2537 * Bugfix #2592: SFTP backend supports IPv6 addresses - The SFTP backend now supports IPv6 addresses natively, without relying on aliases in the - external SSH configuration. + The SFTP backend now supports IPv6 addresses natively, without relying on + aliases in the external SSH configuration. https://github.com/restic/restic/pull/2592 * Bugfix #2607: Honor RESTIC_CACHE_DIR environment variable on Mac and Windows - On Mac and Windows, the RESTIC_CACHE_DIR environment variable was ignored. This variable can - now be used on all platforms to set the directory where restic stores caches. + On Mac and Windows, the RESTIC_CACHE_DIR environment variable was ignored. This + variable can now be used on all platforms to set the directory where restic + stores caches. https://github.com/restic/restic/pull/2607 * Bugfix #2668: Don't abort the stats command when data blobs are missing - Runing the stats command in the blobs-per-file mode on a repository with missing data blobs - previously resulted in a crash. + Running the stats command in the blobs-per-file mode on a repository with + missing data blobs previously resulted in a crash. https://github.com/restic/restic/pull/2668 * Bugfix #2674: Add stricter prune error checks - Additional checks were added to the prune command in order to improve resiliency to backend, - hardware and/or networking issues. The checks now detect a few more cases where such outside - factors could potentially cause data loss. + Additional checks were added to the prune command in order to improve resiliency + to backend, hardware and/or networking issues. The checks now detect a few more + cases where such outside factors could potentially cause data loss. https://github.com/restic/restic/pull/2674 * Bugfix #2899: Fix possible crash in the progress bar of check --read-data - We've fixed a possible crash while displaying the progress bar for the check --read-data - command. The crash occurred when the length of the progress bar status exceeded the terminal - width, which only happened for very narrow terminal windows. 
+ We've fixed a possible crash while displaying the progress bar for the check + --read-data command. The crash occurred when the length of the progress bar + status exceeded the terminal width, which only happened for very narrow terminal + windows. https://github.com/restic/restic/pull/2899 https://forum.restic.net/t/restic-rclone-pcloud-connection-issues/2963/15 * Change #1597: Honor the --no-lock flag in the mount command - The mount command now does not lock the repository if given the --no-lock flag. This allows to - mount repositories which are archived on a read only backend/filesystem. + The mount command now does not lock the repository if given the --no-lock flag. + This allows to mount repositories which are archived on a read only + backend/filesystem. https://github.com/restic/restic/issues/1597 https://github.com/restic/restic/pull/2821 * Change #2482: Remove vendored dependencies - We've removed the vendored dependencies (in the subdir `vendor/`). When building restic, the - Go compiler automatically fetches the dependencies. It will also cryptographically verify - that the correct code has been fetched by using the hashes in `go.sum` (see the link to the - documentation below). + We've removed the vendored dependencies (in the subdir `vendor/`). When building + restic, the Go compiler automatically fetches the dependencies. It will also + cryptographically verify that the correct code has been fetched by using the + hashes in `go.sum` (see the link to the documentation below). https://github.com/restic/restic/issues/2482 https://golang.org/cmd/go/#hdr-Module_downloading_and_verification * Change #2546: Return exit code 3 when failing to backup all source data - The backup command used to return a zero exit code as long as a snapshot could be created - successfully, even if some of the source files could not be read (in which case the snapshot - would contain the rest of the files). + The backup command used to return a zero exit code as long as a snapshot could + be created successfully, even if some of the source files could not be read (in + which case the snapshot would contain the rest of the files). - This made it hard for automation/scripts to detect failures/incomplete backups by looking at - the exit code. Restic now returns the following exit codes for the backup command: + This made it hard for automation/scripts to detect failures/incomplete backups + by looking at the exit code. Restic now returns the following exit codes for the + backup command: - - 0 when the command was successful - 1 when there was a fatal error (no snapshot created) - 3 when - some source data could not be read (incomplete snapshot created) + - 0 when the command was successful - 1 when there was a fatal error (no + snapshot created) - 3 when some source data could not be read (incomplete + snapshot created) https://github.com/restic/restic/issues/956 https://github.com/restic/restic/issues/2064 @@ -3274,12 +4129,12 @@ restic users. The changes are ordered by importance. * Change #2600: Update dependencies, require Go >= 1.13 - Restic now requires Go to be at least 1.13. This allows simplifications in the build process and - removing workarounds. + Restic now requires Go to be at least 1.13. This allows simplifications in the + build process and removing workarounds. - This is also probably the last version of restic still supporting mounting repositories via - fuse on macOS. The library we're using for fuse does not support macOS any more and osxfuse is not - open source any more. 
+ This is also probably the last version of restic still supporting mounting + repositories via fuse on macOS. The library we're using for fuse does not + support macOS any more and osxfuse is not open source any more. https://github.com/bazil/fuse/issues/224 https://github.com/osxfuse/osxfuse/issues/590 @@ -3289,17 +4144,20 @@ restic users. The changes are ordered by importance. * Enhancement #323: Add command for copying snapshots between repositories - We've added a copy command, allowing you to copy snapshots from one repository to another. + We've added a copy command, allowing you to copy snapshots from one repository + to another. - Note that this process will have to read (download) and write (upload) the entire snapshot(s) - due to the different encryption keys used on the source and destination repository. Also, the - transferred files are not re-chunked, which may break deduplication between files already - stored in the destination repo and files copied there using this command. + Note that this process will have to read (download) and write (upload) the + entire snapshot(s) due to the different encryption keys used on the source and + destination repository. Also, the transferred files are not re-chunked, which + may break deduplication between files already stored in the destination repo and + files copied there using this command. - To fully support deduplication between repositories when the copy command is used, the init - command now supports the `--copy-chunker-params` option, which initializes the new - repository with identical parameters for splitting files into chunks as an already existing - repository. This allows copied snapshots to be equally deduplicated in both repositories. + To fully support deduplication between repositories when the copy command is + used, the init command now supports the `--copy-chunker-params` option, which + initializes the new repository with identical parameters for splitting files + into chunks as an already existing repository. This allows copied snapshots to + be equally deduplicated in both repositories. https://github.com/restic/restic/issues/323 https://github.com/restic/restic/pull/2606 @@ -3307,29 +4165,29 @@ restic users. The changes are ordered by importance. * Enhancement #551: Use optimized library for hash calculation of file chunks - We've switched the library used to calculate the hashes of file chunks, which are used for - deduplication, to the optimized Minio SHA-256 implementation. + We've switched the library used to calculate the hashes of file chunks, which + are used for deduplication, to the optimized Minio SHA-256 implementation. - Depending on the CPU it improves the hashing throughput by 10-30%. Modern x86 CPUs with the SHA - Extension should be about two to three times faster. + Depending on the CPU it improves the hashing throughput by 10-30%. Modern x86 + CPUs with the SHA Extension should be about two to three times faster. https://github.com/restic/restic/issues/551 https://github.com/restic/restic/pull/2709 * Enhancement #1570: Support specifying multiple host flags for various commands - Previously commands didn't take more than one `--host` or `-H` argument into account, which - could be limiting with e.g. the `forget` command. + Previously commands didn't take more than one `--host` or `-H` argument into + account, which could be limiting with e.g. the `forget` command. 
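+
+ For example, an invocation with two host filters (the host names and the
+ retention policy are placeholders) is now possible, as described below:
+
+     restic forget --host alpha --host beta --keep-daily 7
+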
- The `dump`, `find`, `forget`, `ls`, `mount`, `restore`, `snapshots`, `stats` and `tag` - commands will now take into account multiple `--host` and `-H` flags. + The `dump`, `find`, `forget`, `ls`, `mount`, `restore`, `snapshots`, `stats` and + `tag` commands will now take into account multiple `--host` and `-H` flags. https://github.com/restic/restic/issues/1570 * Enhancement #1680: Optimize `restic mount` - We've optimized the FUSE implementation used within restic. `restic mount` is now more - responsive and uses less memory. + We've optimized the FUSE implementation used within restic. `restic mount` is + now more responsive and uses less memory. https://github.com/restic/restic/issues/1680 https://github.com/restic/restic/pull/2587 @@ -3343,10 +4201,11 @@ restic users. The changes are ordered by importance. * Enhancement #2175: Allow specifying user and host when creating keys - When adding a new key to the repository, the username and hostname for the new key can be - specified on the command line. This allows overriding the defaults, for example if you would - prefer to use the FQDN to identify the host or if you want to add keys for several different hosts - without having to run the key add command on those hosts. + When adding a new key to the repository, the username and hostname for the new + key can be specified on the command line. This allows overriding the defaults, + for example if you would prefer to use the FQDN to identify the host or if you + want to add keys for several different hosts without having to run the key add + command on those hosts. https://github.com/restic/restic/issues/2175 @@ -3360,15 +4219,16 @@ restic users. The changes are ordered by importance. Fixes "not enough cache capacity" error during restore: https://github.com/restic/restic/issues/2244 - NOTE: This new implementation does not guarantee order in which blobs are written to the target - files and, for example, the last blob of a file can be written to the file before any of the - preceeding file blobs. It is therefore possible to have gaps in the data written to the target - files if restore fails or interrupted by the user. + NOTE: This new implementation does not guarantee order in which blobs are + written to the target files and, for example, the last blob of a file can be + written to the file before any of the preceding file blobs. It is therefore + possible to have gaps in the data written to the target files if restore fails + or interrupted by the user. - The implementation will try to preallocate space for the restored files on the filesystem to - prevent file fragmentation. This ensures good read performance for large files, like for - example VM images. If preallocating space is not supported by the filesystem, then this step is - silently skipped. + The implementation will try to preallocate space for the restored files on the + filesystem to prevent file fragmentation. This ensures good read performance for + large files, like for example VM images. If preallocating space is not supported + by the filesystem, then this step is silently skipped. https://github.com/restic/restic/pull/2195 https://github.com/restic/restic/pull/2893 @@ -3381,69 +4241,73 @@ restic users. The changes are ordered by importance. * Enhancement #2328: Improve speed of check command - We've improved the check command to traverse trees only once independent of whether they are - contained in multiple snapshots. The check command is now much faster for repositories with a - large number of snapshots. 
+ We've improved the check command to traverse trees only once independent of + whether they are contained in multiple snapshots. The check command is now much + faster for repositories with a large number of snapshots. https://github.com/restic/restic/issues/2284 https://github.com/restic/restic/pull/2328 * Enhancement #2395: Ignore sync errors when operation not supported by local filesystem - The local backend has been modified to work with filesystems which doesn't support the `sync` - operation. This operation is normally used by restic to ensure that data files are fully - written to disk before continuing. + The local backend has been modified to work with filesystems which doesn't + support the `sync` operation. This operation is normally used by restic to + ensure that data files are fully written to disk before continuing. - For these limited filesystems, saving a file in the backend would previously fail with an - "operation not supported" error. This error is now ignored, which means that e.g. an SMB mount - on macOS can now be used as storage location for a repository. + For these limited filesystems, saving a file in the backend would previously + fail with an "operation not supported" error. This error is now ignored, which + means that e.g. an SMB mount on macOS can now be used as storage location for a + repository. https://github.com/restic/restic/issues/2395 https://forum.restic.net/t/sync-errors-on-mac-over-smb/1859 * Enhancement #2423: Support user@domain parsing as user - Added the ability for user@domain-like users to be authenticated over SFTP servers. + Added the ability for user@domain-like users to be authenticated over SFTP + servers. https://github.com/restic/restic/pull/2423 * Enhancement #2427: Add flag `--iexclude-file` to backup command - The backup command now supports the flag `--iexclude-file` which is a case-insensitive - version of `--exclude-file`. + The backup command now supports the flag `--iexclude-file` which is a + case-insensitive version of `--exclude-file`. https://github.com/restic/restic/issues/2427 https://github.com/restic/restic/pull/2898 * Enhancement #2569: Support excluding files by their size - The `backup` command now supports the `--exclude-larger-than` option to exclude files which - are larger than the specified maximum size. This can for example be useful to exclude - unimportant files with a large file size. + The `backup` command now supports the `--exclude-larger-than` option to exclude + files which are larger than the specified maximum size. This can for example be + useful to exclude unimportant files with a large file size. https://github.com/restic/restic/issues/2569 https://github.com/restic/restic/pull/2914 * Enhancement #2571: Self-heal missing file parts during backup of unchanged files - We've improved the resilience of restic to certain types of repository corruption. + We've improved the resilience of restic to certain types of repository + corruption. - For files that are unchanged since the parent snapshot, the backup command now verifies that - all parts of the files still exist in the repository. Parts that are missing, e.g. from a damaged - repository, are backed up again. This verification was already run for files that were - modified since the parent snapshot, but is now also done for unchanged files. + For files that are unchanged since the parent snapshot, the backup command now + verifies that all parts of the files still exist in the repository. Parts that + are missing, e.g. 
from a damaged repository, are backed up again. This + verification was already run for files that were modified since the parent + snapshot, but is now also done for unchanged files. - Note that restic will not backup file parts that are referenced in the index but where the actual - data is not present on disk, as this situation can only be detected by restic check. Please - ensure that you run `restic check` regularly. + Note that restic will not backup file parts that are referenced in the index but + where the actual data is not present on disk, as this situation can only be + detected by restic check. Please ensure that you run `restic check` regularly. https://github.com/restic/restic/issues/2571 https://github.com/restic/restic/pull/2827 * Enhancement #2576: Improve the chunking algorithm - We've updated the chunker library responsible for splitting files into smaller blocks. It - should improve the chunking throughput by 5-15% depending on the CPU. + We've updated the chunker library responsible for splitting files into smaller + blocks. It should improve the chunking throughput by 5-15% depending on the CPU. https://github.com/restic/restic/issues/2820 https://github.com/restic/restic/pull/2576 @@ -3451,65 +4315,68 @@ restic users. The changes are ordered by importance. * Enhancement #2598: Improve speed of diff command - We've improved the performance of the diff command when comparing snapshots with similar - content. It should run up to twice as fast as before. + We've improved the performance of the diff command when comparing snapshots with + similar content. It should run up to twice as fast as before. https://github.com/restic/restic/pull/2598 * Enhancement #2599: Slightly reduce memory usage of prune and stats commands - The prune and the stats command kept directory identifiers in memory twice while searching for - used blobs. + The prune and the stats command kept directory identifiers in memory twice while + searching for used blobs. https://github.com/restic/restic/pull/2599 * Enhancement #2733: S3 backend: Add support for WebIdentityTokenFile - We've added support for EKS IAM roles for service accounts feature to the S3 backend. + We've added support for EKS IAM roles for service accounts feature to the S3 + backend. https://github.com/restic/restic/issues/2703 https://github.com/restic/restic/pull/2733 * Enhancement #2773: Optimize handling of new index entries - Restic now uses less memory for backups which add a lot of data, e.g. large initial backups. In - addition, we've improved the stability in some edge cases. + Restic now uses less memory for backups which add a lot of data, e.g. large + initial backups. In addition, we've improved the stability in some edge cases. https://github.com/restic/restic/pull/2773 * Enhancement #2781: Reduce memory consumption of in-memory index - We've improved how the index is stored in memory. This change can reduce memory usage for large - repositories by up to 50% (depending on the operation). + We've improved how the index is stored in memory. This change can reduce memory + usage for large repositories by up to 50% (depending on the operation). https://github.com/restic/restic/pull/2781 https://github.com/restic/restic/pull/2812 * Enhancement #2786: Optimize `list blobs` command - We've changed the implementation of `list blobs` which should be now a bit faster and consume - almost no memory even for large repositories. 
+ We've changed the implementation of `list blobs` which should be now a bit + faster and consume almost no memory even for large repositories. https://github.com/restic/restic/pull/2786 * Enhancement #2790: Optimized file access in restic mount - Reading large (> 100GiB) files from restic mountpoints is now faster, and the speedup is - greater for larger files. + Reading large (> 100GiB) files from restic mountpoints is now faster, and the + speedup is greater for larger files. https://github.com/restic/restic/pull/2790 * Enhancement #2840: Speed-up file deletion in forget, prune and rebuild-index - We've sped up the file deletion for the commands forget, prune and rebuild-index, especially - for remote repositories. Deletion was sequential before and is now run in parallel. + We've sped up the file deletion for the commands forget, prune and + rebuild-index, especially for remote repositories. Deletion was sequential + before and is now run in parallel. https://github.com/restic/restic/pull/2840 * Enhancement #2858: Support filtering snapshots by tag and path in the stats command - We've added filtering snapshots by `--tag tagList` and by `--path path` to the `stats` - command. This includes filtering of only 'latest' snapshots or all snapshots in a repository. + We've added filtering snapshots by `--tag tagList` and by `--path path` to the + `stats` command. This includes filtering of only 'latest' snapshots or all + snapshots in a repository. https://github.com/restic/restic/issues/2858 https://github.com/restic/restic/pull/2859 @@ -3536,81 +4403,85 @@ restic users. The changes are ordered by importance. * Bugfix #2063: Allow absolute path for filename when backing up from stdin - When backing up from stdin, handle directory path for `--stdin-filename`. This can be used to - specify the full path for the backed-up file. + When backing up from stdin, handle directory path for `--stdin-filename`. This + can be used to specify the full path for the backed-up file. https://github.com/restic/restic/issues/2063 * Bugfix #2174: Save files with invalid timestamps - When restic reads invalid timestamps (year is before 0000 or after 9999) it refused to read and - archive the file. We've changed the behavior and will now save modified timestamps with the - year set to either 0000 or 9999, the rest of the timestamp stays the same, so the file will be saved - (albeit with a bogus timestamp). + When restic reads invalid timestamps (year is before 0000 or after 9999) it + refused to read and archive the file. We've changed the behavior and will now + save modified timestamps with the year set to either 0000 or 9999, the rest of + the timestamp stays the same, so the file will be saved (albeit with a bogus + timestamp). https://github.com/restic/restic/issues/2174 https://github.com/restic/restic/issues/1173 * Bugfix #2249: Read fresh metadata for unmodified files - Restic took all metadata for files which were detected as unmodified, not taking into account - changed metadata (ownership, mode). This is now corrected. + Restic took all metadata for files which were detected as unmodified, not taking + into account changed metadata (ownership, mode). This is now corrected. https://github.com/restic/restic/issues/2249 https://github.com/restic/restic/pull/2252 * Bugfix #2301: Add upper bound for t in --read-data-subset=n/t - 256 is the effective maximum for t, but restic would allow larger values, leading to strange - behavior. 
+ 256 is the effective maximum for t, but restic would allow larger values, + leading to strange behavior. https://github.com/restic/restic/issues/2301 https://github.com/restic/restic/pull/2304 * Bugfix #2321: Check errors when loading index files - Restic now checks and handles errors which occur when loading index files, the missing check - leads to odd errors (and a stack trace printed to users) later. This was reported in the forum. + Restic now checks and handles errors which occur when loading index files, the + missing check leads to odd errors (and a stack trace printed to users) later. + This was reported in the forum. https://github.com/restic/restic/pull/2321 https://forum.restic.net/t/check-rebuild-index-prune/1848/13 * Enhancement #2179: Use ctime when checking for file changes - Previously, restic only checked a file's mtime (along with other non-timestamp metadata) to - decide if a file has changed. This could cause restic to not notice that a file has changed (and - therefore continue to store the old version, as opposed to the modified version) if something - edits the file and then resets the timestamp. Restic now also checks the ctime of files, so any - modifications to a file should be noticed, and the modified file will be backed up. The ctime - check will be disabled if the --ignore-inode flag was given. + Previously, restic only checked a file's mtime (along with other non-timestamp + metadata) to decide if a file has changed. This could cause restic to not notice + that a file has changed (and therefore continue to store the old version, as + opposed to the modified version) if something edits the file and then resets the + timestamp. Restic now also checks the ctime of files, so any modifications to a + file should be noticed, and the modified file will be backed up. The ctime check + will be disabled if the --ignore-inode flag was given. - If this change causes problems for you, please open an issue, and we can look in to adding a - seperate flag to disable just the ctime check. + If this change causes problems for you, please open an issue, and we can look in + to adding a separate flag to disable just the ctime check. https://github.com/restic/restic/issues/2179 https://github.com/restic/restic/pull/2212 * Enhancement #2306: Allow multiple retries for interactive password input - Restic used to quit if the repository password was typed incorrectly once. Restic will now ask - the user again for the repository password if typed incorrectly. The user will now get three - tries to input the correct password before restic quits. + Restic used to quit if the repository password was typed incorrectly once. + Restic will now ask the user again for the repository password if typed + incorrectly. The user will now get three tries to input the correct password + before restic quits. https://github.com/restic/restic/issues/2306 * Enhancement #2330: Make `--group-by` accept both singular and plural - One can now use the values `host`/`hosts`, `path`/`paths` and `tag` / `tags` interchangeably - in the `--group-by` argument. + One can now use the values `host`/`hosts`, `path`/`paths` and `tag` / `tags` + interchangeably in the `--group-by` argument. https://github.com/restic/restic/issues/2330 * Enhancement #2350: Add option to configure S3 region - We've added a new option for setting the region when accessing an S3-compatible service. For - some providers, it is required to set this to a valid value. 
You can do that either by setting the - environment variable `AWS_DEFAULT_REGION` or using the option `s3.region`, e.g. like this: - `-o s3.region="us-east-1"`. + We've added a new option for setting the region when accessing an S3-compatible + service. For some providers, it is required to set this to a valid value. You + can do that either by setting the environment variable `AWS_DEFAULT_REGION` or + using the option `s3.region`, e.g. like this: `-o s3.region="us-east-1"`. https://github.com/restic/restic/pull/2350 @@ -3639,10 +4510,11 @@ restic users. The changes are ordered by importance. * Bugfix #2135: Return error when no bytes could be read from stdin - We assume that users reading backup data from stdin want to know when no data could be read, so now - restic returns an error when `backup --stdin` is called but no bytes could be read. Usually, - this means that an earlier command in a pipe has failed. The documentation was amended and now - recommends setting the `pipefail` option (`set -o pipefail`). + We assume that users reading backup data from stdin want to know when no data + could be read, so now restic returns an error when `backup --stdin` is called + but no bytes could be read. Usually, this means that an earlier command in a + pipe has failed. The documentation was amended and now recommends setting the + `pipefail` option (`set -o pipefail`). https://github.com/restic/restic/pull/2135 https://github.com/restic/restic/pull/2139 @@ -3653,84 +4525,88 @@ restic users. The changes are ordered by importance. * Bugfix #2203: Fix reading passwords from stdin - Passwords for the `init`, `key add`, and `key passwd` commands can now be read from - non-terminal stdin. + Passwords for the `init`, `key add`, and `key passwd` commands can now be read + from non-terminal stdin. https://github.com/restic/restic/issues/2203 * Bugfix #2224: Don't abort the find command when a tree can't be loaded - Change the find command so that missing trees don't result in a crash. Instead, the error is - logged to the debug log, and the tree ID is displayed along with the snapshot it belongs to. This - makes it possible to recover repositories that are missing trees by forgetting the snapshots - they are used in. + Change the find command so that missing trees don't result in a crash. Instead, + the error is logged to the debug log, and the tree ID is displayed along with + the snapshot it belongs to. This makes it possible to recover repositories that + are missing trees by forgetting the snapshots they are used in. https://github.com/restic/restic/issues/2224 * Enhancement #1895: Add case insensitive include & exclude options - The backup and restore commands now have --iexclude and --iinclude flags as case insensitive - variants of --exclude and --include. + The backup and restore commands now have --iexclude and --iinclude flags as case + insensitive variants of --exclude and --include. https://github.com/restic/restic/issues/1895 https://github.com/restic/restic/pull/2032 * Enhancement #1937: Support streaming JSON output for backup - We've added support for getting machine-readable status output during backup, just pass the - flag `--json` for `restic backup` and restic will output a stream of JSON objects which contain - the current progress. + We've added support for getting machine-readable status output during backup, + just pass the flag `--json` for `restic backup` and restic will output a stream + of JSON objects which contain the current progress. 
https://github.com/restic/restic/issues/1937 https://github.com/restic/restic/pull/1944 * Enhancement #2037: Add group-by option to snapshots command - We have added an option to group the output of the snapshots command, similar to the output of the - forget command. The option has been called "--group-by" and accepts any combination of the - values "host", "paths" and "tags", separated by commas. Default behavior (not specifying - --group-by) has not been changed. We have added support of the grouping to the JSON output. + We have added an option to group the output of the snapshots command, similar to + the output of the forget command. The option has been called "--group-by" and + accepts any combination of the values "host", "paths" and "tags", separated by + commas. Default behavior (not specifying --group-by) has not been changed. We + have added support of the grouping to the JSON output. https://github.com/restic/restic/issues/2037 https://github.com/restic/restic/pull/2087 * Enhancement #2124: Ability to dump folders to tar via stdout - We've added the ability to dump whole folders to stdout via the `dump` command. Restic now - requires at least Go 1.10 due to a limitation of the standard library for Go <= 1.9. + We've added the ability to dump whole folders to stdout via the `dump` command. + Restic now requires at least Go 1.10 due to a limitation of the standard library + for Go <= 1.9. https://github.com/restic/restic/issues/2123 https://github.com/restic/restic/pull/2124 * Enhancement #2139: Return error if no bytes could be read for `backup --stdin` - When restic is used to backup the output of a program, like `mysqldump | restic backup --stdin`, - it now returns an error if no bytes could be read at all. This catches the failure case when - `mysqldump` failed for some reason and did not output any data to stdout. + When restic is used to backup the output of a program, like `mysqldump | restic + backup --stdin`, it now returns an error if no bytes could be read at all. This + catches the failure case when `mysqldump` failed for some reason and did not + output any data to stdout. https://github.com/restic/restic/pull/2139 * Enhancement #2155: Add Openstack application credential auth for Swift - Since Openstack Queens Identity (auth V3) service supports an application credential auth - method. It allows to create a technical account with the limited roles. This commit adds an - application credential authentication method for the Swift backend. + Since Openstack Queens, the Identity (auth V3) service supports an application + credential auth method, which allows creating a technical account with limited + roles. This commit adds an application credential authentication method for the + Swift backend. https://github.com/restic/restic/issues/2155 * Enhancement #2184: Add --json support to forget command - The forget command now supports the --json argument, outputting the information about what is - (or would-be) kept and removed from the repository. + The forget command now supports the --json argument, outputting the information + about what is (or would be) kept and removed from the repository. https://github.com/restic/restic/issues/2184 https://github.com/restic/restic/pull/2185 * Enhancement #2205: Add --ignore-inode option to backup cmd - This option handles backup of virtual filesystems that do not keep fixed inodes for files, like - Fuse-based, pCloud, etc.
Ignoring inode changes allows to consider the file as unchanged if - last modification date and size are unchanged. + This option handles backup of virtual filesystems that do not keep fixed inodes + for files, like Fuse-based, pCloud, etc. Ignoring inode changes allows restic to + consider the file as unchanged if last modification date and size are unchanged. https://github.com/restic/restic/issues/1631 https://github.com/restic/restic/pull/2205 @@ -3738,16 +4614,17 @@ restic users. The changes are ordered by importance. * Enhancement #2220: Add config option to set S3 storage class - The `s3.storage-class` option can be passed to restic (using `-o`) to specify the storage - class to be used for S3 objects created by restic. + The `s3.storage-class` option can be passed to restic (using `-o`) to specify + the storage class to be used for S3 objects created by restic. - The storage class is passed as-is to S3, so it needs to be understood by the API. On AWS, it can be - one of `STANDARD`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING` and - `REDUCED_REDUNDANCY`. If unspecified, the default storage class is used (`STANDARD` on - AWS). + The storage class is passed as-is to S3, so it needs to be understood by the + API. On AWS, it can be one of `STANDARD`, `STANDARD_IA`, `ONEZONE_IA`, + `INTELLIGENT_TIERING` and `REDUCED_REDUNDANCY`. If unspecified, the default + storage class is used (`STANDARD` on AWS). - You can mix storage classes in the same bucket, and the setting isn't stored in the restic - repository, so be sure to specify it with each command that writes to S3. + You can mix storage classes in the same bucket, and the setting isn't stored in + the restic repository, so be sure to specify it with each command that writes to + S3. https://github.com/restic/restic/issues/706 https://github.com/restic/restic/pull/2220 @@ -3775,19 +4652,19 @@ restic users. The changes are ordered by importance. * Bugfix #1989: Google Cloud Storage: Respect bandwidth limit - The GCS backend did not respect the bandwidth limit configured, a previous commit - accidentally removed support for it. + The GCS backend did not respect the bandwidth limit configured, a previous + commit accidentally removed support for it. https://github.com/restic/restic/issues/1989 https://github.com/restic/restic/pull/2100 * Bugfix #2040: Add host name filter shorthand flag for `stats` command - The default value for `--host` flag was set to 'H' (the shorthand version of the flag), this - caused the lookup for the latest snapshot to fail. + The default value for `--host` flag was set to 'H' (the shorthand version of the + flag), this caused the lookup for the latest snapshot to fail. - Add shorthand flag `-H` for `--host` (with empty default so if these flags are not specified the - latest snapshot will not filter by host name). + Add shorthand flag `-H` for `--host` (with empty default so if these flags are + not specified the latest snapshot will not filter by host name). Also add shorthand `-H` for `backup` command. @@ -3795,17 +4672,17 @@ restic users. The changes are ordered by importance. * Bugfix #2068: Correctly return error loading data - In one case during `prune` and `check`, an error loading data from the backend is not returned - properly. This is now corrected. + In one case during `prune` and `check`, an error loading data from the backend + is not returned properly. This is now corrected.
https://github.com/restic/restic/issues/1999#issuecomment-433737921 https://github.com/restic/restic/pull/2068 * Bugfix #2095: Consistently use local time for snapshots times - By default snapshots created with restic backup were set to local time, but when the --time flag - was used the provided timestamp was parsed as UTC. With this change all snapshots times are set - to local time. + By default snapshots created with restic backup were set to local time, but when + the --time flag was used the provided timestamp was parsed as UTC. With this + change all snapshots times are set to local time. https://github.com/restic/restic/pull/2095 @@ -3814,65 +4691,70 @@ restic users. The changes are ordered by importance. This change significantly improves restore performance, especially when using high-latency remote repositories like B2. - The implementation now uses several concurrent threads to download and process multiple - remote files concurrently. To further reduce restore time, each remote file is downloaded - using a single repository request. + The implementation now uses several concurrent threads to download and process + multiple remote files concurrently. To further reduce restore time, each remote + file is downloaded using a single repository request. https://github.com/restic/restic/issues/1605 https://github.com/restic/restic/pull/1719 * Enhancement #2017: Mount: Enforce FUSE Unix permissions with allow-other - The fuse mount (`restic mount`) now lets the kernel check the permissions of the files within - snapshots (this is done through the `DefaultPermissions` FUSE option) when the option - `--allow-other` is specified. + The fuse mount (`restic mount`) now lets the kernel check the permissions of the + files within snapshots (this is done through the `DefaultPermissions` FUSE + option) when the option `--allow-other` is specified. - To restore the old behavior, we've added the `--no-default-permissions` option. This allows - all users that have access to the mount point to access all files within the snapshots. + To restore the old behavior, we've added the `--no-default-permissions` option. + This allows all users that have access to the mount point to access all files + within the snapshots. https://github.com/restic/restic/pull/2017 * Enhancement #2070: Make all commands display timestamps in local time - Restic used to drop the timezone information from displayed timestamps, it now converts - timestamps to local time before printing them so the times can be easily compared to. + Restic used to drop the timezone information from displayed timestamps; it now + converts timestamps to local time before printing them so the times can be + easily compared. https://github.com/restic/restic/pull/2070 * Enhancement #2085: Allow --files-from to be specified multiple times - Before, restic took only the last file specified with `--files-from` into account, this is now - corrected. + Before, restic took only the last file specified with `--files-from` into + account; this is now corrected. https://github.com/restic/restic/issues/2085 https://github.com/restic/restic/pull/2086 * Enhancement #2089: Increase granularity of the "keep within" retention policy - The `keep-within` option of the `forget` command now accepts time ranges with an hourly - granularity. For example, running `restic forget --keep-within 3d12h` will keep all the - snapshots made within three days and twelve hours from the time of the latest snapshot.
+ The `keep-within` option of the `forget` command now accepts time ranges with an + hourly granularity. For example, running `restic forget --keep-within 3d12h` + will keep all the snapshots made within three days and twelve hours from the + time of the latest snapshot. https://github.com/restic/restic/issues/2089 https://github.com/restic/restic/pull/2090 * Enhancement #2094: Run command to get password - We've added the `--password-command` option which allows specifying a command that restic - runs every time the password for the repository is needed, so it can be integrated with a - password manager or keyring. The option can also be set via the environment variable - `$RESTIC_PASSWORD_COMMAND`. + We've added the `--password-command` option which allows specifying a command + that restic runs every time the password for the repository is needed, so it can + be integrated with a password manager or keyring. The option can also be set via + the environment variable `$RESTIC_PASSWORD_COMMAND`. https://github.com/restic/restic/pull/2094 * Enhancement #2097: Add key hinting - Added a new option `--key-hint` and corresponding environment variable `RESTIC_KEY_HINT`. - The key hint is a key ID to try decrypting first, before other keys in the repository. + Added a new option `--key-hint` and corresponding environment variable + `RESTIC_KEY_HINT`. The key hint is a key ID to try decrypting first, before + other keys in the repository. - This change will benefit repositories with many keys; if the correct key hint is supplied then - restic only needs to check one key. If the key hint is incorrect (the key does not exist, or the - password is incorrect) then restic will check all keys, as usual. + This change will benefit repositories with many keys; if the correct key hint is + supplied then restic only needs to check one key. If the key hint is incorrect + (the key does not exist, or the password is incorrect) then restic will check + all keys, as usual. https://github.com/restic/restic/issues/2097 @@ -3902,29 +4784,31 @@ restic users. The changes are ordered by importance. * Bugfix #1935: Remove truncated files from cache - When a file in the local cache is truncated, and restic tries to access data beyond the end of the - (cached) file, it used to return an error "EOF". This is now fixed, such truncated files are - removed and the data is fetched directly from the backend. + When a file in the local cache is truncated, and restic tries to access data + beyond the end of the (cached) file, it used to return an error "EOF". This is + now fixed, such truncated files are removed and the data is fetched directly + from the backend. https://github.com/restic/restic/issues/1935 * Bugfix #1978: Do not return an error when the scanner is slower than backup - When restic makes a backup, there's a background task called "scanner" which collects - information on how many files and directories are to be saved, in order to display progress - information to the user. When the backup finishes faster than the scanner, it is aborted - because the result is not needed any more. This logic contained a bug, where quitting the - scanner process was treated as an error, and caused restic to print an unhelpful error message - ("context canceled"). + When restic makes a backup, there's a background task called "scanner" which + collects information on how many files and directories are to be saved, in order + to display progress information to the user. 
When the backup finishes faster + than the scanner, it is aborted because the result is not needed any more. This + logic contained a bug, where quitting the scanner process was treated as an + error, and caused restic to print an unhelpful error message ("context + canceled"). https://github.com/restic/restic/issues/1978 https://github.com/restic/restic/pull/1991 * Enhancement #1766: Restore: suppress lchown errors when not running as root - Like "cp" and "rsync" do, restic now only reports errors for changing the ownership of files - during restore if it is run as root, on non-Windows operating systems. On Windows, the error - is reported as usual. + Like "cp" and "rsync" do, restic now only reports errors for changing the + ownership of files during restore if it is run as root, on non-Windows + operating systems. On Windows, the error is reported as usual. https://github.com/restic/restic/issues/1766 @@ -3932,113 +4816,118 @@ restic users. The changes are ordered by importance. We've updated the `find` command to support multiple patterns. - `restic find` is now able to list the snapshots containing a specific tree or blob, or even the - snapshots that contain blobs belonging to a given pack. A list of IDs can be given, as long as they - all have the same type. + `restic find` is now able to list the snapshots containing a specific tree or + blob, or even the snapshots that contain blobs belonging to a given pack. A list + of IDs can be given, as long as they all have the same type. - The command `find` can also display the pack IDs the blobs belong to, if the `--show-pack-id` - flag is provided. + The command `find` can also display the pack IDs the blobs belong to, if the + `--show-pack-id` flag is provided. https://github.com/restic/restic/issues/1777 https://github.com/restic/restic/pull/1780 * Enhancement #1876: Display reason why forget keeps snapshots - We've added a column to the list of snapshots `forget` keeps which details the reasons to keep a - particuliar snapshot. This makes debugging policies for forget much easier. Please remember - to always try things out with `--dry-run`! + We've added a column to the list of snapshots `forget` keeps which details the + reasons to keep a particular snapshot. This makes debugging policies for forget + much easier. Please remember to always try things out with `--dry-run`! https://github.com/restic/restic/pull/1876 * Enhancement #1891: Accept glob in paths loaded via --files-from - Before that, behaviour was different if paths were appended to command line or from a file, - because wild card characters were expanded by shell if appended to command line, but not - expanded if loaded from file. + Before that, behaviour was different if paths were appended to command line or + from a file, because wild card characters were expanded by shell if appended to + command line, but not expanded if loaded from file. https://github.com/restic/restic/issues/1891 * Enhancement #1909: Reject files/dirs by name first - The current scanner/archiver code had an architectural limitation: it always ran the - `lstat()` system call on all files and directories before a decision to include/exclude the - file/dir was made. This lead to a lot of unnecessary system calls for items that could have been - rejected by their name or path only. + The current scanner/archiver code had an architectural limitation: it always ran + the `lstat()` system call on all files and directories before a decision to + include/exclude the file/dir was made. 
This led to a lot of unnecessary system + calls for items that could have been rejected by their name or path only. - We've changed the archiver/scanner implementation so that it now first rejects by name/path, - and only runs the system call on the remaining items. This reduces the number of `lstat()` - system calls a lot (depending on the exclude settings). + We've changed the archiver/scanner implementation so that it now first rejects + by name/path, and only runs the system call on the remaining items. This reduces + the number of `lstat()` system calls a lot (depending on the exclude settings). https://github.com/restic/restic/issues/1909 https://github.com/restic/restic/pull/1912 * Enhancement #1920: Vendor dependencies with Go 1.11 Modules - Until now, we've used `dep` for managing dependencies, we've now switch to using Go modules. - For users this does not change much, only if you want to compile restic without downloading - anything with Go 1.11, then you need to run: `go build -mod=vendor build.go` + Until now, we've used `dep` for managing dependencies; we've now switched to using + Go modules. For users this does not change much, only if you want to compile + restic without downloading anything with Go 1.11, then you need to run: `go + build -mod=vendor build.go` https://github.com/restic/restic/pull/1920 * Enhancement #1940: Add directory filter to ls command - The ls command can now be filtered by directories, so that only files in the given directories - will be shown. If the --recursive flag is specified, then ls will traverse subfolders and list - their files as well. + The ls command can now be filtered by directories, so that only files in the + given directories will be shown. If the --recursive flag is specified, then ls + will traverse subfolders and list their files as well. - It used to be possible to specify multiple snapshots, but that has been replaced by only one - snapshot and the possibility of specifying multiple directories. + It used to be possible to specify multiple snapshots, but that has been replaced + by only one snapshot and the possibility of specifying multiple directories. - Specifying directories constrains the walk, which can significantly speed up the listing. + Specifying directories constrains the walk, which can significantly speed up the + listing. https://github.com/restic/restic/issues/1940 https://github.com/restic/restic/pull/1941 * Enhancement #1949: Add new command `self-update` - We have added a new command called `self-update` which downloads the latest released version - of restic from GitHub and replaces the current binary with it. It does not rely on any external - program (so it'll work everywhere), but still verifies the GPG signature using the embedded - GPG public key. + We have added a new command called `self-update` which downloads the latest + released version of restic from GitHub and replaces the current binary with it. + It does not rely on any external program (so it'll work everywhere), but still + verifies the GPG signature using the embedded GPG public key. - By default, the `self-update` command is hidden behind the `selfupdate` built tag, which is - only set when restic is built using `build.go` (including official releases). The reason for - this is that downstream distributions will then not include the command by default, so users - are encouraged to use the platform-specific distribution mechanism.
+ By default, the `self-update` command is hidden behind the `selfupdate` built + tag, which is only set when restic is built using `build.go` (including official + releases). The reason for this is that downstream distributions will then not + include the command by default, so users are encouraged to use the + platform-specific distribution mechanism. https://github.com/restic/restic/pull/1949 * Enhancement #1953: Ls: Add JSON output support for restic ls cmd - We've implemented listing files in the repository with JSON as output, just pass `--json` as an - option to `restic ls`. This makes the output of the command machine readable. + We've implemented listing files in the repository with JSON as output, just pass + `--json` as an option to `restic ls`. This makes the output of the command + machine readable. https://github.com/restic/restic/pull/1953 * Enhancement #1962: Stream JSON output for ls command - The `ls` command now supports JSON output with the global `--json` flag, and this change - streams out JSON messages one object at a time rather than en entire array buffered in memory - before encoding. The advantage is it allows large listings to be handled efficiently. + The `ls` command now supports JSON output with the global `--json` flag, and + this change streams out JSON messages one object at a time rather than an entire + array buffered in memory before encoding. The advantage is that it allows large + listings to be handled efficiently. - Two message types are printed: snapshots and nodes. A snapshot object will precede node - objects which belong to that snapshot. The `struct_type` field can be used to determine which - kind of message an object is. + Two message types are printed: snapshots and nodes. A snapshot object will + precede node objects which belong to that snapshot. The `struct_type` field can + be used to determine which kind of message an object is. https://github.com/restic/restic/pull/1962 * Enhancement #1967: Use `--host` everywhere - We now use the flag `--host` for all commands which need a host name, using `--hostname` (e.g. - for `restic backup`) still works, but will print a deprecation warning. Also, add the short - option `-H` where possible. + We now use the flag `--host` for all commands which need a host name, using + `--hostname` (e.g. for `restic backup`) still works, but will print a + deprecation warning. Also, add the short option `-H` where possible. https://github.com/restic/restic/issues/1967 * Enhancement #2028: Display size of cache directories - The `cache` command now by default shows the size of the individual cache directories. It can be - disabled with `--no-size`. + The `cache` command now by default shows the size of the individual cache + directories. It can be disabled with `--no-size`. https://github.com/restic/restic/issues/2028 https://github.com/restic/restic/pull/2033 @@ -4066,23 +4955,25 @@ restic users. The changes are ordered by importance. * Bugfix #1854: Allow saving files/dirs on different fs with `--one-file-system` - Restic now allows saving files/dirs on a different file system in a subdir correctly even when - `--one-file-system` is specified. + Restic now allows saving files/dirs on a different file system in a subdir + correctly even when `--one-file-system` is specified. The first thing the restic archiver code does is to build a tree of the target - files/directories. If it detects that a parent directory is already included (e.g. `restic - backup /foo /foo/bar/baz`), it'll ignore the latter argument.
+ files/directories. If it detects that a parent directory is already included + (e.g. `restic backup /foo /foo/bar/baz`), it'll ignore the latter argument. - Without `--one-file-system`, that's perfectly valid: If `/foo` is to be archived, it will - include `/foo/bar/baz`. But with `--one-file-system`, `/foo/bar/baz` may reside on a - different file system, so it won't be included with `/foo`. + Without `--one-file-system`, that's perfectly valid: If `/foo` is to be + archived, it will include `/foo/bar/baz`. But with `--one-file-system`, + `/foo/bar/baz` may reside on a different file system, so it won't be included + with `/foo`. https://github.com/restic/restic/issues/1854 https://github.com/restic/restic/pull/1855 * Bugfix #1861: Fix case-insensitive search with restic find - We've fixed the behavior for `restic find -i PATTERN`, which was broken in v0.9.1. + We've fixed the behavior for `restic find -i PATTERN`, which was broken in + v0.9.1. https://github.com/restic/restic/pull/1861 @@ -4095,21 +4986,22 @@ restic users. The changes are ordered by importance. * Bugfix #1880: Use `--cache-dir` argument for `check` command - `check` command now uses a temporary sub-directory of the specified directory if set using the - `--cache-dir` argument. If not set, the cache directory is created in the default temporary - directory as before. In either case a temporary cache is used to ensure the actual repository is - checked (rather than a local copy). + `check` command now uses a temporary sub-directory of the specified directory if + set using the `--cache-dir` argument. If not set, the cache directory is created + in the default temporary directory as before. In either case a temporary cache + is used to ensure the actual repository is checked (rather than a local copy). - The `--cache-dir` argument was not used by the `check` command, instead a cache directory was - created in the temporary directory. + The `--cache-dir` argument was not used by the `check` command, instead a cache + directory was created in the temporary directory. https://github.com/restic/restic/issues/1880 * Bugfix #1893: Return error when exclude file cannot be read - A bug was found: when multiple exclude files were passed to restic and one of them could not be - read, an error was printed and restic continued, ignoring even the existing exclude files. - Now, an error message is printed and restic aborts when an exclude file cannot be read. + A bug was found: when multiple exclude files were passed to restic and one of + them could not be read, an error was printed and restic continued, ignoring even + the existing exclude files. Now, an error message is printed and restic aborts + when an exclude file cannot be read. https://github.com/restic/restic/issues/1893 @@ -4120,9 +5012,9 @@ restic users. The changes are ordered by importance. * Enhancement #1477: S3 backend: accept AWS_SESSION_TOKEN - Before, it was not possible to use s3 backend with AWS temporary security credentials(with - AWS_SESSION_TOKEN). This change gives higher priority to credentials.EnvAWS credentials - provider. + Before, it was not possible to use s3 backend with AWS temporary security + credentials(with AWS_SESSION_TOKEN). This change gives higher priority to + credentials.EnvAWS credentials provider. https://github.com/restic/restic/issues/1477 https://github.com/restic/restic/pull/1479 @@ -4130,33 +5022,33 @@ restic users. The changes are ordered by importance. 
* Enhancement #1772: Add restore --verify to verify restored file content - Restore will print error message if restored file content does not match expected SHA256 - checksum + Restore will print an error message if restored file content does not match + expected SHA256 checksum https://github.com/restic/restic/pull/1772 * Enhancement #1853: Add JSON output support to `restic key list` - This PR enables users to get the output of `restic key list` in JSON in addition to the existing - table format. + This PR enables users to get the output of `restic key list` in JSON in addition + to the existing table format. https://github.com/restic/restic/pull/1853 * Enhancement #1901: Update the Backblaze B2 library - We've updated the library we're using for accessing the Backblaze B2 service to 0.5.0 to - include support for upcoming so-called "application keys". With this feature, you can create - access credentials for B2 which are restricted to e.g. a single bucket or even a sub-directory - of a bucket. + We've updated the library we're using for accessing the Backblaze B2 service to + 0.5.0 to include support for upcoming so-called "application keys". With this + feature, you can create access credentials for B2 which are restricted to e.g. a + single bucket or even a sub-directory of a bucket. https://github.com/restic/restic/pull/1901 https://github.com/kurin/blazer * Enhancement #1906: Add support for B2 application keys - Restic can now use so-called "application keys" which can be created in the B2 dashboard and - were only introduced recently. In contrast to the "master key", such keys can be restricted to a - specific bucket and/or path. + Restic can now use so-called "application keys" which can be created in the B2 + dashboard and were only introduced recently. In contrast to the "master key", + such keys can be restricted to a specific bucket and/or path. https://github.com/restic/restic/issues/1906 https://github.com/restic/restic/pull/1914 @@ -4178,48 +5070,51 @@ restic users. The changes are ordered by importance. * Bugfix #1801: Add limiting bandwidth to the rclone backend - The rclone backend did not respect `--limit-upload` or `--limit-download`. Oftentimes it's - not necessary to use this, as the limiting in rclone itself should be used because it gives much - better results, but in case a remote instance of rclone is used (e.g. called via ssh), it is still - relevant to limit the bandwidth from restic to rclone. + The rclone backend did not respect `--limit-upload` or `--limit-download`. + Oftentimes it's not necessary to use this, as the limiting in rclone itself + should be used because it gives much better results, but in case a remote + instance of rclone is used (e.g. called via ssh), it is still relevant to limit + the bandwidth from restic to rclone. https://github.com/restic/restic/issues/1801 * Bugfix #1822: Allow uploading large files to MS Azure - Sometimes, restic creates files to be uploaded to the repository which are quite large, e.g. - when saving directories with many entries or very large files. The MS Azure API does not allow - uploading files larger that 256MiB directly, rather restic needs to upload them in blocks of - 100MiB. + Sometimes, restic creates files to be uploaded to the repository which are quite + large, e.g. when saving directories with many entries or very large files. The + MS Azure API does not allow uploading files larger than 256MiB directly, rather + restic needs to upload them in blocks of 100MiB.
This is now implemented. https://github.com/restic/restic/issues/1822 * Bugfix #1825: Correct `find` to not skip snapshots - Under certain circumstances, the `find` command was found to skip snapshots containing - directories with files to look for when the directories haven't been modified at all, and were - already printed as part of a different snapshot. This is now corrected. + Under certain circumstances, the `find` command was found to skip snapshots + containing directories with files to look for when the directories haven't been + modified at all, and were already printed as part of a different snapshot. This + is now corrected. - In addition, we've switched to our own matching/pattern implementation, so now things like - `restic find "/home/user/foo/**/main.go"` are possible. + In addition, we've switched to our own matching/pattern implementation, so now + things like `restic find "/home/user/foo/**/main.go"` are possible. https://github.com/restic/restic/issues/1825 https://github.com/restic/restic/issues/1823 * Bugfix #1833: Fix caching files on error - During `check` it may happen that different threads access the same file in the backend, which - is then downloaded into the cache only once. When that fails, only the thread which is - responsible for downloading the file signals the correct error. The other threads just assume - that the file has been downloaded successfully and then get an error when they try to access the - cached file. + During `check` it may happen that different threads access the same file in the + backend, which is then downloaded into the cache only once. When that fails, + only the thread which is responsible for downloading the file signals the + correct error. The other threads just assume that the file has been downloaded + successfully and then get an error when they try to access the cached file. https://github.com/restic/restic/issues/1833 * Bugfix #1834: Resolve deadlock - When the "scanning" process restic runs to find out how much data there is does not finish before - the backup itself is done, restic stops doing anything. This is resolved now. + When the "scanning" process restic runs to find out how much data there is does + not finish before the backup itself is done, restic stops doing anything. This + is resolved now. https://github.com/restic/restic/issues/1834 https://github.com/restic/restic/pull/1835 @@ -4247,7 +5142,7 @@ restic users. The changes are ordered by importance. * Enh #1665: Improve cache handling for `restic check` * Enh #1709: Improve messages `restic check` prints * Enh #1721: Add `cache` command to list cache dirs - * Enh #1735: Allow keeping a time range of snaphots + * Enh #1735: Allow keeping a time range of snapshots * Enh #1758: Allow saving OneDrive folders in Windows * Enh #1782: Use default AWS credentials chain for S3 backend @@ -4255,77 +5150,81 @@ restic users. The changes are ordered by importance. * Bugfix #1608: Respect time stamp for new backup when reading from stdin - When reading backups from stdin (via `restic backup --stdin`), restic now uses the time stamp - for the new backup passed in `--time`. + When reading backups from stdin (via `restic backup --stdin`), restic now uses + the time stamp for the new backup passed in `--time`. https://github.com/restic/restic/issues/1608 https://github.com/restic/restic/pull/1703 * Bugfix #1652: Ignore/remove invalid lock files - This corrects a bug introduced recently: When an invalid lock file in the repo is encountered - (e.g. 
if the file is empty), the code used to ignore that, but now returns the error. Now, invalid - files are ignored for the normal lock check, and removed when `restic unlock --remove-all` is - run. + This corrects a bug introduced recently: When an invalid lock file in the repo + is encountered (e.g. if the file is empty), the code used to ignore that, but + now returns the error. Now, invalid files are ignored for the normal lock check, + and removed when `restic unlock --remove-all` is run. https://github.com/restic/restic/issues/1652 https://github.com/restic/restic/pull/1653 * Bugfix #1684: Fix backend tests for rest-server - The REST server for restic now requires an explicit parameter (`--no-auth`) if no - authentication should be allowed. This is fixed in the tests. + The REST server for restic now requires an explicit parameter (`--no-auth`) if + no authentication should be allowed. This is fixed in the tests. https://github.com/restic/restic/pull/1684 * Bugfix #1730: Ignore sockets for restore - We've received a report and correct the behavior in which the restore code aborted restoring a - directory when a socket was encountered. Unix domain socket files cannot be restored (they are - created on the fly once a process starts listening). The error handling was corrected, and in - addition we're now ignoring sockets during restore. + We've received a report and corrected the behavior in which the restore code + aborted restoring a directory when a socket was encountered. Unix domain socket + files cannot be restored (they are created on the fly once a process starts + listening). The error handling was corrected, and in addition we're now ignoring + sockets during restore. https://github.com/restic/restic/issues/1730 https://github.com/restic/restic/pull/1731 * Bugfix #1745: Correctly parse the argument to --tls-client-cert - Previously, the --tls-client-cert method attempt to read ARGV[1] (hardcoded) instead of the - argument that was passed to it. This has been corrected. + Previously, the --tls-client-cert method attempted to read ARGV[1] (hardcoded) + instead of the argument that was passed to it. This has been corrected. https://github.com/restic/restic/issues/1745 https://github.com/restic/restic/pull/1746 * Enhancement #549: Rework archiver code - The core archiver code and the complementary code for the `backup` command was rewritten - completely. This resolves very annoying issues such as 549. The first backup with this release - of restic will likely result in all files being re-read locally, so it will take a lot longer. The - next backup after that will be fast again. - - Basically, with the old code, restic took the last path component of each to-be-saved file or - directory as the top-level file/directory within the snapshot. This meant that when called as - `restic backup /home/user/foo`, the snapshot would contain the files in the directory - `/home/user/foo` as `/foo`. - - This is not the case any more with the new archiver code. Now, restic works very similar to what - `tar` does: When restic is called with an absolute path to save, then it'll preserve the - directory structure within the snapshot. For the example above, the snapshot would contain - the files in the directory within `/home/user/foo` in the snapshot. For relative - directories, it only preserves the relative path components. So `restic backup user/foo` - will save the files as `/user/foo` in the snapshot. - - While we were at it, the status display and notification system was completely rewritten.
By - default, restic now shows which files are currently read (unless `--quiet` is specified) in a - multi-line status display. - - The `backup` command also gained a new option: `--verbose`. It can be specified once (which - prints a bit more detail what restic is doing) or twice (which prints a line for each - file/directory restic encountered, together with some statistics). - - Another issue that was resolved is the new code only reads two files at most. The old code would - read way too many files in parallel, thereby slowing down the backup process on spinning discs a - lot. + The core archiver code and the complementary code for the `backup` command was + rewritten completely. This resolves very annoying issues such as 549. The first + backup with this release of restic will likely result in all files being re-read + locally, so it will take a lot longer. The next backup after that will be fast + again. + + Basically, with the old code, restic took the last path component of each + to-be-saved file or directory as the top-level file/directory within the + snapshot. This meant that when called as `restic backup /home/user/foo`, the + snapshot would contain the files in the directory `/home/user/foo` as `/foo`. + + This is not the case any more with the new archiver code. Now, restic works very + similar to what `tar` does: When restic is called with an absolute path to save, + then it'll preserve the directory structure within the snapshot. For the example + above, the snapshot would contain the files in the directory within + `/home/user/foo` in the snapshot. For relative directories, it only preserves + the relative path components. So `restic backup user/foo` will save the files as + `/user/foo` in the snapshot. + + While we were at it, the status display and notification system was completely + rewritten. By default, restic now shows which files are currently read (unless + `--quiet` is specified) in a multi-line status display. + + The `backup` command also gained a new option: `--verbose`. It can be specified + once (which prints a bit more detail what restic is doing) or twice (which + prints a line for each file/directory restic encountered, together with some + statistics). + + Another issue that was resolved is the new code only reads two files at most. + The old code would read way too many files in parallel, thereby slowing down the + backup process on spinning discs a lot. https://github.com/restic/restic/issues/549 https://github.com/restic/restic/issues/1286 @@ -4347,11 +5246,11 @@ restic users. The changes are ordered by importance. * Enhancement #1433: Support UTF-16 encoding and process Byte Order Mark - On Windows, text editors commonly leave a Byte Order Mark at the beginning of the file to define - which encoding is used (oftentimes UTF-16). We've added code to support processing the BOMs in - text files, like the exclude files, the password file and the file passed via `--files-from`. - This does not apply to any file being saved in a backup, those are not touched and archived as they - are. + On Windows, text editors commonly leave a Byte Order Mark at the beginning of + the file to define which encoding is used (oftentimes UTF-16). We've added code + to support processing the BOMs in text files, like the exclude files, the + password file and the file passed via `--files-from`. This does not apply to any + file being saved in a backup, those are not touched and archived as they are. 
https://github.com/restic/restic/issues/1433 https://github.com/restic/restic/issues/1738 @@ -4359,9 +5258,9 @@ restic users. The changes are ordered by importance. * Enhancement #1477: Accept AWS_SESSION_TOKEN for the s3 backend - Before, it was not possible to use s3 backend with AWS temporary security credentials(with - AWS_SESSION_TOKEN). This change gives higher priority to credentials.EnvAWS credentials - provider. + Before, it was not possible to use s3 backend with AWS temporary security + credentials(with AWS_SESSION_TOKEN). This change gives higher priority to + credentials.EnvAWS credentials provider. https://github.com/restic/restic/issues/1477 https://github.com/restic/restic/pull/1479 @@ -4369,23 +5268,24 @@ restic users. The changes are ordered by importance. * Enhancement #1552: Use Google Application Default credentials - Google provide libraries to generate appropriate credentials with various fallback - sources. This change uses the library to generate our GCS client, which allows us to make use of - these extra methods. + Google provide libraries to generate appropriate credentials with various + fallback sources. This change uses the library to generate our GCS client, which + allows us to make use of these extra methods. - This should be backward compatible with previous restic behaviour while adding the - additional capabilities to auth from Google's internal metadata endpoints. For users - running restic in GCP this can make authentication far easier than it was before. + This should be backward compatible with previous restic behaviour while adding + the additional capabilities to auth from Google's internal metadata endpoints. + For users running restic in GCP this can make authentication far easier than it + was before. https://github.com/restic/restic/pull/1552 https://developers.google.com/identity/protocols/application-default-credentials * Enhancement #1561: Allow using rclone to access other services - We've added the ability to use rclone to store backup data on all backends that it supports. This - was done in collaboration with Nick, the author of rclone. You can now use it to first configure a - service, then restic manages the rest (starting and stopping rclone). For details, please see - the manual. + We've added the ability to use rclone to store backup data on all backends that + it supports. This was done in collaboration with Nick, the author of rclone. You + can now use it to first configure a service, then restic manages the rest + (starting and stopping rclone). For details, please see the manual. https://github.com/restic/restic/issues/1561 https://github.com/restic/restic/pull/1657 @@ -4393,9 +5293,9 @@ restic users. The changes are ordered by importance. * Enhancement #1648: Ignore AWS permission denied error when creating a repository - It's not possible to use s3 backend scoped to a subdirectory(with specific permissions). - Restic doesn't try to create repository in a subdirectory, when 'bucket exists' of parent - directory check fails due to permission issues. + It's not possible to use s3 backend scoped to a subdirectory(with specific + permissions). Restic doesn't try to create repository in a subdirectory, when + 'bucket exists' of parent directory check fails due to permission issues. https://github.com/restic/restic/pull/1648 @@ -4405,25 +5305,27 @@ restic users. The changes are ordered by importance. 
* Enhancement #1665: Improve cache handling for `restic check` - For safety reasons, restic does not use a local metadata cache for the `restic check` command, - so that data is loaded from the repository and restic can check it's in good condition. When the - cache is disabled, restic will fetch each tiny blob needed for checking the integrity using a - separate backend request. For non-local backends, that will take a long time, and depending on - the backend (e.g. B2) may also be much more expensive. + For safety reasons, restic does not use a local metadata cache for the `restic + check` command, so that data is loaded from the repository and restic can check + it's in good condition. When the cache is disabled, restic will fetch each tiny + blob needed for checking the integrity using a separate backend request. For + non-local backends, that will take a long time, and depending on the backend + (e.g. B2) may also be much more expensive. This PR adds a few commits which will change the behavior as follows: - * When `restic check` is called without any additional parameters, it will build a new cache in a - temporary directory, which is removed at the end of the check. This way, we'll get readahead for - metadata files (so restic will fetch the whole file when the first blob from the file is - requested), but all data is freshly fetched from the storage backend. This is the default - behavior and will work for almost all users. + * When `restic check` is called without any additional parameters, it will build + a new cache in a temporary directory, which is removed at the end of the check. + This way, we'll get readahead for metadata files (so restic will fetch the whole + file when the first blob from the file is requested), but all data is freshly + fetched from the storage backend. This is the default behavior and will work for + almost all users. - * When `restic check` is called with `--with-cache`, the default on-disc cache is used. This - behavior hasn't changed since the cache was introduced. + * When `restic check` is called with `--with-cache`, the default on-disc cache + is used. This behavior hasn't changed since the cache was introduced. - * When `--no-cache` is specified, restic falls back to the old behavior, and read all tiny blobs - in separate requests. + * When `--no-cache` is specified, restic falls back to the old behavior, and + reads all tiny blobs in separate requests. https://github.com/restic/restic/issues/1665 https://github.com/restic/restic/issues/1694 @@ -4431,44 +5333,45 @@ restic users. The changes are ordered by importance. * Enhancement #1709: Improve messages `restic check` prints - Some messages `restic check` prints are not really errors, so from now on restic does not treat - them as errors any more and exits cleanly. + Some messages `restic check` prints are not really errors, so from now on restic + does not treat them as errors any more and exits cleanly. https://github.com/restic/restic/pull/1709 https://forum.restic.net/t/what-is-the-standard-procedure-to-follow-if-a-backup-or-restore-is-interrupted/571/2 * Enhancement #1721: Add `cache` command to list cache dirs - The command `cache` was added, it allows listing restic's cache directoriers together with - the last usage. It also allows removing old cache dirs without having to access a repo, via - `restic cache --cleanup` + The command `cache` was added, it allows listing restic's cache directories
It also allows removing old cache dirs without + having to access a repo, via `restic cache --cleanup` https://github.com/restic/restic/issues/1721 https://github.com/restic/restic/pull/1749 - * Enhancement #1735: Allow keeping a time range of snaphots + * Enhancement #1735: Allow keeping a time range of snapshots - We've added the `--keep-within` option to the `forget` command. It instructs restic to keep - all snapshots within the given duration since the newest snapshot. For example, running - `restic forget --keep-within 5m7d` will keep all snapshots which have been made in the five - months and seven days since the latest snapshot. + We've added the `--keep-within` option to the `forget` command. It instructs + restic to keep all snapshots within the given duration since the newest + snapshot. For example, running `restic forget --keep-within 5m7d` will keep all + snapshots which have been made in the five months and seven days since the + latest snapshot. https://github.com/restic/restic/pull/1735 * Enhancement #1758: Allow saving OneDrive folders in Windows - Restic now contains a bugfix to two libraries, which allows saving OneDrive folders in - Windows. In order to use the newer versions of the libraries, the minimal version required to - compile restic is now Go 1.9. + Restic now contains a bugfix to two libraries, which allows saving OneDrive + folders in Windows. In order to use the newer versions of the libraries, the + minimal version required to compile restic is now Go 1.9. https://github.com/restic/restic/issues/1758 https://github.com/restic/restic/pull/1765 * Enhancement #1782: Use default AWS credentials chain for S3 backend - Adds support for file credentials to the S3 backend (e.g. ~/.aws/credentials), and reorders - the credentials chain for the S3 backend to match AWS's standard, which is static credentials, - env vars, credentials file, and finally remote. + Adds support for file credentials to the S3 backend (e.g. ~/.aws/credentials), + and reorders the credentials chain for the S3 backend to match AWS's standard, + which is static credentials, env vars, credentials file, and finally remote. https://github.com/restic/restic/pull/1782 @@ -4491,32 +5394,34 @@ restic users. The changes are ordered by importance. * Bugfix #1633: Fixed unexpected 'pack file cannot be listed' error - Due to a regression introduced in 0.8.2, the `rebuild-index` and `prune` commands failed to - read pack files with size of 587, 588, 589 or 590 bytes. + Due to a regression introduced in 0.8.2, the `rebuild-index` and `prune` + commands failed to read pack files with size of 587, 588, 589 or 590 bytes. https://github.com/restic/restic/issues/1633 https://github.com/restic/restic/pull/1635 * Bugfix #1638: Handle errors listing files in the backend - A user reported in the forum that restic completes a backup although a concurrent `prune` - operation was running. A few error messages were printed, but the backup was attempted and - completed successfully. No error code was returned. + A user reported in the forum that restic completes a backup although a + concurrent `prune` operation was running. A few error messages were printed, but + the backup was attempted and completed successfully. No error code was returned. - This should not happen: The repository is exclusively locked during `prune`, so when `restic - backup` is run in parallel, it should abort and return an error code instead. 
+ This should not happen: The repository is exclusively locked during `prune`, so + when `restic backup` is run in parallel, it should abort and return an error + code instead. - It was found that the bug was in the code introduced only recently, which retries a List() - operation on the backend should that fail. It is now corrected. + It was found that the bug was in the code introduced only recently, which + retries a List() operation on the backend should that fail. It is now corrected. https://github.com/restic/restic/pull/1638 https://forum.restic.net/t/restic-backup-returns-0-exit-code-when-already-locked/484 * Bugfix #1641: Ignore files with invalid names in the repo - The release 0.8.2 introduced a bug: when restic encounters files in the repo which do not have a - valid name, it tries to load a file with a name of lots of zeroes instead of ignoring it. This is now - resolved, invalid file names are just ignored. + The release 0.8.2 introduced a bug: when restic encounters files in the repo + which do not have a valid name, it tries to load a file with a name of lots of + zeroes instead of ignoring it. This is now resolved, invalid file names are just + ignored. https://github.com/restic/restic/issues/1641 https://github.com/restic/restic/pull/1643 @@ -4524,8 +5429,9 @@ restic users. The changes are ordered by importance. * Enhancement #1497: Add --read-data-subset flag to check command - This change introduces ability to check integrity of a subset of repository data packs. This - can be used to spread integrity check of larger repositories over a period of time. + This change introduces ability to check integrity of a subset of repository data + packs. This can be used to spread integrity check of larger repositories over a + period of time. https://github.com/restic/restic/issues/1497 https://github.com/restic/restic/pull/1556 @@ -4538,21 +5444,22 @@ restic users. The changes are ordered by importance. * Enhancement #1623: Don't check for presence of files in the backend before writing - Before, all backend implementations were required to return an error if the file that is to be - written already exists in the backend. For most backends, that means making a request (e.g. via - HTTP) and returning an error when the file already exists. + Before, all backend implementations were required to return an error if the file + that is to be written already exists in the backend. For most backends, that + means making a request (e.g. via HTTP) and returning an error when the file + already exists. - This is not accurate, the file could have been created between the HTTP request testing for it, - and when writing starts, so we've relaxed this requeriment, which saves one additional HTTP - request per newly added file. + This is not accurate, the file could have been created between the HTTP request + testing for it, and when writing starts, so we've relaxed this requirement, + which saves one additional HTTP request per newly added file. https://github.com/restic/restic/pull/1623 * Enhancement #1634: Upgrade B2 client library, reduce HTTP requests - We've upgraded the B2 client library restic uses to access BackBlaze B2. This reduces the - number of HTTP requests needed to upload a new file from two to one, which should improve - throughput to B2. + We've upgraded the B2 client library restic uses to access BackBlaze B2. This + reduces the number of HTTP requests needed to upload a new file from two to one, + which should improve throughput to B2. 
https://github.com/restic/restic/pull/1634 @@ -4563,7 +5470,7 @@ restic users. The changes are ordered by importance. ## Summary - * Fix #1506: Limit bandwith at the http.RoundTripper for HTTP based backends + * Fix #1506: Limit bandwidth at the http.RoundTripper for HTTP based backends * Fix #1512: Restore directory permissions as the last step * Fix #1528: Correctly create missing subdirs in data/ * Fix #1589: Complete intermediate index upload @@ -4583,17 +5490,17 @@ restic users. The changes are ordered by importance. ## Details - * Bugfix #1506: Limit bandwith at the http.RoundTripper for HTTP based backends + * Bugfix #1506: Limit bandwidth at the http.RoundTripper for HTTP based backends https://github.com/restic/restic/issues/1506 https://github.com/restic/restic/pull/1511 * Bugfix #1512: Restore directory permissions as the last step - This change allows restoring into directories that were not writable during backup. Before, - restic created the directory, set the read-only mode and then failed to create files in the - directory. This change now restores the directory (with its permissions) as the very last - step. + This change allows restoring into directories that were not writable during + backup. Before, restic created the directory, set the read-only mode and then + failed to create files in the directory. This change now restores the directory + (with its permissions) as the very last step. https://github.com/restic/restic/issues/1512 https://github.com/restic/restic/pull/1536 @@ -4605,43 +5512,47 @@ restic users. The changes are ordered by importance. * Bugfix #1589: Complete intermediate index upload - After a user posted a comprehensive report of what he observed, we were able to find a bug and - correct it: During backup, restic uploads so-called "intermediate" index files. When the - backup finishes during a transfer of such an intermediate index, the upload is cancelled, but - the backup is finished without an error. This leads to an inconsistent state, where the - snapshot references data that is contained in the repo, but is not referenced in any index. + After a user posted a comprehensive report of what he observed, we were able to + find a bug and correct it: During backup, restic uploads so-called + "intermediate" index files. When the backup finishes during a transfer of such + an intermediate index, the upload is cancelled, but the backup is finished + without an error. This leads to an inconsistent state, where the snapshot + references data that is contained in the repo, but is not referenced in any + index. - The situation can be resolved by building a new index with `rebuild-index`, but looks very - confusing at first. Since all the data got uploaded to the repo successfully, there was no risk - of data loss, just minor inconvenience for our users. + The situation can be resolved by building a new index with `rebuild-index`, but + looks very confusing at first. Since all the data got uploaded to the repo + successfully, there was no risk of data loss, just minor inconvenience for our + users. https://github.com/restic/restic/pull/1589 https://forum.restic.net/t/error-loading-tree-check-prune-and-forget-gives-error-b2-backend/406 * Bugfix #1590: Strip spaces for lines read via --files-from - Leading and trailing spaces in lines read via `--files-from` are now stripped, so it behaves - the same as with lines read via `--exclude-file`. 
+ Leading and trailing spaces in lines read via `--files-from` are now stripped, + so it behaves the same as with lines read via `--exclude-file`. https://github.com/restic/restic/issues/1590 https://github.com/restic/restic/pull/1613 * Bugfix #1594: Google Cloud Storage: Use generic HTTP transport - It was discovered that the Google Cloud Storage backend did not use the generic HTTP transport, - so things such as bandwidth limiting with `--limit-upload` did not work. This is resolved now. + It was discovered that the Google Cloud Storage backend did not use the generic + HTTP transport, so things such as bandwidth limiting with `--limit-upload` did + not work. This is resolved now. https://github.com/restic/restic/pull/1594 * Bugfix #1595: Backup: Remove bandwidth display - This commit removes the bandwidth displayed during backup process. It is misleading and - seldomly correct, because it's neither the "read bandwidth" (only for the very first backup) - nor the "upload bandwidth". Many users are confused about (and rightly so), c.f. #1581, #1033, - #1591 + This commit removes the bandwidth displayed during backup process. It is + misleading and seldom correct, because it's neither the "read bandwidth" (only + for the very first backup) nor the "upload bandwidth". Many users are confused + about (and rightly so), c.f. #1581, #1033, #1591 - We'll eventually replace this display with something more relevant when the new archiver code - is ready. + We'll eventually replace this display with something more relevant when the new + archiver code is ready. https://github.com/restic/restic/pull/1595 @@ -4651,59 +5562,61 @@ restic users. The changes are ordered by importance. * Enhancement #1522: Add support for TLS client certificate authentication - Support has been added for using a TLS client certificate for authentication to HTTP based - backend. A file containing the PEM encoded private key and certificate can be set using the - `--tls-client-cert` option. + Support has been added for using a TLS client certificate for authentication to + HTTP based backend. A file containing the PEM encoded private key and + certificate can be set using the `--tls-client-cert` option. https://github.com/restic/restic/issues/1522 https://github.com/restic/restic/pull/1524 * Enhancement #1538: Reduce memory allocations for querying the index - This change reduces the internal memory allocations when the index data structures in memory - are queried if a blob (part of a file) already exists in the repo. It should speed up backup a bit, - and maybe even reduce RAM usage. + This change reduces the internal memory allocations when the index data + structures in memory are queried if a blob (part of a file) already exists in + the repo. It should speed up backup a bit, and maybe even reduce RAM usage. https://github.com/restic/restic/pull/1538 * Enhancement #1541: Reduce number of remote requests during repository check - This change eliminates redundant remote repository calls and significantly improves - repository check time. + This change eliminates redundant remote repository calls and significantly + improves repository check time. https://github.com/restic/restic/issues/1541 https://github.com/restic/restic/pull/1548 * Enhancement #1549: Speed up querying across indices and scanning existing files - This change increases the whenever a blob (part of a file) is searched for in a restic - repository. This will reduce cpu usage some when backing up files already backed up by restic. 
- Cpu usage is further decreased when scanning files. + This change increases the whenever a blob (part of a file) is searched for in a + restic repository. This will reduce cpu usage some when backing up files already + backed up by restic. Cpu usage is further decreased when scanning files. https://github.com/restic/restic/pull/1549 * Enhancement #1554: Fuse/mount: Correctly handle EOF, add template option - We've added the `--snapshot-template` string, which can be used to specify a template for a - snapshot directory. In addition, accessing data after the end of a file via the fuse mount is now - handled correctly. + We've added the `--snapshot-template` string, which can be used to specify a + template for a snapshot directory. In addition, accessing data after the end of + a file via the fuse mount is now handled correctly. https://github.com/restic/restic/pull/1554 * Enhancement #1564: Don't terminate ssh on SIGINT - We've reworked the code which runs the `ssh` login for the sftp backend so that it can prompt for a - password (if needed) but does not exit when the user presses CTRL+C (SIGINT) e.g. during - backup. This allows restic to properly shut down when it receives SIGINT and remove the lock - file from the repo, afterwards exiting the `ssh` process. + We've reworked the code which runs the `ssh` login for the sftp backend so that + it can prompt for a password (if needed) but does not exit when the user presses + CTRL+C (SIGINT) e.g. during backup. This allows restic to properly shut down + when it receives SIGINT and remove the lock file from the repo, afterwards + exiting the `ssh` process. https://github.com/restic/restic/pull/1564 https://github.com/restic/restic/pull/1588 * Enhancement #1567: Reduce number of backend requests for rebuild-index and prune - We've found a way to reduce then number of backend requests for the `rebuild-index` and `prune` - operations. This significantly speeds up the operations for high-latency backends. + We've found a way to reduce then number of backend requests for the + `rebuild-index` and `prune` operations. This significantly speeds up the + operations for high-latency backends. https://github.com/restic/restic/issues/1567 https://github.com/restic/restic/pull/1574 @@ -4715,10 +5628,11 @@ restic users. The changes are ordered by importance. * Enhancement #1584: Limit index file size - Before, restic would create a single new index file on `prune` or `rebuild-index`, this may - lead to memory problems when this huge index is created and loaded again. We're now limiting the - size of the index file, and split newly created index files into several smaller ones. This - allows restic to be more memory-efficient. + Before, restic would create a single new index file on `prune` or + `rebuild-index`, this may lead to memory problems when this huge index is + created and loaded again. We're now limiting the size of the index file, and + split newly created index files into several smaller ones. This allows restic to + be more memory-efficient. https://github.com/restic/restic/issues/1412 https://github.com/restic/restic/issues/979 @@ -4744,8 +5658,8 @@ restic users. The changes are ordered by importance. * Bugfix #1454: Correct cache dir location for Windows and Darwin - The cache directory on Windows and Darwin was not correct, instead the directory `.cache` was - used. + The cache directory on Windows and Darwin was not correct, instead the directory + `.cache` was used. 
https://github.com/restic/restic/pull/1454 @@ -4756,9 +5670,9 @@ restic users. The changes are ordered by importance. * Bugfix #1459: Disable handling SIGPIPE - We've disabled handling SIGPIPE again. Turns out, writing to broken TCP connections also - raised SIGPIPE, so restic exits on the first write to a broken connection. Instead, restic - should retry the request. + We've disabled handling SIGPIPE again. Turns out, writing to broken TCP + connections also raised SIGPIPE, so restic exits on the first write to a broken + connection. Instead, restic should retry the request. https://github.com/restic/restic/issues/1457 https://github.com/restic/restic/issues/1466 @@ -4766,16 +5680,18 @@ restic users. The changes are ordered by importance. * Change #1452: Do not save atime by default - By default, the access time for files and dirs is not saved any more. It is not possible to - reliably disable updating the access time during a backup, so for the next backup the access - time is different again. This means a lot of metadata is saved. If you want to save the access time - anyway, pass `--with-atime` to the `backup` command. + By default, the access time for files and dirs is not saved any more. It is not + possible to reliably disable updating the access time during a backup, so for + the next backup the access time is different again. This means a lot of metadata + is saved. If you want to save the access time anyway, pass `--with-atime` to the + `backup` command. https://github.com/restic/restic/pull/1452 * Enhancement #11: Add the `diff` command - The command `diff` was added, it allows comparing two snapshots and listing all differences. + The command `diff` was added, it allows comparing two snapshots and listing all + differences. https://github.com/restic/restic/issues/11 https://github.com/restic/restic/issues/1460 @@ -4783,17 +5699,18 @@ restic users. The changes are ordered by importance. * Enhancement #1436: Add code to detect old cache directories - We've added code to detect old cache directories of repositories that haven't been used in a - long time, restic now prints a note when it detects that such dirs exist. Also, the option - `--cleanup-cache` was added to automatically remove such directories. That's not a problem - because the cache will be rebuild once a repo is accessed again. + We've added code to detect old cache directories of repositories that haven't + been used in a long time, restic now prints a note when it detects that such + dirs exist. Also, the option `--cleanup-cache` was added to automatically remove + such directories. That's not a problem because the cache will be rebuild once a + repo is accessed again. https://github.com/restic/restic/pull/1436 * Enhancement #1439: Improve cancellation logic - The cancellation logic was improved, restic can now shut down cleanly when requested to do so - (e.g. via ctrl+c). + The cancellation logic was improved, restic can now shut down cleanly when + requested to do so (e.g. via ctrl+c). https://github.com/restic/restic/pull/1439 @@ -4828,17 +5745,18 @@ restic users. The changes are ordered by importance. * Security #1445: Prevent writing outside the target directory during restore - A vulnerability was found in the restic restorer, which allowed attackers in special - circumstances to restore files to a location outside of the target directory. Due to the - circumstances we estimate this to be a low-risk vulnerability, but urge all users to upgrade to - the latest version of restic. 
+ A vulnerability was found in the restic restorer, which allowed attackers in + special circumstances to restore files to a location outside of the target + directory. Due to the circumstances we estimate this to be a low-risk + vulnerability, but urge all users to upgrade to the latest version of restic. - Exploiting the vulnerability requires a Linux/Unix system which saves backups via restic and - a Windows systems which restores files from the repo. In addition, the attackers need to be able - to create files with arbitrary names which are then saved to the restic repo. For example, by - creating a file named "..\test.txt" (which is a perfectly legal filename on Linux) and - restoring a snapshot containing this file on Windows, it would be written to the parent of the - target directory. + Exploiting the vulnerability requires a Linux/Unix system which saves backups + via restic and a Windows systems which restores files from the repo. In + addition, the attackers need to be able to create files with arbitrary names + which are then saved to the restic repo. For example, by creating a file named + "..\test.txt" (which is a perfectly legal filename on Linux) and restoring a + snapshot containing this file on Windows, it would be written to the parent of + the target directory. We'd like to thank Tyler Spivey for reporting this responsibly! @@ -4846,34 +5764,36 @@ restic users. The changes are ordered by importance. * Bugfix #1256: Re-enable workaround for S3 backend - We've re-enabled a workaround for `minio-go` (the library we're using to access s3 backends), - this reduces memory usage. + We've re-enabled a workaround for `minio-go` (the library we're using to access + s3 backends), this reduces memory usage. https://github.com/restic/restic/issues/1256 https://github.com/restic/restic/pull/1267 * Bugfix #1291: Reuse backend TCP connections to BackBlaze B2 - A bug was discovered in the library we're using to access Backblaze, it now reuses already - established TCP connections which should be a lot faster and not cause network failures any - more. + A bug was discovered in the library we're using to access Backblaze, it now + reuses already established TCP connections which should be a lot faster and not + cause network failures any more. https://github.com/restic/restic/issues/1291 https://github.com/restic/restic/pull/1301 * Bugfix #1317: Run prune when `forget --prune` is called with just snapshot IDs - A bug in the `forget` command caused `prune` not to be run when `--prune` was specified without a - policy, e.g. when only snapshot IDs that should be forgotten are listed manually. + A bug in the `forget` command caused `prune` not to be run when `--prune` was + specified without a policy, e.g. when only snapshot IDs that should be forgotten + are listed manually. https://github.com/restic/restic/pull/1317 * Bugfix #1437: Remove implicit path `/restic` for the s3 backend - The s3 backend used the subdir `restic` within a bucket if no explicit path after the bucket name - was specified. Since this version, restic does not use this default path any more. If you - created a repo on s3 in a bucket without specifying a path within the bucket, you need to add - `/restic` at the end of the repository specification to access your repo: + The s3 backend used the subdir `restic` within a bucket if no explicit path + after the bucket name was specified. Since this version, restic does not use + this default path any more. 
If you created a repo on s3 in a bucket without + specifying a path within the bucket, you need to add `/restic` at the end of the + repository specification to access your repo: `s3:s3.amazonaws.com/bucket/restic` https://github.com/restic/restic/issues/1292 @@ -4881,32 +5801,35 @@ restic users. The changes are ordered by importance. * Enhancement #448: Sftp backend prompts for password - The sftp backend now prompts for the password if a password is necessary for login. + The sftp backend now prompts for the password if a password is necessary for + login. https://github.com/restic/restic/issues/448 https://github.com/restic/restic/pull/1270 * Enhancement #510: Add `dump` command - We've added the `dump` command which prints a file from a snapshot to stdout. This can e.g. be - used to restore files read with `backup --stdin`. + We've added the `dump` command which prints a file from a snapshot to stdout. + This can e.g. be used to restore files read with `backup --stdin`. https://github.com/restic/restic/issues/510 https://github.com/restic/restic/pull/1346 * Enhancement #1040: Add local metadata cache - We've added a local cache for metadata so that restic doesn't need to load all metadata - (snapshots, indexes, ...) from the repo each time it starts. By default the cache is active, but - there's a new global option `--no-cache` that can be used to disable the cache. By deafult, the - cache a standard cache folder for the OS, which can be overridden with `--cache-dir`. The cache - will automatically populate, indexes and snapshots are saved as they are loaded. Cache - directories for repos that haven't been used recently can automatically be removed by restic + We've added a local cache for metadata so that restic doesn't need to load all + metadata (snapshots, indexes, ...) from the repo each time it starts. By default + the cache is active, but there's a new global option `--no-cache` that can be + used to disable the cache. By default, the cache a standard cache folder for the + OS, which can be overridden with `--cache-dir`. The cache will automatically + populate, indexes and snapshots are saved as they are loaded. Cache directories + for repos that haven't been used recently can automatically be removed by restic with the `--cleanup-cache` option. - A related change was to by default create pack files in the repo that contain either data or - metadata, not both mixed together. This allows easy caching of only the metadata files. The - next run of `restic prune` will untangle mixed files automatically. + A related change was to by default create pack files in the repo that contain + either data or metadata, not both mixed together. This allows easy caching of + only the metadata files. The next run of `restic prune` will untangle mixed + files automatically. https://github.com/restic/restic/issues/29 https://github.com/restic/restic/issues/738 @@ -4918,8 +5841,8 @@ restic users. The changes are ordered by importance. * Enhancement #1102: Add subdirectory `ids` to fuse mount - The fuse mount now has an `ids` subdirectory which contains the snapshots below their (short) - IDs. + The fuse mount now has an `ids` subdirectory which contains the snapshots below + their (short) IDs. https://github.com/restic/restic/issues/1102 https://github.com/restic/restic/pull/1299 @@ -4927,17 +5850,17 @@ restic users. The changes are ordered by importance. 
* Enhancement #1114: Add `--cacert` to specify TLS certificates to check against - We've added the `--cacert` option which can be used to pass one (or more) CA certificates to - restic. These are used in addition to the system CA certificates to verify HTTPS certificates - (e.g. for the REST backend). + We've added the `--cacert` option which can be used to pass one (or more) CA + certificates to restic. These are used in addition to the system CA certificates + to verify HTTPS certificates (e.g. for the REST backend). https://github.com/restic/restic/issues/1114 https://github.com/restic/restic/pull/1276 * Enhancement #1216: Add upload/download limiting - We've added support for rate limiting through `--limit-upload` and `--limit-download` - flags. + We've added support for rate limiting through `--limit-upload` and + `--limit-download` flags. https://github.com/restic/restic/issues/1216 https://github.com/restic/restic/pull/1336 @@ -4945,15 +5868,15 @@ restic users. The changes are ordered by importance. * Enhancement #1249: Add `latest` symlink in fuse mount - The directory structure in the fuse mount now exposes a symlink `latest` which points to the - latest snapshot in that particular directory. + The directory structure in the fuse mount now exposes a symlink `latest` which + points to the latest snapshot in that particular directory. https://github.com/restic/restic/pull/1249 * Enhancement #1269: Add `--compact` to `forget` command - The option `--compact` was added to the `forget` command to provide the same compact view as the - `snapshots` command. + The option `--compact` was added to the `forget` command to provide the same + compact view as the `snapshots` command. https://github.com/restic/restic/pull/1269 @@ -4966,25 +5889,26 @@ restic users. The changes are ordered by importance. * Enhancement #1274: Add `generate` command, replaces `manpage` and `autocomplete` - The `generate` command has been added, which replaces the now removed commands `manpage` and - `autocomplete`. This release of restic contains the most recent manpages in `doc/man` and the - auto-completion files for bash and zsh in `doc/bash-completion.sh` and - `doc/zsh-completion.zsh` + The `generate` command has been added, which replaces the now removed commands + `manpage` and `autocomplete`. This release of restic contains the most recent + manpages in `doc/man` and the auto-completion files for bash and zsh in + `doc/bash-completion.sh` and `doc/zsh-completion.zsh` https://github.com/restic/restic/issues/1274 https://github.com/restic/restic/pull/1282 * Enhancement #1281: Google Cloud Storage backend needs less permissions - The Google Cloud Storage backend no longer requires the service account to have the - `storage.buckets.get` permission ("Storage Admin" role) in `restic init` if the bucket - already exists. + The Google Cloud Storage backend no longer requires the service account to have + the `storage.buckets.get` permission ("Storage Admin" role) in `restic init` if + the bucket already exists. https://github.com/restic/restic/pull/1281 * Enhancement #1319: Make `check` print `no errors found` explicitly - The `check` command now explicetly prints `No errors were found` when no errors could be found. + The `check` command now explicitly prints `No errors were found` when no errors + could be found. https://github.com/restic/restic/issues/1303 https://github.com/restic/restic/pull/1319 @@ -4995,8 +5919,8 @@ restic users. The changes are ordered by importance. 
* Enhancement #1367: Allow comments in files read from via `--file-from` - When the list of files/dirs to be saved is read from a file with `--files-from`, comment lines - (starting with `#`) are now ignored. + When the list of files/dirs to be saved is read from a file with `--files-from`, + comment lines (starting with `#`) are now ignored. https://github.com/restic/restic/issues/1367 https://github.com/restic/restic/pull/1368 @@ -5014,9 +5938,10 @@ restic users. The changes are ordered by importance. * Bugfix #1246: List all files stored in Google Cloud Storage - For large backups stored in Google Cloud Storage, the `prune` command fails because listing - only returns the first 1000 files. This has been corrected, no data is lost in the process. In - addition, a plausibility check was added to `prune`. + For large backups stored in Google Cloud Storage, the `prune` command fails + because listing only returns the first 1000 files. This has been corrected, no + data is lost in the process. In addition, a plausibility check was added to + `prune`. https://github.com/restic/restic/issues/1246 https://github.com/restic/restic/pull/1247 @@ -5054,26 +5979,28 @@ restic users. The changes are ordered by importance. * Bugfix #1167: Do not create a local repo unless `init` is used - When a restic command other than `init` is used with a local repository and the repository - directory does not exist, restic creates the directory structure. That's an error, only the - `init` command should create the dir. + When a restic command other than `init` is used with a local repository and the + repository directory does not exist, restic creates the directory structure. + That's an error, only the `init` command should create the dir. https://github.com/restic/restic/issues/1167 https://github.com/restic/restic/pull/1182 * Bugfix #1191: Make sure to write profiling files on interrupt - Since a few releases restic had the ability to write profiling files for memory and CPU usage - when `debug` is enabled. It was discovered that when restic is interrupted (ctrl+c is - pressed), the proper shutdown hook is not run. This is now corrected. + Since a few releases restic had the ability to write profiling files for memory + and CPU usage when `debug` is enabled. It was discovered that when restic is + interrupted (ctrl+c is pressed), the proper shutdown hook is not run. This is + now corrected. https://github.com/restic/restic/pull/1191 * Enhancement #317: Add `--exclude-caches` and `--exclude-if-present` - A new option `--exclude-caches` was added that allows excluding cache directories (that are - tagged as such). This is a special case of a more generic option `--exclude-if-present` which - excludes a directory if a file with a specific name (and contents) is present. + A new option `--exclude-caches` was added that allows excluding cache + directories (that are tagged as such). This is a special case of a more generic + option `--exclude-if-present` which excludes a directory if a file with a + specific name (and contents) is present. https://github.com/restic/restic/issues/317 https://github.com/restic/restic/pull/1170 @@ -5094,16 +6021,17 @@ restic users. The changes are ordered by importance. * Enhancement #1126: Use the standard Go git repository layout, use `dep` for vendoring - The git repository layout was changed to resemble the layout typically used in Go projects, - we're not using `gb` for building restic any more and vendoring the dependencies is now taken - care of by `dep`. 
+ The git repository layout was changed to resemble the layout typically used in + Go projects, we're not using `gb` for building restic any more and vendoring the + dependencies is now taken care of by `dep`. https://github.com/restic/restic/pull/1126 * Enhancement #1132: Make `key` command always prompt for a password - The `key` command now prompts for a password even if the original password to access a repo has - been specified via the `RESTIC_PASSWORD` environment variable or a password file. + The `key` command now prompts for a password even if the original password to + access a repo has been specified via the `RESTIC_PASSWORD` environment variable + or a password file. https://github.com/restic/restic/issues/1132 https://github.com/restic/restic/pull/1133 @@ -5120,8 +6048,8 @@ restic users. The changes are ordered by importance. * Enhancement #1149: Add support for storing backups on Microsoft Azure Blob Storage - The library we're using to access the service requires Go 1.8, so restic now needs at least Go - 1.8. + The library we're using to access the service requires Go 1.8, so restic now + needs at least Go 1.8. https://github.com/restic/restic/issues/609 https://github.com/restic/restic/pull/1149 @@ -5147,8 +6075,8 @@ restic users. The changes are ordered by importance. * Enhancement #1218: Add `--compact` to `snapshots` command - The option `--compact` was added to the `snapshots` command to get a better overview of the - snapshots in a repo. It limits each snapshot to a single line. + The option `--compact` was added to the `snapshots` command to get a better + overview of the snapshots in a repo. It limits each snapshot to a single line. https://github.com/restic/restic/issues/1218 https://github.com/restic/restic/pull/1223 @@ -5172,18 +6100,19 @@ restic users. The changes are ordered by importance. * Bugfix #1115: Fix `prune`, only include existing files in indexes - A bug was found (and corrected) in the index rebuilding after prune, which led to indexes which - include blobs that were not present in the repo any more. There were already checks in place - which detected this situation and aborted with an error message. A new run of either `prune` or - `rebuild-index` corrected the index files. This is now fixed and a test has been added to detect - this. + A bug was found (and corrected) in the index rebuilding after prune, which led + to indexes which include blobs that were not present in the repo any more. There + were already checks in place which detected this situation and aborted with an + error message. A new run of either `prune` or `rebuild-index` corrected the + index files. This is now fixed and a test has been added to detect this. https://github.com/restic/restic/pull/1115 * Enhancement #1055: Create subdirs below `data/` for local/sftp backends - The local and sftp backends now create the subdirs below `data/` on open/init. This way, restic - makes sure that they always exist. This is connected to an issue for the sftp server. + The local and sftp backends now create the subdirs below `data/` on open/init. + This way, restic makes sure that they always exist. This is connected to an + issue for the sftp server. https://github.com/restic/restic/issues/1055 https://github.com/restic/rest-server/pull/11#issuecomment-309879710 @@ -5192,17 +6121,18 @@ restic users. The changes are ordered by importance. 
* Enhancement #1067: Allow loading credentials for s3 from IAM - When no S3 credentials are specified in the environment variables, restic now tries to load - credentials from an IAM instance profile when the s3 backend is used. + When no S3 credentials are specified in the environment variables, restic now + tries to load credentials from an IAM instance profile when the s3 backend is + used. https://github.com/restic/restic/issues/1067 https://github.com/restic/restic/pull/1086 * Enhancement #1073: Add `migrate` cmd to migrate from `s3legacy` to `default` layout - The `migrate` command for changing the `s3legacy` layout to the `default` layout for s3 - backends has been improved: It can now be restarted with `restic migrate --force s3_layout` - and automatically retries operations on error. + The `migrate` command for changing the `s3legacy` layout to the `default` layout + for s3 backends has been improved: It can now be restarted with `restic migrate + --force s3_layout` and automatically retries operations on error. https://github.com/restic/restic/issues/1073 https://github.com/restic/restic/pull/1075 @@ -5242,18 +6172,18 @@ restic users. The changes are ordered by importance. * Bugfix #965: Switch to `default` repo layout for the s3 backend - The default layout for the s3 backend is now `default` (instead of `s3legacy`). Also, there's a - new `migrate` command to convert an existing repo, it can be run like this: `restic migrate - s3_layout` + The default layout for the s3 backend is now `default` (instead of `s3legacy`). + Also, there's a new `migrate` command to convert an existing repo, it can be run + like this: `restic migrate s3_layout` https://github.com/restic/restic/issues/965 https://github.com/restic/restic/pull/1004 * Bugfix #1013: Switch back to using the high-level minio-go API for s3 - For the s3 backend we're back to using the high-level API the s3 client library for uploading - data, a few users reported dropped connections (which the library will automatically retry - now). + For the s3 backend we're back to using the high-level API the s3 client library + for uploading data, a few users reported dropped connections (which the library + will automatically retry now). https://github.com/restic/restic/issues/1013 https://github.com/restic/restic/issues/1023 @@ -5266,9 +6196,10 @@ restic users. The changes are ordered by importance. * Enhancement #636: Add dirs `tags` and `hosts` to fuse mount - The fuse mount now has two more directories: `tags` contains a subdir for each tag, which in turn - contains only the snapshots that have this tag. The subdir `hosts` contains a subdir for each - host that has a snapshot, and the subdir contains the snapshots for that host. + The fuse mount now has two more directories: `tags` contains a subdir for each + tag, which in turn contains only the snapshots that have this tag. The subdir + `hosts` contains a subdir for each host that has a snapshot, and the subdir + contains the snapshots for that host. https://github.com/restic/restic/issues/636 https://github.com/restic/restic/pull/1050 @@ -5280,8 +6211,9 @@ restic users. The changes are ordered by importance. * Enhancement #989: Improve performance of the `find` command - Improved performance for the `find` command: Restic recognizes paths it has already checked - for the files in question, so the number of backend requests is reduced a lot. 
+ Improved performance for the `find` command: Restic recognizes paths it has + already checked for the files in question, so the number of backend requests is + reduced a lot. https://github.com/restic/restic/issues/989 https://github.com/restic/restic/pull/993 @@ -5294,16 +6226,17 @@ restic users. The changes are ordered by importance. * Enhancement #1021: Detect invalid backend name and print error - Restic now tries to detect when an invalid/unknown backend is used and returns an error - message. + Restic now tries to detect when an invalid/unknown backend is used and returns + an error message. https://github.com/restic/restic/issues/1021 https://github.com/restic/restic/pull/1070 * Enhancement #1029: Remove invalid pack files when `prune` is run - The `prune` command has been improved and will now remove invalid pack files, for example files - that have not been uploaded completely because a backup was interrupted. + The `prune` command has been improved and will now remove invalid pack files, + for example files that have not been uploaded completely because a backup was + interrupted. https://github.com/restic/restic/issues/1029 https://github.com/restic/restic/pull/1036 @@ -5323,24 +6256,24 @@ restic users. The changes are ordered by importance. * Enhancement #974: Remove regular status reports - Regular status report: We've removed the status report that was printed every 10 seconds when - restic is run non-interactively. You can still force reporting the current status by sending a - `USR1` signal to the process. + Regular status report: We've removed the status report that was printed every 10 + seconds when restic is run non-interactively. You can still force reporting the + current status by sending a `USR1` signal to the process. https://github.com/restic/restic/pull/974 * Enhancement #981: Remove temporary path from binary in `build.go` - The `build.go` now strips the temporary directory used for compilation from the binary. This - is the first step in enabling reproducible builds. + The `build.go` now strips the temporary directory used for compilation from the + binary. This is the first step in enabling reproducible builds. https://github.com/restic/restic/pull/981 * Enhancement #985: Allow multiple parallel idle HTTP connections - Backends based on HTTP now allow several idle connections in parallel. This is especially - important for the REST backend, which (when used with a local server) may create a lot - connections and exhaust available ports quickly. + Backends based on HTTP now allow several idle connections in parallel. This is + especially important for the REST backend, which (when used with a local server) + may create a lot connections and exhaust available ports quickly. https://github.com/restic/restic/issues/985 https://github.com/restic/restic/pull/986 @@ -5360,21 +6293,22 @@ restic users. The changes are ordered by importance. * Enhancement #957: Make `forget` consistent - The `forget` command was corrected to be more consistent in which snapshots are to be - forgotten. It is possible that the new code removes more snapshots than before, so please - review what would be deleted by using the `--dry-run` option. + The `forget` command was corrected to be more consistent in which snapshots are + to be forgotten. It is possible that the new code removes more snapshots than + before, so please review what would be deleted by using the `--dry-run` option. 
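    A minimal sketch of such a review run, combining `--dry-run` with the
    `--keep-within` policy mentioned elsewhere in this changelog (the policy
    value and the combination of flags are illustrative only, not part of this
    entry):

        $ restic forget --keep-within 5m7d --dry-run

    Only once the snapshots listed for removal look right would the command be
    repeated without `--dry-run`.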
https://github.com/restic/restic/issues/953 https://github.com/restic/restic/pull/957 * Enhancement #962: Improve memory and runtime for the s3 backend - We've updated the library used for accessing s3, switched to using a lower level API and added - caching for some requests. This lead to a decrease in memory usage and a great speedup. In - addition, we added benchmark functions for all backends, so we can track improvements over - time. The Continuous Integration test service we're using (Travis) now runs the s3 backend - tests not only against a Minio server, but also against the Amazon s3 live service, so we should - be notified of any regressions much sooner. + We've updated the library used for accessing s3, switched to using a lower level + API and added caching for some requests. This lead to a decrease in memory usage + and a great speedup. In addition, we added benchmark functions for all backends, + so we can track improvements over time. The Continuous Integration test service + we're using (Travis) now runs the s3 backend tests not only against a Minio + server, but also against the Amazon s3 live service, so we should be notified of + any regressions much sooner. https://github.com/restic/restic/pull/962 https://github.com/restic/restic/pull/960 @@ -5384,11 +6318,12 @@ restic users. The changes are ordered by importance. * Enhancement #966: Unify repository layout for all backends - Up to now the s3 backend used a special repository layout. We've decided to unify the repository - layout and implemented the default layout also for the s3 backend. For creating a new - repository on s3 with the default layout, use `restic -o s3.layout=default init`. For further - commands the option is not necessary any more, restic will automatically detect the correct - layout to use. A future version will switch to the default layout for new repositories. + Up to now the s3 backend used a special repository layout. We've decided to + unify the repository layout and implemented the default layout also for the s3 + backend. For creating a new repository on s3 with the default layout, use + `restic -o s3.layout=default init`. For further commands the option is not + necessary any more, restic will automatically detect the correct layout to use. + A future version will switch to the default layout for new repositories. 
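    A minimal sketch of both steps described here and in #965 above, with the
    repository location and credentials omitted (the flag and command names are
    taken from these entries):

        $ restic -o s3.layout=default init   # create a new s3 repo with the default layout
        $ restic migrate s3_layout           # convert an existing s3legacy repo

    As noted above, the layout is detected automatically for existing
    repositories, so the `-o s3.layout=default` option is only needed at
    `init` time.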
https://github.com/restic/restic/issues/965 https://github.com/restic/restic/pull/966 From 65748fcd6b9d636df4402c10096c54e329ae7070 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 26 Jul 2024 11:30:37 +0200 Subject: [PATCH 493/893] Update manpages and auto-completion --- doc/bash-completion.sh | 513 +++++++++++++++++++++++++++++- doc/man/restic-backup.1 | 24 +- doc/man/restic-cache.1 | 11 +- doc/man/restic-cat.1 | 13 +- doc/man/restic-check.1 | 13 +- doc/man/restic-copy.1 | 24 +- doc/man/restic-diff.1 | 21 +- doc/man/restic-dump.1 | 23 +- doc/man/restic-find.1 | 18 +- doc/man/restic-forget.1 | 26 +- doc/man/restic-generate.1 | 11 +- doc/man/restic-init.1 | 15 +- doc/man/restic-key-add.1 | 149 +++++++++ doc/man/restic-key-list.1 | 135 ++++++++ doc/man/restic-key-passwd.1 | 150 +++++++++ doc/man/restic-key-remove.1 | 134 ++++++++ doc/man/restic-key.1 | 32 +- doc/man/restic-list.1 | 13 +- doc/man/restic-ls.1 | 21 +- doc/man/restic-migrate.1 | 13 +- doc/man/restic-mount.1 | 19 +- doc/man/restic-prune.1 | 13 +- doc/man/restic-recover.1 | 13 +- doc/man/restic-repair-index.1 | 13 +- doc/man/restic-repair-packs.1 | 16 +- doc/man/restic-repair-snapshots.1 | 17 +- doc/man/restic-repair.1 | 8 + doc/man/restic-restore.1 | 53 ++- doc/man/restic-rewrite.1 | 25 +- doc/man/restic-self-update.1 | 13 +- doc/man/restic-snapshots.1 | 17 +- doc/man/restic-stats.1 | 19 +- doc/man/restic-tag.1 | 17 +- doc/man/restic-unlock.1 | 11 +- doc/man/restic-version.1 | 11 +- doc/man/restic.1 | 8 + doc/powershell-completion.ps1 | 6 +- 37 files changed, 1545 insertions(+), 93 deletions(-) create mode 100644 doc/man/restic-key-add.1 create mode 100644 doc/man/restic-key-list.1 create mode 100644 doc/man/restic-key-passwd.1 create mode 100644 doc/man/restic-key-remove.1 diff --git a/doc/bash-completion.sh b/doc/bash-completion.sh index cae37a6ca2d..9d64871ca3f 100644 --- a/doc/bash-completion.sh +++ b/doc/bash-completion.sh @@ -49,7 +49,7 @@ __restic_handle_go_custom_completion() local out requestComp lastParam lastChar comp directive args # Prepare the command to request completions for the program. 
- # Calling ${words[0]} instead of directly restic allows to handle aliases + # Calling ${words[0]} instead of directly restic allows handling aliases args=("${words[@]:1}") # Disable ActiveHelp which is not supported for bash completion v1 requestComp="RESTIC_ACTIVE_HELP=0 ${words[0]} __completeNoDesc ${args[*]}" @@ -456,12 +456,16 @@ _restic_backup() two_word_flags+=("--read-concurrency") local_nonpersistent_flags+=("--read-concurrency") local_nonpersistent_flags+=("--read-concurrency=") + flags+=("--skip-if-unchanged") + local_nonpersistent_flags+=("--skip-if-unchanged") flags+=("--stdin") local_nonpersistent_flags+=("--stdin") flags+=("--stdin-filename=") two_word_flags+=("--stdin-filename") local_nonpersistent_flags+=("--stdin-filename") local_nonpersistent_flags+=("--stdin-filename=") + flags+=("--stdin-from-command") + local_nonpersistent_flags+=("--stdin-from-command") flags+=("--tag=") two_word_flags+=("--tag") local_nonpersistent_flags+=("--tag") @@ -479,6 +483,9 @@ _restic_backup() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -552,6 +559,9 @@ _restic_cache() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -617,6 +627,9 @@ _restic_cat() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -690,6 +703,9 @@ _restic_check() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -744,6 +760,8 @@ _restic_copy() flags_with_completion=() flags_completion=() + flags+=("--from-insecure-no-password") + local_nonpersistent_flags+=("--from-insecure-no-password") flags+=("--from-key-hint=") two_word_flags+=("--from-key-hint") local_nonpersistent_flags+=("--from-key-hint") @@ -789,6 +807,9 @@ _restic_copy() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -856,6 +877,9 @@ _restic_diff() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -934,6 +958,12 @@ _restic_dump() two_word_flags+=("--tag") local_nonpersistent_flags+=("--tag") local_nonpersistent_flags+=("--tag=") + flags+=("--target=") + two_word_flags+=("--target") + two_word_flags+=("-t") + local_nonpersistent_flags+=("--target") + local_nonpersistent_flags+=("--target=") + local_nonpersistent_flags+=("-t") flags+=("--cacert=") two_word_flags+=("--cacert") flags+=("--cache-dir=") @@ -941,6 +971,9 @@ _restic_dump() flags+=("--cleanup-cache") flags+=("--compression=") 
two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1056,6 +1089,9 @@ _restic_find() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1174,6 +1210,8 @@ _restic_forget() two_word_flags+=("--keep-tag") local_nonpersistent_flags+=("--keep-tag") local_nonpersistent_flags+=("--keep-tag=") + flags+=("--unsafe-allow-remove-all") + local_nonpersistent_flags+=("--unsafe-allow-remove-all") flags+=("--host=") two_word_flags+=("--host") local_nonpersistent_flags+=("--host") @@ -1227,6 +1265,9 @@ _restic_forget() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1312,6 +1353,9 @@ _restic_generate() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1373,6 +1417,9 @@ _restic_help() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1430,6 +1477,8 @@ _restic_init() flags+=("--copy-chunker-params") local_nonpersistent_flags+=("--copy-chunker-params") + flags+=("--from-insecure-no-password") + local_nonpersistent_flags+=("--from-insecure-no-password") flags+=("--from-key-hint=") two_word_flags+=("--from-key-hint") local_nonpersistent_flags+=("--from-key-hint") @@ -1465,6 +1514,9 @@ _restic_init() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1505,9 +1557,224 @@ _restic_init() noun_aliases=() } -_restic_key() +_restic_key_add() { - last_command="restic_key" + last_command="restic_key_add" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--host=") + two_word_flags+=("--host") + local_nonpersistent_flags+=("--host") + local_nonpersistent_flags+=("--host=") + flags+=("--new-insecure-no-password") + local_nonpersistent_flags+=("--new-insecure-no-password") + flags+=("--new-password-file=") + two_word_flags+=("--new-password-file") + local_nonpersistent_flags+=("--new-password-file") + local_nonpersistent_flags+=("--new-password-file=") + flags+=("--user=") + two_word_flags+=("--user") + local_nonpersistent_flags+=("--user") + local_nonpersistent_flags+=("--user=") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") 
+ flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_key_help() +{ + last_command="restic_key_help" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + has_completion_function=1 + noun_aliases=() +} + +_restic_key_list() +{ + last_command="restic_key_list" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + 
two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_key_passwd() +{ + last_command="restic_key_passwd" command_aliases=() @@ -1527,6 +1794,8 @@ _restic_key() two_word_flags+=("--host") local_nonpersistent_flags+=("--host") local_nonpersistent_flags+=("--host=") + flags+=("--new-insecure-no-password") + local_nonpersistent_flags+=("--new-insecure-no-password") flags+=("--new-password-file=") two_word_flags+=("--new-password-file") local_nonpersistent_flags+=("--new-password-file") @@ -1542,6 +1811,150 @@ _restic_key() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_key_remove() +{ + last_command="restic_key_remove" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") 
+ flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_key() +{ + last_command="restic_key" + + command_aliases=() + + commands=() + commands+=("add") + commands+=("help") + commands+=("list") + commands+=("passwd") + commands+=("remove") + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1607,6 +2020,9 @@ _restic_list() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1677,6 +2093,8 @@ _restic_ls() flags+=("-l") local_nonpersistent_flags+=("--long") local_nonpersistent_flags+=("-l") + flags+=("--ncdu") + local_nonpersistent_flags+=("--ncdu") flags+=("--path=") two_word_flags+=("--path") local_nonpersistent_flags+=("--path") @@ -1694,6 +2112,9 @@ _restic_ls() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1763,6 +2184,9 @@ _restic_migrate() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1856,6 +2280,9 @@ _restic_mount() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -1943,6 +2370,9 @@ _restic_prune() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2008,6 +2438,9 @@ _restic_recover() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") 
+ flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2069,6 +2502,9 @@ _restic_repair_help() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2137,6 +2573,9 @@ _restic_repair_index() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2202,6 +2641,9 @@ _restic_repair_packs() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2287,6 +2729,9 @@ _restic_repair_snapshots() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2356,6 +2801,9 @@ _restic_repair() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2410,12 +2858,20 @@ _restic_restore() flags_with_completion=() flags_completion=() + flags+=("--delete") + local_nonpersistent_flags+=("--delete") + flags+=("--dry-run") + local_nonpersistent_flags+=("--dry-run") flags+=("--exclude=") two_word_flags+=("--exclude") two_word_flags+=("-e") local_nonpersistent_flags+=("--exclude") local_nonpersistent_flags+=("--exclude=") local_nonpersistent_flags+=("-e") + flags+=("--exclude-file=") + two_word_flags+=("--exclude-file") + local_nonpersistent_flags+=("--exclude-file") + local_nonpersistent_flags+=("--exclude-file=") flags+=("--help") flags+=("-h") local_nonpersistent_flags+=("--help") @@ -2430,16 +2886,32 @@ _restic_restore() two_word_flags+=("--iexclude") local_nonpersistent_flags+=("--iexclude") local_nonpersistent_flags+=("--iexclude=") + flags+=("--iexclude-file=") + two_word_flags+=("--iexclude-file") + local_nonpersistent_flags+=("--iexclude-file") + local_nonpersistent_flags+=("--iexclude-file=") flags+=("--iinclude=") two_word_flags+=("--iinclude") local_nonpersistent_flags+=("--iinclude") local_nonpersistent_flags+=("--iinclude=") + flags+=("--iinclude-file=") + two_word_flags+=("--iinclude-file") + local_nonpersistent_flags+=("--iinclude-file") + local_nonpersistent_flags+=("--iinclude-file=") flags+=("--include=") two_word_flags+=("--include") two_word_flags+=("-i") local_nonpersistent_flags+=("--include") local_nonpersistent_flags+=("--include=") local_nonpersistent_flags+=("-i") + flags+=("--include-file=") + two_word_flags+=("--include-file") + local_nonpersistent_flags+=("--include-file") + local_nonpersistent_flags+=("--include-file=") + flags+=("--overwrite=") + two_word_flags+=("--overwrite") + local_nonpersistent_flags+=("--overwrite") + local_nonpersistent_flags+=("--overwrite=") flags+=("--path=") two_word_flags+=("--path") local_nonpersistent_flags+=("--path") @@ -2465,6 +2937,9 @@ _restic_restore() 
flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2553,6 +3028,14 @@ _restic_rewrite() two_word_flags+=("--iexclude-file") local_nonpersistent_flags+=("--iexclude-file") local_nonpersistent_flags+=("--iexclude-file=") + flags+=("--new-host=") + two_word_flags+=("--new-host") + local_nonpersistent_flags+=("--new-host") + local_nonpersistent_flags+=("--new-host=") + flags+=("--new-time=") + two_word_flags+=("--new-time") + local_nonpersistent_flags+=("--new-time") + local_nonpersistent_flags+=("--new-time=") flags+=("--path=") two_word_flags+=("--path") local_nonpersistent_flags+=("--path") @@ -2568,6 +3051,9 @@ _restic_rewrite() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2637,6 +3123,9 @@ _restic_self-update() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2730,6 +3219,9 @@ _restic_snapshots() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2813,6 +3305,9 @@ _restic_stats() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2904,6 +3399,9 @@ _restic_tag() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -2971,6 +3469,9 @@ _restic_unlock() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -3036,6 +3537,9 @@ _restic_version() flags+=("--cleanup-cache") flags+=("--compression=") two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") @@ -3129,6 +3633,9 @@ _restic_root_command() flags+=("-h") local_nonpersistent_flags+=("--help") local_nonpersistent_flags+=("-h") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") flags+=("--insecure-tls") flags+=("--json") flags+=("--key-hint=") diff --git a/doc/man/restic-backup.1 b/doc/man/restic-backup.1 index 730685271d1..cda4aadffbe 100644 --- a/doc/man/restic-backup.1 +++ b/doc/man/restic-backup.1 @@ -22,6 +22,8 @@ given as the arguments. Exit status is 0 if the command was successful. Exit status is 1 if there was a fatal error (no snapshot created). 
Exit status is 3 if some source data could not be read (incomplete snapshot created). +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -63,7 +65,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea .PP \fB-f\fP, \fB--force\fP[=false] - force re-reading the target files/directories (overrides the "parent" flag) + force re-reading the source files/directories (overrides the "parent" flag) .PP \fB-g\fP, \fB--group-by\fP=host,paths @@ -75,7 +77,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea .PP \fB-H\fP, \fB--host\fP="" - set the \fBhostname\fR for the snapshot manually. To prevent an expensive rescan use the "parent" flag + set the \fBhostname\fR for the snapshot manually (default: $RESTIC_HOST). To prevent an expensive rescan use the "parent" flag .PP \fB--iexclude\fP=[] @@ -91,7 +93,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea .PP \fB--ignore-inode\fP[=false] - ignore inode number changes when checking for modified files + ignore inode number and ctime changes when checking for modified files .PP \fB--no-scan\fP[=false] @@ -109,6 +111,10 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea \fB--read-concurrency\fP=0 read \fBn\fR files concurrently (default: $RESTIC_READ_CONCURRENCY or 2) +.PP +\fB--skip-if-unchanged\fP[=false] + skip snapshot creation if identical to parent snapshot + .PP \fB--stdin\fP[=false] read backup from stdin @@ -117,6 +123,10 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea \fB--stdin-filename\fP="stdin" \fBfilename\fR to use when reading from stdin +.PP +\fB--stdin-from-command\fP[=false] + interpret arguments as command to execute and store its stdout + .PP \fB--tag\fP=[] add \fBtags\fR for the new snapshot in the format \fBtag[,tag,...]\fR (can be specified multiple times) @@ -147,6 +157,14 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-cache.1 b/doc/man/restic-cache.1 index c170c16242f..f868b8a6b6c 100644 --- a/doc/man/restic-cache.1 +++ b/doc/man/restic-cache.1 @@ -18,7 +18,8 @@ The "cache" command allows listing and cleaning local cache directories. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
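The backup page now documents the `--stdin-from-command` mode and the dedicated exit codes 10 and 11. A minimal sketch of how a wrapper script might use both; the repository path and the dumped command are placeholders, and the repository password is assumed to come from the environment:

    # Illustrative only: store the output of a database dump as a snapshot
    # whose single file is named "db.sql", then react to the documented codes.
    restic -r /srv/restic-repo backup \
        --stdin-from-command --stdin-filename db.sql \
        mysqldump --all-databases
    case $? in
        0)  echo "snapshot created" ;;
        3)  echo "incomplete snapshot: some source data could not be read" ;;
        10) echo "repository does not exist" ;;
        11) echo "repository is already locked" ;;
        *)  echo "fatal error, no snapshot created" ;;
    esac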
.SH OPTIONS @@ -56,6 +57,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-cat.1 b/doc/man/restic-cat.1 index b42a58e149e..2298c58cfeb 100644 --- a/doc/man/restic-cat.1 +++ b/doc/man/restic-cat.1 @@ -18,7 +18,10 @@ The "cat" command is used to print internal objects to stdout. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -44,6 +47,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-check.1 b/doc/man/restic-check.1 index 9c1dc77e504..c0d1b07a8f3 100644 --- a/doc/man/restic-check.1 +++ b/doc/man/restic-check.1 @@ -23,7 +23,10 @@ repository and not use a local cache. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -61,6 +64,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-copy.1 b/doc/man/restic-copy.1 index bd9795f4447..63b67e5e726 100644 --- a/doc/man/restic-copy.1 +++ b/doc/man/restic-copy.1 @@ -30,7 +30,19 @@ This can be mitigated by the "--copy-chunker-params" option when initializing a new destination repository using the "init" command. +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. 
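The duplication caveat described for "copy" can be avoided by initializing the destination with matching chunker parameters before copying. A sketch, assuming the `--from-repo` flag to select the source repository (paths are placeholders):

    # Create the destination with the source's chunker parameters,
    # then copy the most recent snapshot across.
    restic -r /srv/new-repo init --from-repo /srv/old-repo --copy-chunker-params
    restic -r /srv/new-repo copy --from-repo /srv/old-repo latest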
+ + .SH OPTIONS +.PP +\fB--from-insecure-no-password\fP[=false] + use an empty password for the source repository, must be passed to every restic command (insecure) + .PP \fB--from-key-hint\fP="" key ID of key to try decrypting the source repository first (default: $RESTIC_FROM_KEY_HINT) @@ -57,11 +69,11 @@ new destination repository using the "init" command. .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] @@ -85,6 +97,14 @@ new destination repository using the "init" command. \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-diff.1 b/doc/man/restic-diff.1 index 28f3a483887..f4ffa27374b 100644 --- a/doc/man/restic-diff.1 +++ b/doc/man/restic-diff.1 @@ -28,18 +28,27 @@ U The metadata (access mode, timestamps, ...) for the item was updated M The file's content was modified .IP \(bu 2 T The type was changed, e.g. a file was made a symlink +.IP \(bu 2 +? Bitrot detected: The file's content has changed but all metadata is the same .RE +.PP +Metadata comparison will likely not work if a backup was created using the +\&'--ignore-inode' or '--ignore-ctime' option. + .PP To only compare files in specific subfolders, you can use the -":" syntax, where "subfolder" is a path within the +"snapshotID:subfolder" syntax, where "subfolder" is a path within the snapshot. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -69,6 +78,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-dump.1 b/doc/man/restic-dump.1 index 7fa3f777de1..00cb3c8b6ba 100644 --- a/doc/man/restic-dump.1 +++ b/doc/man/restic-dump.1 @@ -24,13 +24,16 @@ repository. .PP To include the folder content at the root of the archive, you can use the -":" syntax, where "subfolder" is a path within the +"snapshotID:subfolder" syntax, where "subfolder" is a path within the snapshot. 
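Both "diff" and "dump" accept the "snapshotID:subfolder" syntax described above. For example (snapshot IDs and paths are placeholders):

    # Compare only the /home subtrees of two snapshots; lines starting with '?'
    # flag files whose content changed while all metadata stayed the same.
    restic diff 5845b002:/home 2ab627a6:/home

    # Dump a single file from a subfolder of the latest snapshot to stdout.
    restic dump latest:/home/user notes.txt > notes.txt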
.SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -44,16 +47,20 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] only consider snapshots including \fBtag[,tag,...]\fR, when snapshot ID "latest" is given (can be specified multiple times) +.PP +\fB-t\fP, \fB--target\fP="" + write the output to target \fBpath\fR + .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP @@ -72,6 +79,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-find.1 b/doc/man/restic-find.1 index c3297c43f10..2d81decd387 100644 --- a/doc/man/restic-find.1 +++ b/doc/man/restic-find.1 @@ -29,7 +29,7 @@ It can also be used to search for restic blobs or trees for troubleshooting. .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--human-readable\fP[=false] @@ -57,7 +57,7 @@ It can also be used to search for restic blobs or trees for troubleshooting. .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--show-pack-id\fP[=false] @@ -93,6 +93,14 @@ It can also be used to search for restic blobs or trees for troubleshooting. \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -178,8 +186,10 @@ restic find --pack 025c1d06 EXIT STATUS =========== -Exit status is 0 if the command was successful, and non-zero if there was any error. - +Exit status is 0 if the command was successful. 
+Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .EE diff --git a/doc/man/restic-forget.1 b/doc/man/restic-forget.1 index d0c4cfc74da..55705288feb 100644 --- a/doc/man/restic-forget.1 +++ b/doc/man/restic-forget.1 @@ -15,7 +15,10 @@ restic-forget - Remove snapshots from the repository .PP The "forget" command removes snapshots according to a policy. All snapshots are first divided into groups according to "--group-by", and after that the policy -specified by the "--keep-*" options is applied to each group individually. +specified by the "--keep-\fI" options is applied to each group individually. +If there are not enough snapshots to keep one for each duration related +"--keep-{within-,}\fP" option, the oldest snapshot in the group is kept +additionally. .PP Please note that this command really only deletes the snapshot object in the @@ -29,7 +32,10 @@ security considerations. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -85,9 +91,13 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--keep-tag\fP=[] keep snapshots with this \fBtaglist\fR (can be specified multiple times) +.PP +\fB--unsafe-allow-remove-all\fP[=false] + allow deleting all snapshots of a snapshot group + .PP \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--tag\fP=[] @@ -95,7 +105,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB-c\fP, \fB--compact\fP[=false] @@ -155,6 +165,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-generate.1 b/doc/man/restic-generate.1 index 84f659ef2f6..f2db39bacad 100644 --- a/doc/man/restic-generate.1 +++ b/doc/man/restic-generate.1 @@ -19,7 +19,8 @@ and the auto-completion files for bash, fish and zsh). .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
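A "forget" policy following the description above might look like this (retention values are illustrative); snapshots are grouped before the rules are applied, so each host/path combination is pruned independently:

    # Keep 7 daily, 5 weekly and 12 monthly snapshots per (host, paths) group,
    # forget the rest, and remove the data that is no longer referenced.
    restic forget --group-by host,paths \
        --keep-daily 7 --keep-weekly 5 --keep-monthly 12 \
        --prune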
.SH OPTIONS @@ -65,6 +66,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-init.1 b/doc/man/restic-init.1 index 5f19c8f8cc8..de439add53d 100644 --- a/doc/man/restic-init.1 +++ b/doc/man/restic-init.1 @@ -18,7 +18,8 @@ The "init" command initializes a new repository. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. .SH OPTIONS @@ -26,6 +27,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--copy-chunker-params\fP[=false] copy chunker parameters from the secondary repository (useful with the copy command) +.PP +\fB--from-insecure-no-password\fP[=false] + use an empty password for the source repository, must be passed to every restic command (insecure) + .PP \fB--from-key-hint\fP="" key ID of key to try decrypting the source repository first (default: $RESTIC_FROM_KEY_HINT) @@ -72,6 +77,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-key-add.1 b/doc/man/restic-key-add.1 new file mode 100644 index 00000000000..6a24e1e6712 --- /dev/null +++ b/doc/man/restic-key-add.1 @@ -0,0 +1,149 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-key-add - Add a new key (password) to the repository; returns the new key ID + + +.SH SYNOPSIS +.PP +\fBrestic key add [flags]\fP + + +.SH DESCRIPTION +.PP +The "add" sub-command creates a new key and validates the key. Returns the new key ID. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. + + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for add + +.PP +\fB--host\fP="" + the hostname for new key + +.PP +\fB--new-insecure-no-password\fP[=false] + add an empty password for the repository (insecure) + +.PP +\fB--new-password-file\fP="" + \fBfile\fR from which to read the new password + +.PP +\fB--user\fP="" + the username for new key + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. 
(default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic-key(1)\fP diff --git a/doc/man/restic-key-list.1 b/doc/man/restic-key-list.1 new file mode 100644 index 00000000000..a00b116b9bf --- /dev/null +++ b/doc/man/restic-key-list.1 @@ -0,0 +1,135 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-key-list - List keys (passwords) + + +.SH SYNOPSIS +.PP +\fBrestic key list [flags]\fP + + +.SH DESCRIPTION +.PP +The "list" sub-command lists all the keys (passwords) associated with the repository. +Returns the key ID, username, hostname, created time and if it's the current key being +used to access the repository. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. 
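As an illustration of the "key add" flags documented above (file path, host and user are placeholders):

    # Add a second key whose password is read from a file, recorded with an
    # explicit host and user name, then list all keys of the repository.
    restic key add --new-password-file /root/new-repo-password \
        --host backup-server --user root
    restic key list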
+ + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for list + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. (default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic-key(1)\fP diff --git a/doc/man/restic-key-passwd.1 b/doc/man/restic-key-passwd.1 new file mode 100644 index 00000000000..42315d72ae0 --- /dev/null +++ b/doc/man/restic-key-passwd.1 @@ -0,0 +1,150 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-key-passwd - Change key (password); creates a new key ID and removes the old key ID, returns new key ID + + +.SH SYNOPSIS +.PP +\fBrestic key passwd [flags]\fP + + +.SH DESCRIPTION +.PP +The "passwd" sub-command creates a new key, validates the key and remove the old key ID. +Returns the new key ID. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. 
+Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. + + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for passwd + +.PP +\fB--host\fP="" + the hostname for new key + +.PP +\fB--new-insecure-no-password\fP[=false] + add an empty password for the repository (insecure) + +.PP +\fB--new-password-file\fP="" + \fBfile\fR from which to read the new password + +.PP +\fB--user\fP="" + the username for new key + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. (default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic-key(1)\fP diff --git a/doc/man/restic-key-remove.1 b/doc/man/restic-key-remove.1 new file mode 100644 index 00000000000..6ee82605968 --- /dev/null +++ b/doc/man/restic-key-remove.1 @@ -0,0 +1,134 @@ +.nh +.TH "restic backup" "1" "Jan 
2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-key-remove - Remove key ID (password) from the repository. + + +.SH SYNOPSIS +.PP +\fBrestic key remove [ID] [flags]\fP + + +.SH DESCRIPTION +.PP +The "remove" sub-command removes the selected key ID. The "remove" command does not allow +removing the current key being used to access the repository. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. + + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for remove + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. (default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. 
(default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic-key(1)\fP diff --git a/doc/man/restic-key.1 b/doc/man/restic-key.1 index 8d181318821..43da808ccc0 100644 --- a/doc/man/restic-key.1 +++ b/doc/man/restic-key.1 @@ -8,17 +8,13 @@ restic-key - Manage keys (passwords) .SH SYNOPSIS .PP -\fBrestic key [flags] [list|add|remove|passwd] [ID]\fP +\fBrestic key [flags]\fP .SH DESCRIPTION .PP -The "key" command manages keys (passwords) for accessing the repository. - - -.SH EXIT STATUS -.PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +The "key" command allows you to set multiple access keys or passwords +per repository. 
.SH OPTIONS @@ -26,18 +22,6 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB-h\fP, \fB--help\fP[=false] help for key -.PP -\fB--host\fP="" - the hostname for new keys - -.PP -\fB--new-password-file\fP="" - \fBfile\fR from which to read the new password - -.PP -\fB--user\fP="" - the username for new keys - .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP @@ -56,6 +40,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) @@ -131,4 +123,4 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .SH SEE ALSO .PP -\fBrestic(1)\fP +\fBrestic(1)\fP, \fBrestic-key-add(1)\fP, \fBrestic-key-list(1)\fP, \fBrestic-key-passwd(1)\fP, \fBrestic-key-remove(1)\fP diff --git a/doc/man/restic-list.1 b/doc/man/restic-list.1 index e399038a219..f8a1db00580 100644 --- a/doc/man/restic-list.1 +++ b/doc/man/restic-list.1 @@ -18,7 +18,10 @@ The "list" command allows listing objects in the repository based on type. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -44,6 +47,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-ls.1 b/doc/man/restic-ls.1 index 10b0657a372..6cc6625836c 100644 --- a/doc/man/restic-ls.1 +++ b/doc/man/restic-ls.1 @@ -33,7 +33,10 @@ a path separator); paths use the forward slash '/' as separator. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. 
.SH OPTIONS @@ -43,7 +46,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--human-readable\fP[=false] @@ -53,9 +56,13 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB-l\fP, \fB--long\fP[=false] use a long listing format showing size and mode +.PP +\fB--ncdu\fP[=false] + output NCDU export format (pipe into 'ncdu -f -') + .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times, snapshots must include all specified paths) .PP \fB--recursive\fP[=false] @@ -83,6 +90,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-migrate.1 b/doc/man/restic-migrate.1 index 7e48f726c6a..2272294bf47 100644 --- a/doc/man/restic-migrate.1 +++ b/doc/man/restic-migrate.1 @@ -20,7 +20,10 @@ names are specified, these migrations are applied. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -50,6 +53,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-mount.1 b/doc/man/restic-mount.1 index aab607fcfe5..a256d2a5ff3 100644 --- a/doc/man/restic-mount.1 +++ b/doc/man/restic-mount.1 @@ -28,7 +28,6 @@ Example time template without colons: .EX --time-template "2006-01-02_15-04-05" - .EE .PP @@ -36,7 +35,6 @@ You need to specify a sample format for exactly the following timestamp: .EX Mon Jan 2 15:04:05 -0700 MST 2006 - .EE .PP @@ -62,7 +60,10 @@ The default path templates are: .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. 
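The new `--ncdu` flag emits an export that can be piped straight into ncdu, as the option text above notes:

    # Browse the latest snapshot interactively in ncdu.
    restic ls latest --ncdu | ncdu -f -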
.SH OPTIONS @@ -76,7 +77,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--no-default-permissions\fP[=false] @@ -88,7 +89,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--path-template\fP=[] @@ -120,6 +121,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-prune.1 b/doc/man/restic-prune.1 index c54d5d7ff0e..7e16748ab2b 100644 --- a/doc/man/restic-prune.1 +++ b/doc/man/restic-prune.1 @@ -19,7 +19,10 @@ referenced and therefore not needed any more. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -73,6 +76,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-recover.1 b/doc/man/restic-recover.1 index 010fbafd77f..0529360aecf 100644 --- a/doc/man/restic-recover.1 +++ b/doc/man/restic-recover.1 @@ -20,7 +20,10 @@ It can be used if, for example, a snapshot has been removed by accident with "fo .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. 
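For instance, the colon-free time template shown earlier can be combined with `--no-default-permissions` when mounting (the mount point is a placeholder):

    # Name snapshot directories without colons and keep the permissions stored
    # in the snapshots instead of the default owner-only access.
    restic mount --no-default-permissions \
        --time-template "2006-01-02_15-04-05" /mnt/restic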
.SH OPTIONS @@ -46,6 +49,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-repair-index.1 b/doc/man/restic-repair-index.1 index f06be64c04d..60327a91692 100644 --- a/doc/man/restic-repair-index.1 +++ b/doc/man/restic-repair-index.1 @@ -19,7 +19,10 @@ repository. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -49,6 +52,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-repair-packs.1 b/doc/man/restic-repair-packs.1 index f3671fe1803..01a2f65407b 100644 --- a/doc/man/restic-repair-packs.1 +++ b/doc/man/restic-repair-packs.1 @@ -12,9 +12,6 @@ restic-repair-packs - Salvage damaged pack files .SH DESCRIPTION -.PP -WARNING: The CLI for this command is experimental and will likely change in the future! - .PP The "repair packs" command extracts intact blobs from the specified pack files, rebuilds the index to remove the damaged pack files and removes the pack files from the repository. @@ -22,7 +19,10 @@ the index to remove the damaged pack files and removes the pack files from the r .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -48,6 +48,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-repair-snapshots.1 b/doc/man/restic-repair-snapshots.1 index 9369f25f245..c4439f1314e 100644 --- a/doc/man/restic-repair-snapshots.1 +++ b/doc/man/restic-repair-snapshots.1 @@ -37,7 +37,10 @@ snapshot! 
.SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -55,11 +58,11 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] @@ -83,6 +86,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-repair.1 b/doc/man/restic-repair.1 index 77aecc17333..7fa313aab7c 100644 --- a/doc/man/restic-repair.1 +++ b/doc/man/restic-repair.1 @@ -39,6 +39,14 @@ Repair the repository \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-restore.1 b/doc/man/restic-restore.1 index 4635b1e43b9..876b18bf805 100644 --- a/doc/man/restic-restore.1 +++ b/doc/man/restic-restore.1 @@ -21,43 +21,74 @@ The special snapshotID "latest" can be used to restore the latest snapshot in th repository. .PP -To only restore a specific subfolder, you can use the ":" +To only restore a specific subfolder, you can use the "snapshotID:subfolder" syntax, where "subfolder" is a path within the snapshot. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS +.PP +\fB--delete\fP[=false] + delete files from target directory if they do not exist in snapshot. 
Use '--dry-run -vv' to check what would be deleted + +.PP +\fB--dry-run\fP[=false] + do not write any data, just show what would be done + .PP \fB-e\fP, \fB--exclude\fP=[] exclude a \fBpattern\fR (can be specified multiple times) +.PP +\fB--exclude-file\fP=[] + read exclude patterns from a \fBfile\fR (can be specified multiple times) + .PP \fB-h\fP, \fB--help\fP[=false] help for restore .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots for this \fBhost\fR, when snapshot ID "latest" is given (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--iexclude\fP=[] - same as --exclude but ignores the casing of \fBpattern\fR + same as --exclude \fBpattern\fR but ignores the casing of filenames + +.PP +\fB--iexclude-file\fP=[] + same as --exclude-file but ignores casing of \fBfile\fRnames in patterns .PP \fB--iinclude\fP=[] - same as --include but ignores the casing of \fBpattern\fR + same as --include \fBpattern\fR but ignores the casing of filenames + +.PP +\fB--iinclude-file\fP=[] + same as --include-file but ignores casing of \fBfile\fRnames in patterns .PP \fB-i\fP, \fB--include\fP=[] - include a \fBpattern\fR, exclude everything else (can be specified multiple times) + include a \fBpattern\fR (can be specified multiple times) + +.PP +\fB--include-file\fP=[] + read include patterns from a \fBfile\fR (can be specified multiple times) + +.PP +\fB--overwrite\fP=always + overwrite behavior, one of (always|if-changed|if-newer|never) (default: always) .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR, when snapshot ID "latest" is given (can be specified multiple times, snapshots must include all specified paths) .PP \fB--sparse\fP[=false] @@ -93,6 +124,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-rewrite.1 b/doc/man/restic-rewrite.1 index d63c653e6fb..d3dd9243677 100644 --- a/doc/man/restic-rewrite.1 +++ b/doc/man/restic-rewrite.1 @@ -35,7 +35,10 @@ use the "prune" command. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. 
.SH OPTIONS @@ -61,7 +64,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--iexclude\fP=[] @@ -71,9 +74,17 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--iexclude-file\fP=[] same as --exclude-file but ignores casing of \fBfile\fRnames in patterns +.PP +\fB--new-host\fP="" + replace hostname + +.PP +\fB--new-time\fP="" + replace time of the backup + .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] @@ -97,6 +108,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-self-update.1 b/doc/man/restic-self-update.1 index 92ab5add372..e6dd4faf25f 100644 --- a/doc/man/restic-self-update.1 +++ b/doc/man/restic-self-update.1 @@ -21,7 +21,10 @@ files. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -51,6 +54,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-snapshots.1 b/doc/man/restic-snapshots.1 index 6203bbf2b25..25d5274e3e4 100644 --- a/doc/man/restic-snapshots.1 +++ b/doc/man/restic-snapshots.1 @@ -18,7 +18,10 @@ The "snapshots" command lists all snapshots stored in the repository. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. 
.SH OPTIONS @@ -36,7 +39,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--latest\fP=0 @@ -44,7 +47,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] @@ -68,6 +71,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-stats.1 b/doc/man/restic-stats.1 index 9d37163defb..fe4074ca52d 100644 --- a/doc/man/restic-stats.1 +++ b/doc/man/restic-stats.1 @@ -32,7 +32,7 @@ The modes are: .IP \(bu 2 restore-size: (default) Counts the size of the restored files. .IP \(bu 2 -files-by-contents: Counts total size of files, where a file is +files-by-contents: Counts total size of unique files, where a file is considered unique if it has unique contents. .IP \(bu 2 raw-data: Counts the size of blobs in the repository, regardless of @@ -48,7 +48,10 @@ Refer to the online manual for more details about each mode. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. 
.SH OPTIONS @@ -58,7 +61,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--mode\fP="restore-size" @@ -66,7 +69,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--tag\fP=[] @@ -90,6 +93,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-tag.1 b/doc/man/restic-tag.1 index b1468c74d76..7ab1911e5a4 100644 --- a/doc/man/restic-tag.1 +++ b/doc/man/restic-tag.1 @@ -25,7 +25,10 @@ When no snapshotID is given, all snapshots matching the host, tag and path filte .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. +Exit status is 10 if the repository does not exist. +Exit status is 11 if the repository is already locked. .SH OPTIONS @@ -39,11 +42,11 @@ Exit status is 0 if the command was successful, and non-zero if there was any er .PP \fB-H\fP, \fB--host\fP=[] - only consider snapshots for this \fBhost\fR (can be specified multiple times) + only consider snapshots for this \fBhost\fR (can be specified multiple times) (default: $RESTIC_HOST) .PP \fB--path\fP=[] - only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times) + only consider snapshots including this (absolute) \fBpath\fR (can be specified multiple times, snapshots must include all specified paths) .PP \fB--remove\fP=[] @@ -75,6 +78,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-unlock.1 b/doc/man/restic-unlock.1 index 0b3b43f2acc..a24a4f81535 100644 --- a/doc/man/restic-unlock.1 +++ b/doc/man/restic-unlock.1 @@ -18,7 +18,8 @@ The "unlock" command removes stale locks that have been created by other restic .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. 
.SH OPTIONS @@ -48,6 +49,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic-version.1 b/doc/man/restic-version.1 index ccc23038f75..e9df439edb0 100644 --- a/doc/man/restic-version.1 +++ b/doc/man/restic-version.1 @@ -19,7 +19,8 @@ and the version of this software. .SH EXIT STATUS .PP -Exit status is 0 if the command was successful, and non-zero if there was any error. +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. .SH OPTIONS @@ -45,6 +46,14 @@ Exit status is 0 if the command was successful, and non-zero if there was any er \fB--compression\fP=auto compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/man/restic.1 b/doc/man/restic.1 index 333eab76a41..ee423c6ad99 100644 --- a/doc/man/restic.1 +++ b/doc/man/restic.1 @@ -41,6 +41,14 @@ The full documentation can be found at https://restic.readthedocs.io/ . 
\fB-h\fP, \fB--help\fP[=false] help for restic +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + .PP \fB--insecure-tls\fP[=false] skip TLS certificate verification when connecting to the repository (insecure) diff --git a/doc/powershell-completion.ps1 b/doc/powershell-completion.ps1 index d8aa5a1af81..033477e7bed 100644 --- a/doc/powershell-completion.ps1 +++ b/doc/powershell-completion.ps1 @@ -10,7 +10,7 @@ filter __restic_escapeStringWithSpecialChars { $_ -replace '\s|#|@|\$|;|,|''|\{|\}|\(|\)|"|`|\||<|>|&','`$&' } -[scriptblock]$__resticCompleterBlock = { +[scriptblock]${__resticCompleterBlock} = { param( $WordToComplete, $CommandAst, @@ -85,7 +85,7 @@ filter __restic_escapeStringWithSpecialChars { __restic_debug "Calling $RequestComp" # First disable ActiveHelp which is not supported for Powershell - $env:RESTIC_ACTIVE_HELP=0 + ${env:RESTIC_ACTIVE_HELP}=0 #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element @@ -242,4 +242,4 @@ filter __restic_escapeStringWithSpecialChars { } } -Register-ArgumentCompleter -CommandName 'restic' -ScriptBlock $__resticCompleterBlock +Register-ArgumentCompleter -CommandName 'restic' -ScriptBlock ${__resticCompleterBlock} From 277c8f5029a12bd882c2c1d2088f435caec67bb8 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 26 Jul 2024 11:30:37 +0200 Subject: [PATCH 494/893] Add version for 0.17.0 --- VERSION | 2 +- cmd/restic/global.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index e62f3ef1c62..c5523bd09b1 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.16.5-dev +0.17.0 diff --git a/cmd/restic/global.go b/cmd/restic/global.go index be485dc3b94..080863da7fa 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -47,7 +47,7 @@ import ( // to a missing backend storage location or config file var ErrNoRepository = errors.New("repository does not exist") -var version = "0.16.5-dev (compiled manually)" +var version = "0.17.0" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" From ebc15b8680c0b0fbbb2e1885dde42a205b49789c Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 26 Jul 2024 11:30:50 +0200 Subject: [PATCH 495/893] Set development version for 0.17.0 --- VERSION | 2 +- cmd/restic/global.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index c5523bd09b1..a0073758b8a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.17.0 +0.17.0-dev diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 080863da7fa..846f3339fe1 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -47,7 +47,7 @@ import ( // to a missing backend storage location or config file var ErrNoRepository = errors.New("repository does not exist") -var version = "0.17.0" +var version = "0.17.0-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. 
 const TimeFormat = "2006-01-02 15:04:05"

From d4b28cea6c91676554242fdd94b04f9db41a891d Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Fri, 26 Jul 2024 20:09:46 +0200
Subject: [PATCH 496/893] docs: clarify how to pass arguments using backup
 --stdin-from-command

---
 doc/040_backup.rst | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/doc/040_backup.rst b/doc/040_backup.rst
index 81d99e07110..25c5a8ad110 100644
--- a/doc/040_backup.rst
+++ b/doc/040_backup.rst
@@ -584,11 +584,13 @@ Reading data from a command
 Sometimes, it can be useful to directly save the output of a program, for example,
 ``mysqldump`` so that the SQL can later be restored. Restic supports this mode of
 operation; just supply the option ``--stdin-from-command`` when using the
-``backup`` action, and write the command in place of the files/directories:
+``backup`` action, and write the command in place of the files/directories. To prevent
+restic from interpreting the arguments for the command, make sure to add ``--`` before
+the command starts:
 
 .. code-block:: console
 
-    $ restic -r /srv/restic-repo backup --stdin-from-command mysqldump [...]
+    $ restic -r /srv/restic-repo backup --stdin-from-command -- mysqldump --host example mydb [...]
 
 This command creates a new snapshot based on the standard output of ``mysqldump``.
 By default, the command's standard output is saved in a file named ``stdin``.
@@ -596,7 +598,7 @@ A different name can be specified with ``--stdin-filename``:
 
 .. code-block:: console
 
-    $ restic -r /srv/restic-repo backup --stdin-filename production.sql --stdin-from-command mysqldump [...]
+    $ restic -r /srv/restic-repo backup --stdin-filename production.sql --stdin-from-command -- mysqldump --host example mydb [...]
 
 Restic uses the command exit code to determine whether the command succeeded. A non-zero
 exit code from the command causes restic to cancel the backup. This causes

From bb50d86e68830ce63a0b986293d206076fe61ae2 Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Sun, 28 Jul 2024 12:21:30 +0200
Subject: [PATCH 497/893] cat: add subcommand completion

---
 cmd/restic/cmd_cat.go | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go
index 693c26790dc..d3e98b2ffd1 100644
--- a/cmd/restic/cmd_cat.go
+++ b/cmd/restic/cmd_cat.go
@@ -12,6 +12,8 @@ import (
 	"github.com/restic/restic/internal/restic"
 )
 
+var catAllowedCmds = []string{"config", "index", "snapshot", "key", "masterkey", "lock", "pack", "blob", "tree"}
+
 var cmdCat = &cobra.Command{
 	Use:   "cat [flags] [masterkey|config|pack ID|blob ID|snapshot ID|index ID|key ID|lock ID|tree snapshot:subfolder]",
 	Short: "Print internal objects to stdout",
@@ -30,6 +32,7 @@ Exit status is 11 if the repository is already locked. 
RunE: func(cmd *cobra.Command, args []string) error { return runCat(cmd.Context(), globalOptions, args) }, + ValidArgs: catAllowedCmds, } func init() { @@ -37,21 +40,19 @@ func init() { } func validateCatArgs(args []string) error { - var allowedCmds = []string{"config", "index", "snapshot", "key", "masterkey", "lock", "pack", "blob", "tree"} - if len(args) < 1 { return errors.Fatal("type not specified") } validType := false - for _, v := range allowedCmds { + for _, v := range catAllowedCmds { if v == args[0] { validType = true break } } if !validType { - return errors.Fatalf("invalid type %q, must be one of [%s]", args[0], strings.Join(allowedCmds, "|")) + return errors.Fatalf("invalid type %q, must be one of [%s]", args[0], strings.Join(catAllowedCmds, "|")) } if args[0] != "masterkey" && args[0] != "config" && len(args) != 2 { From 7448a15f720b270a1c18fce0a974af3c64329f3f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 28 Jul 2024 12:21:42 +0200 Subject: [PATCH 498/893] stats: add --mode completions --- cmd/restic/cmd_stats.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 8a78d57f76d..60ab1e5bcb4 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -70,10 +70,20 @@ type StatsOptions struct { var statsOptions StatsOptions +func must(err error) { + if err != nil { + panic(fmt.Sprintf("error during setup: %v", err)) + } +} + func init() { cmdRoot.AddCommand(cmdStats) f := cmdStats.Flags() f.StringVar(&statsOptions.countMode, "mode", countModeRestoreSize, "counting mode: restore-size (default), files-by-contents, blobs-per-file or raw-data") + must(cmdStats.RegisterFlagCompletionFunc("mode", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return []string{countModeRestoreSize, countModeUniqueFilesByContents, countModeBlobsPerFile, countModeRawData}, cobra.ShellCompDirectiveDefault + })) + initMultiSnapshotFilter(f, &statsOptions.SnapshotFilter, true) } From 62c4a5e9a0f463203120aba3bb7fc76896b19f8a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 29 Jul 2024 20:10:58 +0200 Subject: [PATCH 499/893] fs: fix long path handling for ResetPermissions, ClearAttributes and security descriptors --- internal/fs/file.go | 2 +- internal/fs/file_windows.go | 2 +- internal/fs/sd_windows.go | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/fs/file.go b/internal/fs/file.go index 929195f1c97..85b202dc8b8 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -134,7 +134,7 @@ func IsAccessDenied(err error) bool { // ResetPermissions resets the permissions of the file at the specified path func ResetPermissions(path string) error { // Set the default file permissions - if err := os.Chmod(path, 0600); err != nil { + if err := os.Chmod(fixpath(path), 0600); err != nil { return err } return nil diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go index b05068c426b..50c7e9938f6 100644 --- a/internal/fs/file_windows.go +++ b/internal/fs/file_windows.go @@ -85,7 +85,7 @@ func ClearSystem(path string) error { // ClearAttribute removes the specified attribute from the file. 
func ClearAttribute(path string, attribute uint32) error { - ptr, err := windows.UTF16PtrFromString(path) + ptr, err := windows.UTF16PtrFromString(fixpath(path)) if err != nil { return err } diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 5d98b4ef46b..2da1c5df452 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -129,22 +129,22 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { // getNamedSecurityInfoHigh gets the higher level SecurityDescriptor which requires admin permissions. func getNamedSecurityInfoHigh(filePath string) (*windows.SECURITY_DESCRIPTOR, error) { - return windows.GetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, highSecurityFlags) + return windows.GetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, highSecurityFlags) } // getNamedSecurityInfoLow gets the lower level SecurityDescriptor which requires no admin permissions. func getNamedSecurityInfoLow(filePath string) (*windows.SECURITY_DESCRIPTOR, error) { - return windows.GetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, lowBackupSecurityFlags) + return windows.GetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, lowBackupSecurityFlags) } // setNamedSecurityInfoHigh sets the higher level SecurityDescriptor which requires admin permissions. func setNamedSecurityInfoHigh(filePath string, owner *windows.SID, group *windows.SID, dacl *windows.ACL, sacl *windows.ACL) error { - return windows.SetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, highSecurityFlags, owner, group, dacl, sacl) + return windows.SetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, highSecurityFlags, owner, group, dacl, sacl) } // setNamedSecurityInfoLow sets the lower level SecurityDescriptor which requires no admin permissions. 
func setNamedSecurityInfoLow(filePath string, dacl *windows.ACL) error { - return windows.SetNamedSecurityInfo(filePath, windows.SE_FILE_OBJECT, lowRestoreSecurityFlags, nil, nil, dacl, nil) + return windows.SetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, lowRestoreSecurityFlags, nil, nil, dacl, nil) } // enableBackupPrivilege enables privilege for backing up security descriptors From a9be9867828b65d1b8ac6c21c9d695b0ee294ec2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 29 Jul 2024 21:11:47 +0200 Subject: [PATCH 500/893] restorer: add minimal long path handling test --- internal/restorer/restorer_test.go | 33 ++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 9c02afe68bd..191f3b8ef48 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -1506,3 +1506,36 @@ func TestRestoreToFile(t *testing.T) { err := res.RestoreTo(ctx, tempdir) rtest.Assert(t, strings.Contains(err.Error(), "cannot create target directory"), "unexpected error %v", err) } + +func TestRestorerLongPath(t *testing.T) { + tmp := t.TempDir() + + longPath := tmp + for i := 0; i < 20; i++ { + longPath = filepath.Join(longPath, "aaaaaaaaaaaaaaaaaaaa") + } + + rtest.OK(t, os.MkdirAll(longPath, 0o700)) + f, err := fs.OpenFile(filepath.Join(longPath, "file"), fs.O_CREATE|fs.O_RDWR, 0o600) + rtest.OK(t, err) + _, err = f.WriteString("Hello, World!") + rtest.OK(t, err) + rtest.OK(t, f.Close()) + + repo := repository.TestRepository(t) + + local := &fs.Local{} + sc := archiver.NewScanner(local) + rtest.OK(t, sc.Scan(context.TODO(), []string{tmp})) + arch := archiver.New(repo, local, archiver.Options{}) + sn, _, _, err := arch.Snapshot(context.Background(), []string{tmp}, archiver.SnapshotOptions{}) + rtest.OK(t, err) + + res := NewRestorer(repo, sn, Options{}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rtest.OK(t, res.RestoreTo(ctx, tmp)) + _, err = res.VerifyFiles(ctx, tmp) + rtest.OK(t, err) +} From 98c73eeca945c49df2ce1979f38d9d59941a1429 Mon Sep 17 00:00:00 2001 From: Ben Northway Date: Tue, 30 Jul 2024 09:11:18 -0600 Subject: [PATCH 501/893] Update faq.rst the correct output of `echo "~/documents"` is `~/documents` --- doc/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/faq.rst b/doc/faq.rst index 19879d8171e..b26398f8c8b 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -90,7 +90,7 @@ The error here is that the tilde ``~`` in ``"~/documents"`` didn't get expanded /home/john/documents $ echo "~/documents" - ~/document + ~/documents $ echo "$HOME/documents" /home/john/documents From 6a97833337b2d85584a2a7e9af39266790834b44 Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Sat, 27 Jul 2024 18:55:00 -0400 Subject: [PATCH 502/893] restore: clean up error handling when restoring metadata - Fix a logic error that instead of reporting the *first* metadata-setting error that appears, we were instead reporting the *last* error (and only if the lchown call failed!). - Don't show any errors when setting metadata for files in non-root mode (things like timestamps, attributes). Previously, only lchown errors were skipped. But other kinds of attribute errors make sense to skip as well. The code path happened to work correctly before because of the above logic error. But once that was fixed, this change needed to happen too. 
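
The guard used throughout the fixed checks is the usual "first error wins" idiom.
A minimal, self-contained sketch of that idiom is shown below; restoreAll and its
step callbacks are hypothetical names used only for illustration and do not exist
in restic:

    func restoreAll(steps ...func() error) error {
        var firsterr error
        for _, step := range steps {
            // Run every step, but remember only the first failure.
            if err := step(); err != nil && firsterr == nil {
                firsterr = err
            }
        }
        return firsterr
    }

With firsterr == nil as the condition, a later failure can no longer overwrite the
error that is eventually returned, which is what the corrected checks in node.go
below implement.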
--- changelog/unreleased/pull-4958 | 7 +++++++ internal/restic/node.go | 24 ++++++++++++------------ internal/restic/node_test.go | 12 ++++++++++++ 3 files changed, 31 insertions(+), 12 deletions(-) create mode 100644 changelog/unreleased/pull-4958 diff --git a/changelog/unreleased/pull-4958 b/changelog/unreleased/pull-4958 new file mode 100644 index 00000000000..bbb28a97b8e --- /dev/null +++ b/changelog/unreleased/pull-4958 @@ -0,0 +1,7 @@ +Bugfix: Don't ignore metadata-setting errors during restore + +Restic was accidentally ignoring errors when setting timestamps, +attributes, or file modes during restore. It will now report those +errors (unless it's just a permission error when not running as root). + +https://github.com/restic/restic/pull/4958 diff --git a/internal/restic/node.go b/internal/restic/node.go index 51c6071b7a7..7c19882276a 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -229,6 +229,13 @@ func (node *Node) CreateAt(ctx context.Context, path string, repo BlobLoader) er func (node Node) RestoreMetadata(path string, warn func(msg string)) error { err := node.restoreMetadata(path, warn) if err != nil { + // It is common to have permission errors for folders like /home + // unless you're running as root, so ignore those. + if os.Geteuid() > 0 && errors.Is(err, os.ErrPermission) { + debug.Log("not running as root, ignoring permission error for %v: %v", + path, err) + return nil + } debug.Log("restoreMetadata(%s) error %v", path, err) } @@ -239,33 +246,26 @@ func (node Node) restoreMetadata(path string, warn func(msg string)) error { var firsterr error if err := lchown(path, int(node.UID), int(node.GID)); err != nil { - // Like "cp -a" and "rsync -a" do, we only report lchown permission errors - // if we run as root. - if os.Geteuid() > 0 && os.IsPermission(err) { - debug.Log("not running as root, ignoring lchown permission error for %v: %v", - path, err) - } else { - firsterr = errors.WithStack(err) - } + firsterr = errors.WithStack(err) } if err := node.RestoreTimestamps(path); err != nil { debug.Log("error restoring timestamps for dir %v: %v", path, err) - if firsterr != nil { + if firsterr == nil { firsterr = err } } if err := node.restoreExtendedAttributes(path); err != nil { debug.Log("error restoring extended attributes for %v: %v", path, err) - if firsterr != nil { + if firsterr == nil { firsterr = err } } if err := node.restoreGenericAttributes(path, warn); err != nil { debug.Log("error restoring generic attributes for %v: %v", path, err) - if firsterr != nil { + if firsterr == nil { firsterr = err } } @@ -275,7 +275,7 @@ func (node Node) restoreMetadata(path string, warn func(msg string)) error { // calls above would fail. 
if node.Type != "symlink" { if err := fs.Chmod(path, node.Mode); err != nil { - if firsterr != nil { + if firsterr == nil { firsterr = errors.WithStack(err) } } diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index 6e0f31e2136..7991d33e0cb 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" ) @@ -382,3 +383,14 @@ func TestSymlinkSerializationFormat(t *testing.T) { test.Assert(t, n2.LinkTargetRaw == nil, "quoted link target is just a helper field and must be unset after decoding") } } + +func TestNodeRestoreMetadataError(t *testing.T) { + tempdir := t.TempDir() + + node := nodeTests[0] + nodePath := filepath.Join(tempdir, node.Name) + + // This will fail because the target file does not exist + err := node.RestoreMetadata(nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }) + test.Assert(t, errors.Is(err, os.ErrNotExist), "failed for an unexpected reason") +} From ae1cb889dd566a1c3417234f18e3b6b7c908df6f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 31 Jul 2024 19:30:47 +0200 Subject: [PATCH 503/893] Add more checks for canceled contexts --- cmd/restic/cmd_diff.go | 12 ++++++++++++ cmd/restic/cmd_dump.go | 4 ++++ cmd/restic/cmd_find.go | 4 ++++ cmd/restic/cmd_forget.go | 4 ++++ cmd/restic/cmd_recover.go | 4 ++++ cmd/restic/cmd_snapshots.go | 8 ++++++++ internal/backend/sftp/sftp.go | 4 ++++ internal/fuse/dir.go | 8 ++++++++ internal/fuse/file.go | 6 +++++- internal/fuse/snapshots_dir.go | 4 ++++ internal/repository/check.go | 4 ++++ internal/repository/repository.go | 4 ++++ internal/restic/snapshot_find.go | 4 ++++ internal/restorer/filerestorer.go | 4 ++++ internal/restorer/restorer.go | 8 ++++++-- internal/walker/rewriter.go | 4 ++++ internal/walker/walker.go | 4 ++++ 17 files changed, 87 insertions(+), 3 deletions(-) diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 6488a7c354e..b15882b0902 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -177,6 +177,10 @@ func (c *Comparer) printDir(ctx context.Context, mode string, stats *DiffStat, b } for _, node := range tree.Nodes { + if ctx.Err() != nil { + return ctx.Err() + } + name := path.Join(prefix, node.Name) if node.Type == "dir" { name += "/" @@ -204,6 +208,10 @@ func (c *Comparer) collectDir(ctx context.Context, blobs restic.BlobSet, id rest } for _, node := range tree.Nodes { + if ctx.Err() != nil { + return ctx.Err() + } + addBlobs(blobs, node) if node.Type == "dir" { @@ -255,6 +263,10 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref tree1Nodes, tree2Nodes, names := uniqueNodeNames(tree1, tree2) for _, name := range names { + if ctx.Err() != nil { + return ctx.Err() + } + node1, t1 := tree1Nodes[name] node2, t2 := tree2Nodes[name] diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 7e1efa3ae3d..9c0fe535e68 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -85,6 +85,10 @@ func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.BlobLoade item := filepath.Join(prefix, pathComponents[0]) l := len(pathComponents) for _, node := range tree.Nodes { + if ctx.Err() != nil { + return ctx.Err() + } + // If dumping something in the highest level it will just take the // first item it finds and dump that according to the 
switch case below. if node.Name == pathComponents[0] { diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 4f9549ca44a..aebca594e56 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -377,6 +377,10 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error { if node.Type == "file" && f.blobIDs != nil { for _, id := range node.Content { + if ctx.Err() != nil { + return ctx.Err() + } + idStr := id.String() if _, ok := f.blobIDs[idStr]; !ok { // Look for short ID form diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 87738b518f1..27b8f4f7490 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -246,6 +246,10 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption printer.P("Applying Policy: %v\n", policy) for k, snapshotGroup := range snapshotGroups { + if ctx.Err() != nil { + return ctx.Err() + } + if gopts.Verbose >= 1 && !gopts.JSON { err = PrintSnapshotGroupHeader(globalOptions.stdout, k) if err != nil { diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 5e4744bb6b7..4e8b8c07763 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -118,6 +118,10 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { return nil } + if ctx.Err() != nil { + return ctx.Err() + } + tree := restic.NewTree(len(roots)) for id := range roots { var subtreeID = id diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 9112e1b9585..826ab55ec36 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -81,6 +81,10 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions } for k, list := range snapshotGroups { + if ctx.Err() != nil { + return ctx.Err() + } + if opts.Last { // This branch should be removed in the same time // that --last. 
@@ -101,6 +105,10 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions } for k, list := range snapshotGroups { + if ctx.Err() != nil { + return ctx.Err() + } + if grouped { err := PrintSnapshotGroupHeader(globalOptions.stdout, k) if err != nil { diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index 70fc30a62c9..efbd0c8d5b7 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -578,6 +578,10 @@ func (r *SFTP) deleteRecursive(ctx context.Context, name string) error { } for _, fi := range entries { + if ctx.Err() != nil { + return ctx.Err() + } + itemName := r.Join(name, fi.Name()) if fi.IsDir() { err := r.deleteRecursive(ctx, itemName) diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go index 763a9640ce3..fd030295b6d 100644 --- a/internal/fuse/dir.go +++ b/internal/fuse/dir.go @@ -107,6 +107,10 @@ func (d *dir) open(ctx context.Context) error { } items := make(map[string]*restic.Node) for _, n := range tree.Nodes { + if ctx.Err() != nil { + return ctx.Err() + } + nodes, err := replaceSpecialNodes(ctx, d.root.repo, n) if err != nil { debug.Log(" replaceSpecialNodes(%v) failed: %v", n, err) @@ -171,6 +175,10 @@ func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { }) for _, node := range d.items { + if ctx.Err() != nil { + return nil, ctx.Err() + } + name := cleanupNodeName(node.Name) var typ fuse.DirentType switch node.Type { diff --git a/internal/fuse/file.go b/internal/fuse/file.go index e2e0cf9a016..494fca283ba 100644 --- a/internal/fuse/file.go +++ b/internal/fuse/file.go @@ -66,12 +66,16 @@ func (f *file) Attr(_ context.Context, a *fuse.Attr) error { } -func (f *file) Open(_ context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse) (fs.Handle, error) { +func (f *file) Open(ctx context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse) (fs.Handle, error) { debug.Log("open file %v with %d blobs", f.node.Name, len(f.node.Content)) var bytes uint64 cumsize := make([]uint64, 1+len(f.node.Content)) for i, id := range f.node.Content { + if ctx.Err() != nil { + return nil, ctx.Err() + } + size, found := f.root.repo.LookupBlobSize(restic.DataBlob, id) if !found { return nil, errors.Errorf("id %v not found in repository", id) diff --git a/internal/fuse/snapshots_dir.go b/internal/fuse/snapshots_dir.go index 7369ea17ad5..4cae7106c47 100644 --- a/internal/fuse/snapshots_dir.go +++ b/internal/fuse/snapshots_dir.go @@ -78,6 +78,10 @@ func (d *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { } for name, entry := range meta.names { + if ctx.Err() != nil { + return nil, ctx.Err() + } + d := fuse.Dirent{ Inode: inodeFromName(d.inode, name), Name: name, diff --git a/internal/repository/check.go b/internal/repository/check.go index 27eb11d716c..1eeea58dc1f 100644 --- a/internal/repository/check.go +++ b/internal/repository/check.go @@ -95,6 +95,10 @@ func checkPackInner(ctx context.Context, r *Repository, id restic.ID, blobs []re it := newPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec) for { + if ctx.Err() != nil { + return ctx.Err() + } + val, err := it.Next() if err == errPackEOF { break diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 838858c38fd..f7fd65c71d4 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -1000,6 +1000,10 @@ func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBl it := newPackBlobIterator(packID, newByteReader(data), dataStart, 
blobs, key, dec) for { + if ctx.Err() != nil { + return ctx.Err() + } + val, err := it.Next() if err == errPackEOF { break diff --git a/internal/restic/snapshot_find.go b/internal/restic/snapshot_find.go index 6d1ab9a7a22..6eb51b23707 100644 --- a/internal/restic/snapshot_find.go +++ b/internal/restic/snapshot_find.go @@ -134,6 +134,10 @@ func (f *SnapshotFilter) FindAll(ctx context.Context, be Lister, loader LoaderUn ids := NewIDSet() // Process all snapshot IDs given as arguments. for _, s := range snapshotIDs { + if ctx.Err() != nil { + return ctx.Err() + } + var sn *Snapshot if s == "latest" { if usedFilter { diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index e517e628460..31234b96098 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -122,6 +122,10 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { // create packInfo from fileInfo for _, file := range r.files { + if ctx.Err() != nil { + return ctx.Err() + } + fileBlobs := file.blobs.(restic.IDs) largeFile := len(fileBlobs) > largeFileBlobCount var packsMap map[restic.ID][]fileBlobInfo diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index cd3fd076d2b..00da4e18e82 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -450,7 +450,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { }, leaveDir: func(node *restic.Node, target, location string, expectedFilenames []string) error { if res.opts.Delete { - if err := res.removeUnexpectedFiles(target, location, expectedFilenames); err != nil { + if err := res.removeUnexpectedFiles(ctx, target, location, expectedFilenames); err != nil { return err } } @@ -469,7 +469,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { return err } -func (res *Restorer) removeUnexpectedFiles(target, location string, expectedFilenames []string) error { +func (res *Restorer) removeUnexpectedFiles(ctx context.Context, target, location string, expectedFilenames []string) error { if !res.opts.Delete { panic("internal error") } @@ -487,6 +487,10 @@ func (res *Restorer) removeUnexpectedFiles(target, location string, expectedFile } for _, entry := range entries { + if ctx.Err() != nil { + return ctx.Err() + } + if _, ok := keep[toComparableFilename(entry)]; ok { continue } diff --git a/internal/walker/rewriter.go b/internal/walker/rewriter.go index 6c27b26acfd..7e984ae2505 100644 --- a/internal/walker/rewriter.go +++ b/internal/walker/rewriter.go @@ -116,6 +116,10 @@ func (t *TreeRewriter) RewriteTree(ctx context.Context, repo BlobLoadSaver, node tb := restic.NewTreeJSONBuilder() for _, node := range curTree.Nodes { + if ctx.Err() != nil { + return restic.ID{}, ctx.Err() + } + path := path.Join(nodepath, node.Name) node = t.opts.RewriteNode(node, path) if node == nil { diff --git a/internal/walker/walker.go b/internal/walker/walker.go index 091b0548988..788ece1cf1a 100644 --- a/internal/walker/walker.go +++ b/internal/walker/walker.go @@ -57,6 +57,10 @@ func walk(ctx context.Context, repo restic.BlobLoader, prefix string, parentTree }) for _, node := range tree.Nodes { + if ctx.Err() != nil { + return ctx.Err() + } + p := path.Join(prefix, node.Name) if node.Type == "" { From a639454f287bbf17e4f86651261be320198af2eb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 31 Jul 2024 19:45:33 +0200 Subject: [PATCH 504/893] diff: improve cancelation handling --- cmd/restic/cmd_diff.go | 16 ++++++++-------- 1 file changed, 8 
insertions(+), 8 deletions(-) diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index b15882b0902..6793184b136 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -191,13 +191,13 @@ func (c *Comparer) printDir(ctx context.Context, mode string, stats *DiffStat, b if node.Type == "dir" { err := c.printDir(ctx, mode, stats, blobs, name, *node.Subtree) - if err != nil { + if err != nil && err != context.Canceled { Warnf("error: %v\n", err) } } } - return nil + return ctx.Err() } func (c *Comparer) collectDir(ctx context.Context, blobs restic.BlobSet, id restic.ID) error { @@ -216,13 +216,13 @@ func (c *Comparer) collectDir(ctx context.Context, blobs restic.BlobSet, id rest if node.Type == "dir" { err := c.collectDir(ctx, blobs, *node.Subtree) - if err != nil { + if err != nil && err != context.Canceled { Warnf("error: %v\n", err) } } } - return nil + return ctx.Err() } func uniqueNodeNames(tree1, tree2 *restic.Tree) (tree1Nodes, tree2Nodes map[string]*restic.Node, uniqueNames []string) { @@ -316,7 +316,7 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref } else { err = c.diffTree(ctx, stats, name, *node1.Subtree, *node2.Subtree) } - if err != nil { + if err != nil && err != context.Canceled { Warnf("error: %v\n", err) } } @@ -330,7 +330,7 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref if node1.Type == "dir" { err := c.printDir(ctx, "-", &stats.Removed, stats.BlobsBefore, prefix, *node1.Subtree) - if err != nil { + if err != nil && err != context.Canceled { Warnf("error: %v\n", err) } } @@ -344,14 +344,14 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref if node2.Type == "dir" { err := c.printDir(ctx, "+", &stats.Added, stats.BlobsAfter, prefix, *node2.Subtree) - if err != nil { + if err != nil && err != context.Canceled { Warnf("error: %v\n", err) } } } } - return nil + return ctx.Err() } func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []string) error { From 7b423a0915ebed6e94862e6bc947ffb6d7add0aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 01:11:48 +0000 Subject: [PATCH 505/893] build(deps): bump docker/login-action from 3.2.0 to 3.3.0 Bumps [docker/login-action](https://github.com/docker/login-action) from 3.2.0 to 3.3.0. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/0d4c9c5ea7693da7b068278f7b52bda2a190a446...9780b0c442fbb1117ed29e0efdff1e18412f7567) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a943d1b15ed..a24660b4553 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -25,7 +25,7 @@ jobs: uses: actions/checkout@v4 - name: Log in to the Container registry - uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} From db779195506b905cd2a5f447dc118698036d3a47 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 3 Aug 2024 17:22:56 +0200 Subject: [PATCH 506/893] migrate: show warning if migration is unknown --- cmd/restic/cmd_migrate.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index e8998005096..7e472ff121c 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -74,8 +74,10 @@ func checkMigrations(ctx context.Context, repo restic.Repository, printer progre func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, repo restic.Repository, args []string, term *termstatus.Terminal, printer progress.Printer) error { var firsterr error for _, name := range args { + found := false for _, m := range migrations.All { if m.Name() == name { + found = true ok, reason, err := m.Check(ctx, repo) if err != nil { return err @@ -119,6 +121,9 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptio printer.P("migration %v: success\n", m.Name()) } } + if !found { + printer.E("unknown migration %v", name) + } } return firsterr From b3bfb5ed44c255820989d621b429e62730ae5c66 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 3 Aug 2024 18:09:28 +0200 Subject: [PATCH 507/893] add changelog for long paths on windows --- changelog/unreleased/issue-4953 | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 changelog/unreleased/issue-4953 diff --git a/changelog/unreleased/issue-4953 b/changelog/unreleased/issue-4953 new file mode 100644 index 00000000000..78a266aff09 --- /dev/null +++ b/changelog/unreleased/issue-4953 @@ -0,0 +1,7 @@ +Bugfix: Correctly handle long paths on older Windows versions + +When using older Windows versions, like Windows Server 2012, restic 0.17.0 +failed to back up files with long paths. This has been fixed. + +https://github.com/restic/restic/issues/4953 +https://github.com/restic/restic/pull/4954 From 1c775feeccabf2373607703683b598beeb45f77b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 31 Jul 2024 20:49:44 +0200 Subject: [PATCH 508/893] add changelog for cancelation delay --- changelog/unreleased/issue-4957 | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 changelog/unreleased/issue-4957 diff --git a/changelog/unreleased/issue-4957 b/changelog/unreleased/issue-4957 new file mode 100644 index 00000000000..d18e28ec9b4 --- /dev/null +++ b/changelog/unreleased/issue-4957 @@ -0,0 +1,10 @@ +Bugfix: Fix delayed cancelation of some commands + +Since restic 0.17.0, some commands no longer promptly reacted to being canceled +via Ctrl-C (SIGINT) and continued to run for a limited amount of time. The most +affected commands were `diff`,`find`, `ls`, `stats` and `rewrite`. + +This has been fixed. 
+ +https://github.com/restic/restic/issues/4957 +https://github.com/restic/restic/pull/4960 From 4266dca1b627776ad5a1540f9a7643a5ff722b65 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 3 Aug 2024 18:51:38 +0200 Subject: [PATCH 509/893] cache: fix confusing debug log --- internal/backend/cache/backend.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/backend/cache/backend.go b/internal/backend/cache/backend.go index 63bb6f85f0a..94f648cf4d5 100644 --- a/internal/backend/cache/backend.go +++ b/internal/backend/cache/backend.go @@ -162,7 +162,9 @@ func (b *Backend) Load(ctx context.Context, h backend.Handle, length int, offset // try loading from cache without checking that the handle is actually cached inCache, err := b.loadFromCache(h, length, offset, consumer) if inCache { - debug.Log("error loading %v from cache: %v", h, err) + if err != nil { + debug.Log("error loading %v from cache: %v", h, err) + } // the caller must explicitly use cache.Forget() to remove the cache entry return err } From 068d5b95c30b44311dd3a33e481cb930a9f70692 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 3 Aug 2024 23:34:59 +0530 Subject: [PATCH 510/893] rewrite: skip saving empty indexes during MasterIndex.Rewrite --- internal/repository/index/master_index.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/repository/index/master_index.go b/internal/repository/index/master_index.go index f8e776b2301..9b5c4f9f8b5 100644 --- a/internal/repository/index/master_index.go +++ b/internal/repository/index/master_index.go @@ -456,6 +456,9 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud worker := func() error { for idx := range saveCh { idx.Finalize() + if len(idx.packs) == 0 { + continue + } if _, err := idx.SaveIndex(wgCtx, repo); err != nil { return err } From f4bdfea1c9a9ff2e5e9d2bc48eda44afc58cf1b0 Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Sat, 27 Jul 2024 19:00:43 -0400 Subject: [PATCH 511/893] backup: print scanner errors to stderr, not stdout --- internal/ui/backup/text.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go index f96746739e3..4e3f1aec46f 100644 --- a/internal/ui/backup/text.go +++ b/internal/ui/backup/text.go @@ -15,7 +15,8 @@ import ( type TextProgress struct { *ui.Message - term *termstatus.Terminal + term *termstatus.Terminal + verbosity uint } // assert that Backup implements the ProgressPrinter interface @@ -24,8 +25,9 @@ var _ ProgressPrinter = &TextProgress{} // NewTextProgress returns a new backup progress reporter. func NewTextProgress(term *termstatus.Terminal, verbosity uint) *TextProgress { return &TextProgress{ - Message: ui.NewMessage(term, verbosity), - term: term, + Message: ui.NewMessage(term, verbosity), + term: term, + verbosity: verbosity, } } @@ -73,7 +75,9 @@ func (b *TextProgress) Update(total, processed Counter, errors uint, currentFile // ScannerError is the error callback function for the scanner, it prints the // error in verbose mode and returns nil. 
func (b *TextProgress) ScannerError(_ string, err error) error { - b.V("scan: %v\n", err) + if b.verbosity >= 2 { + b.E("scan: %v\n", err) + } return nil } From ad2585af679a6c5a95056ede9555909588d24ebb Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Sat, 27 Jul 2024 19:04:34 -0400 Subject: [PATCH 512/893] backup: show actual error strings in --json mode Previously, an error JSON fragment would look like: {"message_type": "error", "error": {}} This is because encoding/json cannot marshal an error interface. Instead, we now call .Error() to get the string value. --- internal/ui/backup/json.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index 64b5de13bd1..81bbd10478f 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -68,7 +68,7 @@ func (b *JSONProgress) Update(total, processed Counter, errors uint, currentFile func (b *JSONProgress) ScannerError(item string, err error) error { b.error(errorUpdate{ MessageType: "error", - Error: err, + Error: err.Error(), During: "scan", Item: item, }) @@ -79,7 +79,7 @@ func (b *JSONProgress) ScannerError(item string, err error) error { func (b *JSONProgress) Error(item string, err error) error { b.error(errorUpdate{ MessageType: "error", - Error: err, + Error: err.Error(), During: "archival", Item: item, }) @@ -208,7 +208,7 @@ type statusUpdate struct { type errorUpdate struct { MessageType string `json:"message_type"` // "error" - Error error `json:"error"` + Error string `json:"error"` During string `json:"during"` Item string `json:"item"` } From d8ea178e691145ba2e15406165594cfdf72181b7 Mon Sep 17 00:00:00 2001 From: textaligncenter <67056612+textaligncenter@users.noreply.github.com> Date: Sat, 3 Aug 2024 16:44:45 +0000 Subject: [PATCH 513/893] fix panic in fs_reader --- changelog/unreleased/issue-4975 | 6 ++++++ internal/fs/fs_reader_command.go | 4 ++++ internal/fs/fs_reader_command_test.go | 5 +++++ 3 files changed, 15 insertions(+) create mode 100644 changelog/unreleased/issue-4975 diff --git a/changelog/unreleased/issue-4975 b/changelog/unreleased/issue-4975 new file mode 100644 index 00000000000..0e29935f576 --- /dev/null +++ b/changelog/unreleased/issue-4975 @@ -0,0 +1,6 @@ +Bugfix: Prevent `backup --stdin-from-command` from panicking + +If --stdin-from-command is used, restic now checks whether there is a command behind it. + +https://github.com/restic/restic/issues/4975 +https://github.com/restic/restic/pull/4976 diff --git a/internal/fs/fs_reader_command.go b/internal/fs/fs_reader_command.go index 3830e5811c4..6d061f64123 100644 --- a/internal/fs/fs_reader_command.go +++ b/internal/fs/fs_reader_command.go @@ -29,6 +29,10 @@ type CommandReader struct { } func NewCommandReader(ctx context.Context, args []string, logOutput io.Writer) (*CommandReader, error) { + if len(args) == 0 { + return nil, fmt.Errorf("no command was specified as argument") + } + // Prepare command and stdout command := exec.CommandContext(ctx, args[0], args[1:]...) 
stdout, err := command.StdoutPipe() diff --git a/internal/fs/fs_reader_command_test.go b/internal/fs/fs_reader_command_test.go index a9028544c4c..8f0d17b1ea5 100644 --- a/internal/fs/fs_reader_command_test.go +++ b/internal/fs/fs_reader_command_test.go @@ -34,6 +34,11 @@ func TestCommandReaderInvalid(t *testing.T) { test.Assert(t, err != nil, "missing error") } +func TestCommandReaderEmptyArgs(t *testing.T) { + _, err := fs.NewCommandReader(context.TODO(), []string{}, io.Discard) + test.Assert(t, err != nil, "missing error") +} + func TestCommandReaderOutput(t *testing.T) { reader, err := fs.NewCommandReader(context.TODO(), []string{"echo", "hello world"}, io.Discard) test.OK(t, err) From a376323331574a4f5dd00177d6ac2c8e5f7d760c Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Sat, 27 Jul 2024 19:06:26 -0400 Subject: [PATCH 514/893] restore: print JSON versions of errors in --json mode Previously, they were printed as freeform text. This also adds a ui.Terminal interface to make writing tests easier and also adds a few tests. --- changelog/unreleased/issue-4944 | 8 ++++ changelog/unreleased/issue-4945 | 8 ++++ cmd/restic/cmd_restore.go | 3 +- internal/restorer/restorer_test.go | 3 ++ internal/ui/backup/json.go | 5 +-- internal/ui/backup/json_test.go | 27 ++++++++++++++ internal/ui/backup/text.go | 4 +- internal/ui/backup/text_test.go | 27 ++++++++++++++ internal/ui/message.go | 6 +-- internal/ui/mock.go | 22 +++++++++++ internal/ui/restore/json.go | 25 ++++++++++++- internal/ui/restore/json_test.go | 24 ++++++++---- internal/ui/restore/progress.go | 17 ++++++--- internal/ui/restore/progress_test.go | 55 +++++++++++++++++++++------- internal/ui/restore/text.go | 24 ++++++------ internal/ui/restore/text_test.go | 36 ++++++++---------- internal/ui/terminal.go | 10 +++++ 17 files changed, 234 insertions(+), 70 deletions(-) create mode 100644 changelog/unreleased/issue-4944 create mode 100644 changelog/unreleased/issue-4945 create mode 100644 internal/ui/backup/json_test.go create mode 100644 internal/ui/backup/text_test.go create mode 100644 internal/ui/mock.go create mode 100644 internal/ui/terminal.go diff --git a/changelog/unreleased/issue-4944 b/changelog/unreleased/issue-4944 new file mode 100644 index 00000000000..02f5ae3418c --- /dev/null +++ b/changelog/unreleased/issue-4944 @@ -0,0 +1,8 @@ +Enhancement: Print JSON-formatted errors during `restore --json` + +Restic printed any restore errors directly to the console as freeform +text messages, even with `--json`. Restic now prints them as JSON formatted +messages when `--json` is passed. + +https://github.com/restic/restic/issues/4944 +https://github.com/restic/restic/pull/4946 diff --git a/changelog/unreleased/issue-4945 b/changelog/unreleased/issue-4945 new file mode 100644 index 00000000000..7bbf69fac2b --- /dev/null +++ b/changelog/unreleased/issue-4945 @@ -0,0 +1,8 @@ +Bugfix: Include missing backup error text with `--json` + +Restic was not actually providing the text of an error message during +backup if `--json` was passed, instead only printing `"error": {}`. +Restic now includes the error text in JSON output. 
+ +https://github.com/restic/restic/issues/4945 +https://github.com/restic/restic/pull/4946 diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 89942f4cf93..eda608802d4 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -164,9 +164,8 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, totalErrors := 0 res.Error = func(location string, err error) error { - msg.E("ignoring error for %s: %s\n", location, err) totalErrors++ - return nil + return progress.Error(location, err) } res.Warn = func(message string) { msg.E("Warning: %s\n", message) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 191f3b8ef48..a6de50556b1 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -989,6 +989,9 @@ type printerMock struct { func (p *printerMock) Update(_ restoreui.State, _ time.Duration) { } +func (p *printerMock) Error(item string, err error) error { + return nil +} func (p *printerMock) CompleteItem(action restoreui.ItemAction, item string, size uint64) { } func (p *printerMock) Finish(s restoreui.State, _ time.Duration) { diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index 81bbd10478f..bb6685136f8 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -7,14 +7,13 @@ import ( "github.com/restic/restic/internal/archiver" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" - "github.com/restic/restic/internal/ui/termstatus" ) // JSONProgress reports progress for the `backup` command in JSON. type JSONProgress struct { *ui.Message - term *termstatus.Terminal + term ui.Terminal v uint } @@ -22,7 +21,7 @@ type JSONProgress struct { var _ ProgressPrinter = &JSONProgress{} // NewJSONProgress returns a new backup progress reporter. 
-func NewJSONProgress(term *termstatus.Terminal, verbosity uint) *JSONProgress { +func NewJSONProgress(term ui.Terminal, verbosity uint) *JSONProgress { return &JSONProgress{ Message: ui.NewMessage(term, verbosity), term: term, diff --git a/internal/ui/backup/json_test.go b/internal/ui/backup/json_test.go new file mode 100644 index 00000000000..4846279b38b --- /dev/null +++ b/internal/ui/backup/json_test.go @@ -0,0 +1,27 @@ +package backup + +import ( + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui" +) + +func createJSONProgress() (*ui.MockTerminal, ProgressPrinter) { + term := &ui.MockTerminal{} + printer := NewJSONProgress(term, 3) + return term, printer +} + +func TestJSONError(t *testing.T) { + term, printer := createJSONProgress() + test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":\"error \\\"message\\\"\",\"during\":\"archival\",\"item\":\"/path\"}\n"}, term.Errors) +} + +func TestJSONScannerError(t *testing.T) { + term, printer := createJSONProgress() + test.Equals(t, printer.ScannerError("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":\"error \\\"message\\\"\",\"during\":\"scan\",\"item\":\"/path\"}\n"}, term.Errors) +} diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go index 4e3f1aec46f..097f0d0d818 100644 --- a/internal/ui/backup/text.go +++ b/internal/ui/backup/text.go @@ -15,7 +15,7 @@ import ( type TextProgress struct { *ui.Message - term *termstatus.Terminal + term ui.Terminal verbosity uint } @@ -23,7 +23,7 @@ type TextProgress struct { var _ ProgressPrinter = &TextProgress{} // NewTextProgress returns a new backup progress reporter. -func NewTextProgress(term *termstatus.Terminal, verbosity uint) *TextProgress { +func NewTextProgress(term ui.Terminal, verbosity uint) *TextProgress { return &TextProgress{ Message: ui.NewMessage(term, verbosity), term: term, diff --git a/internal/ui/backup/text_test.go b/internal/ui/backup/text_test.go new file mode 100644 index 00000000000..39338a50cf7 --- /dev/null +++ b/internal/ui/backup/text_test.go @@ -0,0 +1,27 @@ +package backup + +import ( + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui" +) + +func createTextProgress() (*ui.MockTerminal, ProgressPrinter) { + term := &ui.MockTerminal{} + printer := NewTextProgress(term, 3) + return term, printer +} + +func TestError(t *testing.T) { + term, printer := createTextProgress() + test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"error: error \"message\"\n"}, term.Errors) +} + +func TestScannerError(t *testing.T) { + term, printer := createTextProgress() + test.Equals(t, printer.ScannerError("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"scan: error \"message\"\n"}, term.Errors) +} diff --git a/internal/ui/message.go b/internal/ui/message.go index 38cdaf30117..6ad5a439e67 100644 --- a/internal/ui/message.go +++ b/internal/ui/message.go @@ -2,19 +2,17 @@ package ui import ( "fmt" - - "github.com/restic/restic/internal/ui/termstatus" ) // Message reports progress with messages of different verbosity. type Message struct { - term *termstatus.Terminal + term Terminal v uint } // NewMessage returns a message progress reporter with underlying terminal // term. 
-func NewMessage(term *termstatus.Terminal, verbosity uint) *Message { +func NewMessage(term Terminal, verbosity uint) *Message { return &Message{ term: term, v: verbosity, diff --git a/internal/ui/mock.go b/internal/ui/mock.go new file mode 100644 index 00000000000..5a4debb024d --- /dev/null +++ b/internal/ui/mock.go @@ -0,0 +1,22 @@ +package ui + +type MockTerminal struct { + Output []string + Errors []string +} + +func (m *MockTerminal) Print(line string) { + m.Output = append(m.Output, line) +} + +func (m *MockTerminal) Error(line string) { + m.Errors = append(m.Errors, line) +} + +func (m *MockTerminal) SetStatus(lines []string) { + m.Output = append([]string{}, lines...) +} + +func (m *MockTerminal) CanUpdateStatus() bool { + return true +} diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go index c248a7951e6..4135dd6675e 100644 --- a/internal/ui/restore/json.go +++ b/internal/ui/restore/json.go @@ -7,11 +7,11 @@ import ( ) type jsonPrinter struct { - terminal term + terminal ui.Terminal verbosity uint } -func NewJSONProgress(terminal term, verbosity uint) ProgressPrinter { +func NewJSONProgress(terminal ui.Terminal, verbosity uint) ProgressPrinter { return &jsonPrinter{ terminal: terminal, verbosity: verbosity, @@ -22,6 +22,10 @@ func (t *jsonPrinter) print(status interface{}) { t.terminal.Print(ui.ToJSONString(status)) } +func (t *jsonPrinter) error(status interface{}) { + t.terminal.Error(ui.ToJSONString(status)) +} + func (t *jsonPrinter) Update(p State, duration time.Duration) { status := statusUpdate{ MessageType: "status", @@ -41,6 +45,16 @@ func (t *jsonPrinter) Update(p State, duration time.Duration) { t.print(status) } +func (t *jsonPrinter) Error(item string, err error) error { + t.error(errorUpdate{ + MessageType: "error", + Error: err.Error(), + During: "restore", + Item: item, + }) + return nil +} + func (t *jsonPrinter) CompleteItem(messageType ItemAction, item string, size uint64) { if t.verbosity < 3 { return @@ -99,6 +113,13 @@ type statusUpdate struct { BytesSkipped uint64 `json:"bytes_skipped,omitempty"` } +type errorUpdate struct { + MessageType string `json:"message_type"` // "error" + Error string `json:"error"` + During string `json:"during"` + Item string `json:"item"` +} + type verboseUpdate struct { MessageType string `json:"message_type"` // "verbose_status" Action string `json:"action"` diff --git a/internal/ui/restore/json_test.go b/internal/ui/restore/json_test.go index 06a70d5dcd4..1e0f80a4f7f 100644 --- a/internal/ui/restore/json_test.go +++ b/internal/ui/restore/json_test.go @@ -4,11 +4,13 @@ import ( "testing" "time" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui" ) -func createJSONProgress() (*mockTerm, ProgressPrinter) { - term := &mockTerm{} +func createJSONProgress() (*ui.MockTerminal, ProgressPrinter) { + term := &ui.MockTerminal{} printer := NewJSONProgress(term, 3) return term, printer } @@ -16,31 +18,31 @@ func createJSONProgress() (*mockTerm, ProgressPrinter) { func TestJSONPrintUpdate(t *testing.T) { term, printer := createJSONProgress() printer.Update(State{3, 11, 0, 29, 47, 0}, 5*time.Second) - test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output) + test.Equals(t, 
[]string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.Output) } func TestJSONPrintUpdateWithSkipped(t *testing.T) { term, printer := createJSONProgress() printer.Update(State{3, 11, 2, 29, 47, 59}, 5*time.Second) - test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":29,\"bytes_skipped\":59}\n"}, term.output) + test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":29,\"bytes_skipped\":59}\n"}, term.Output) } func TestJSONPrintSummaryOnSuccess(t *testing.T) { term, printer := createJSONProgress() printer.Finish(State{11, 11, 0, 47, 47, 0}, 5*time.Second) - test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.output) + test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.Output) } func TestJSONPrintSummaryOnErrors(t *testing.T) { term, printer := createJSONProgress() printer.Finish(State{3, 11, 0, 29, 47, 0}, 5*time.Second) - test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output) + test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.Output) } func TestJSONPrintSummaryOnSuccessWithSkipped(t *testing.T) { term, printer := createJSONProgress() printer.Finish(State{11, 11, 2, 47, 47, 59}, 5*time.Second) - test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":47,\"bytes_skipped\":59}\n"}, term.output) + test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":47,\"bytes_skipped\":59}\n"}, term.Output) } func TestJSONPrintCompleteItem(t *testing.T) { @@ -57,6 +59,12 @@ func TestJSONPrintCompleteItem(t *testing.T) { } { term, printer := createJSONProgress() printer.CompleteItem(data.action, "test", data.size) - test.Equals(t, []string{data.expected}, term.output) + test.Equals(t, []string{data.expected}, term.Output) } } + +func TestJSONError(t *testing.T) { + term, printer := createJSONProgress() + test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":\"error \\\"message\\\"\",\"during\":\"restore\",\"item\":\"/path\"}\n"}, term.Errors) +} diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index 67b15f07e74..06f4c86aa11 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -32,13 +32,9 @@ type progressInfoEntry struct { bytesTotal uint64 } -type term interface { - Print(line string) - SetStatus(lines []string) -} - type ProgressPrinter interface { Update(progress State, duration time.Duration) + Error(item 
string, err error) error CompleteItem(action ItemAction, item string, size uint64) Finish(progress State, duration time.Duration) } @@ -139,6 +135,17 @@ func (p *Progress) ReportDeletedFile(name string) { p.printer.CompleteItem(ActionDeleted, name, 0) } +func (p *Progress) Error(item string, err error) error { + if p == nil { + return nil + } + + p.m.Lock() + defer p.m.Unlock() + + return p.printer.Error(item, err) +} + func (p *Progress) Finish() { p.updater.Done() } diff --git a/internal/ui/restore/progress_test.go b/internal/ui/restore/progress_test.go index 4a630474159..b01440bee91 100644 --- a/internal/ui/restore/progress_test.go +++ b/internal/ui/restore/progress_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/test" ) @@ -23,9 +24,18 @@ type itemTraceEntry struct { } type itemTrace []itemTraceEntry + +type errorTraceEntry struct { + item string + err error +} + +type errorTrace []errorTraceEntry + type mockPrinter struct { - trace printerTrace - items itemTrace + trace printerTrace + items itemTrace + errors errorTrace } const mockFinishDuration = 42 * time.Second @@ -33,6 +43,10 @@ const mockFinishDuration = 42 * time.Second func (p *mockPrinter) Update(progress State, duration time.Duration) { p.trace = append(p.trace, printerTraceEntry{progress, duration, false}) } +func (p *mockPrinter) Error(item string, err error) error { + p.errors = append(p.errors, errorTraceEntry{item, err}) + return nil +} func (p *mockPrinter) CompleteItem(action ItemAction, item string, size uint64) { p.items = append(p.items, itemTraceEntry{action, item, size}) } @@ -40,20 +54,21 @@ func (p *mockPrinter) Finish(progress State, _ time.Duration) { p.trace = append(p.trace, printerTraceEntry{progress, mockFinishDuration, true}) } -func testProgress(fn func(progress *Progress) bool) (printerTrace, itemTrace) { +func testProgress(fn func(progress *Progress) bool) (printerTrace, itemTrace, errorTrace) { printer := &mockPrinter{} progress := NewProgress(printer, 0) final := fn(progress) progress.update(0, final) trace := append(printerTrace{}, printer.trace...) items := append(itemTrace{}, printer.items...) + errors := append(errorTrace{}, printer.errors...) 
// cleanup to avoid goroutine leak, but copy trace first progress.Finish() - return trace, items + return trace, items, errors } func TestNew(t *testing.T) { - result, items := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { return false }) test.Equals(t, printerTrace{ @@ -65,7 +80,7 @@ func TestNew(t *testing.T) { func TestAddFile(t *testing.T) { fileSize := uint64(100) - result, items := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) return false }) @@ -79,7 +94,7 @@ func TestFirstProgressOnAFile(t *testing.T) { expectedBytesWritten := uint64(5) expectedBytesTotal := uint64(100) - result, items := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { progress.AddFile(expectedBytesTotal) progress.AddProgress("test", ActionFileUpdated, expectedBytesWritten, expectedBytesTotal) return false @@ -93,7 +108,7 @@ func TestFirstProgressOnAFile(t *testing.T) { func TestLastProgressOnAFile(t *testing.T) { fileSize := uint64(100) - result, items := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddProgress("test", ActionFileUpdated, 30, fileSize) progress.AddProgress("test", ActionFileUpdated, 35, fileSize) @@ -111,7 +126,7 @@ func TestLastProgressOnAFile(t *testing.T) { func TestLastProgressOnLastFile(t *testing.T) { fileSize := uint64(100) - result, items := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) progress.AddProgress("test1", ActionFileUpdated, 50, 50) @@ -131,7 +146,7 @@ func TestLastProgressOnLastFile(t *testing.T) { func TestSummaryOnSuccess(t *testing.T) { fileSize := uint64(100) - result, _ := testProgress(func(progress *Progress) bool { + result, _, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) progress.AddProgress("test1", ActionFileUpdated, 50, 50) @@ -146,7 +161,7 @@ func TestSummaryOnSuccess(t *testing.T) { func TestSummaryOnErrors(t *testing.T) { fileSize := uint64(100) - result, _ := testProgress(func(progress *Progress) bool { + result, _, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(50) progress.AddProgress("test1", ActionFileUpdated, 50, 50) @@ -161,7 +176,7 @@ func TestSummaryOnErrors(t *testing.T) { func TestSkipFile(t *testing.T) { fileSize := uint64(100) - result, items := testProgress(func(progress *Progress) bool { + result, items, _ := testProgress(func(progress *Progress) bool { progress.AddSkippedFile("test", fileSize) return true }) @@ -176,7 +191,7 @@ func TestSkipFile(t *testing.T) { func TestProgressTypes(t *testing.T) { fileSize := uint64(100) - _, items := testProgress(func(progress *Progress) bool { + _, items, _ := testProgress(func(progress *Progress) bool { progress.AddFile(fileSize) progress.AddFile(0) progress.AddProgress("dir", ActionDirRestored, fileSize, fileSize) @@ -190,3 +205,17 @@ func TestProgressTypes(t *testing.T) { itemTraceEntry{ActionDeleted, "del", 0}, }, items) } + +func TestProgressError(t *testing.T) { + err1 := errors.New("err1") + err2 := errors.New("err2") + _, _, errors := testProgress(func(progress *Progress) bool { + test.Equals(t, progress.Error("first", err1), nil) + test.Equals(t, progress.Error("second", 
err2), nil) + return true + }) + test.Equals(t, errorTrace{ + errorTraceEntry{"first", err1}, + errorTraceEntry{"second", err2}, + }, errors) +} diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go index ec512f369dd..ba0dcd00743 100644 --- a/internal/ui/restore/text.go +++ b/internal/ui/restore/text.go @@ -8,14 +8,15 @@ import ( ) type textPrinter struct { - terminal term - verbosity uint + *ui.Message + + terminal ui.Terminal } -func NewTextProgress(terminal term, verbosity uint) ProgressPrinter { +func NewTextProgress(terminal ui.Terminal, verbosity uint) ProgressPrinter { return &textPrinter{ - terminal: terminal, - verbosity: verbosity, + Message: ui.NewMessage(terminal, verbosity), + terminal: terminal, } } @@ -33,11 +34,12 @@ func (t *textPrinter) Update(p State, duration time.Duration) { t.terminal.SetStatus([]string{progress}) } -func (t *textPrinter) CompleteItem(messageType ItemAction, item string, size uint64) { - if t.verbosity < 3 { - return - } +func (t *textPrinter) Error(item string, err error) error { + t.E("ignoring error for %s: %s\n", item, err) + return nil +} +func (t *textPrinter) CompleteItem(messageType ItemAction, item string, size uint64) { var action string switch messageType { case ActionDirRestored: @@ -57,9 +59,9 @@ func (t *textPrinter) CompleteItem(messageType ItemAction, item string, size uin } if messageType == ActionDirRestored || messageType == ActionOtherRestored || messageType == ActionDeleted { - t.terminal.Print(fmt.Sprintf("%-9v %v", action, item)) + t.VV("%-9v %v", action, item) } else { - t.terminal.Print(fmt.Sprintf("%-9v %v with size %v", action, item, ui.FormatBytes(size))) + t.VV("%-9v %v with size %v", action, item, ui.FormatBytes(size)) } } diff --git a/internal/ui/restore/text_test.go b/internal/ui/restore/text_test.go index b198a27df89..4ffb1615d83 100644 --- a/internal/ui/restore/text_test.go +++ b/internal/ui/restore/text_test.go @@ -4,23 +4,13 @@ import ( "testing" "time" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui" ) -type mockTerm struct { - output []string -} - -func (m *mockTerm) Print(line string) { - m.output = append(m.output, line) -} - -func (m *mockTerm) SetStatus(lines []string) { - m.output = append([]string{}, lines...) 
-} - -func createTextProgress() (*mockTerm, ProgressPrinter) { - term := &mockTerm{} +func createTextProgress() (*ui.MockTerminal, ProgressPrinter) { + term := &ui.MockTerminal{} printer := NewTextProgress(term, 3) return term, printer } @@ -28,31 +18,31 @@ func createTextProgress() (*mockTerm, ProgressPrinter) { func TestPrintUpdate(t *testing.T) { term, printer := createTextProgress() printer.Update(State{3, 11, 0, 29, 47, 0}, 5*time.Second) - test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B"}, term.output) + test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B"}, term.Output) } func TestPrintUpdateWithSkipped(t *testing.T) { term, printer := createTextProgress() printer.Update(State{3, 11, 2, 29, 47, 59}, 5*time.Second) - test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B, skipped 2 files/dirs 59 B"}, term.output) + test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B, skipped 2 files/dirs 59 B"}, term.Output) } func TestPrintSummaryOnSuccess(t *testing.T) { term, printer := createTextProgress() printer.Finish(State{11, 11, 0, 47, 47, 0}, 5*time.Second) - test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05"}, term.output) + test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05"}, term.Output) } func TestPrintSummaryOnErrors(t *testing.T) { term, printer := createTextProgress() printer.Finish(State{3, 11, 0, 29, 47, 0}, 5*time.Second) - test.Equals(t, []string{"Summary: Restored 3 / 11 files/dirs (29 B / 47 B) in 0:05"}, term.output) + test.Equals(t, []string{"Summary: Restored 3 / 11 files/dirs (29 B / 47 B) in 0:05"}, term.Output) } func TestPrintSummaryOnSuccessWithSkipped(t *testing.T) { term, printer := createTextProgress() printer.Finish(State{11, 11, 2, 47, 47, 59}, 5*time.Second) - test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05, skipped 2 files/dirs 59 B"}, term.output) + test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05, skipped 2 files/dirs 59 B"}, term.Output) } func TestPrintCompleteItem(t *testing.T) { @@ -70,6 +60,12 @@ func TestPrintCompleteItem(t *testing.T) { } { term, printer := createTextProgress() printer.CompleteItem(data.action, "test", data.size) - test.Equals(t, []string{data.expected}, term.output) + test.Equals(t, []string{data.expected}, term.Output) } } + +func TestError(t *testing.T) { + term, printer := createTextProgress() + test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil) + test.Equals(t, []string{"ignoring error for /path: error \"message\"\n"}, term.Errors) +} diff --git a/internal/ui/terminal.go b/internal/ui/terminal.go new file mode 100644 index 00000000000..2d9418a61f0 --- /dev/null +++ b/internal/ui/terminal.go @@ -0,0 +1,10 @@ +package ui + +// Terminal is used to write messages and display status lines which can be +// updated. See termstatus.Terminal for a concrete implementation. +type Terminal interface { + Print(line string) + Error(line string) + SetStatus(lines []string) + CanUpdateStatus() bool +} From 03be64a094c88284ee3bdf36cc2cd382e820517f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 3 Aug 2024 22:17:07 +0200 Subject: [PATCH 515/893] mount: enable debug log for TestMountSameTimestamps The test has started to become flaky. 
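To help diagnose it, the test now opts into the debug log via the
existing debug test helpers. The pattern, roughly (a sketch of the
change; the actual hunk is below), is:

    debugEnabled := debug.TestLogToStderr(t)
    if debugEnabled {
        // only tear down logging that this call itself enabled
        defer debug.TestDisableLog(t)
    }

The guard around TestDisableLog presumably leaves an externally
configured debug log untouched.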
--- cmd/restic/cmd_mount_integration_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/restic/cmd_mount_integration_test.go b/cmd/restic/cmd_mount_integration_test.go index d764b4e4f0f..c5f4d193a26 100644 --- a/cmd/restic/cmd_mount_integration_test.go +++ b/cmd/restic/cmd_mount_integration_test.go @@ -13,6 +13,7 @@ import ( "time" systemFuse "github.com/anacrolix/fuse" + "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -205,6 +206,11 @@ func TestMountSameTimestamps(t *testing.T) { t.Skip("Skipping fuse tests") } + debugEnabled := debug.TestLogToStderr(t) + if debugEnabled { + defer debug.TestDisableLog(t) + } + env, cleanup := withTestEnvironment(t) // must list snapshots more than once env.gopts.backendTestHook = nil From 88f59fc2d6d40b71e74d8566117d496a9f949203 Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Sat, 3 Aug 2024 15:29:10 -0400 Subject: [PATCH 516/893] json: switch backup and restore errors from string to struct types This keeps backwards compatibility with the previous empty structs. And maybe we'd want to put other fields into the inner struct later, rather than the outer message. --- doc/075_scripting.rst | 15 ++++++++++++++- internal/ui/backup/json.go | 16 ++++++++++------ internal/ui/backup/json_test.go | 4 ++-- internal/ui/restore/json.go | 14 +++++++++----- internal/ui/restore/json_test.go | 2 +- 5 files changed, 36 insertions(+), 15 deletions(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 87ae4fcf46f..fa7fa1b6ec1 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -139,7 +139,7 @@ Error +----------------------+-------------------------------------------+ | ``message_type`` | Always "error" | +----------------------+-------------------------------------------+ -| ``error`` | Error message | +| ``error.message`` | Error message | +----------------------+-------------------------------------------+ | ``during`` | What restic was trying to do | +----------------------+-------------------------------------------+ @@ -539,6 +539,19 @@ Status |``bytes_skipped`` | Total size of skipped files | +----------------------+------------------------------------------------------------+ +Error +^^^^^ + ++----------------------+-------------------------------------------+ +| ``message_type`` | Always "error" | ++----------------------+-------------------------------------------+ +| ``error.message`` | Error message | ++----------------------+-------------------------------------------+ +| ``during`` | Always "restore" | ++----------------------+-------------------------------------------+ +| ``item`` | Usually, the path of the problematic file | ++----------------------+-------------------------------------------+ + Verbose Status ^^^^^^^^^^^^^^ diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index bb6685136f8..f4a76afd7a6 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -67,7 +67,7 @@ func (b *JSONProgress) Update(total, processed Counter, errors uint, currentFile func (b *JSONProgress) ScannerError(item string, err error) error { b.error(errorUpdate{ MessageType: "error", - Error: err.Error(), + Error: errorObject{err.Error()}, During: "scan", Item: item, }) @@ -78,7 +78,7 @@ func (b *JSONProgress) ScannerError(item string, err error) error { func (b *JSONProgress) Error(item string, err error) error { b.error(errorUpdate{ MessageType: "error", - Error: err.Error(), + Error: 
errorObject{err.Error()}, During: "archival", Item: item, }) @@ -205,11 +205,15 @@ type statusUpdate struct { CurrentFiles []string `json:"current_files,omitempty"` } +type errorObject struct { + Message string `json:"message"` +} + type errorUpdate struct { - MessageType string `json:"message_type"` // "error" - Error string `json:"error"` - During string `json:"during"` - Item string `json:"item"` + MessageType string `json:"message_type"` // "error" + Error errorObject `json:"error"` + During string `json:"during"` + Item string `json:"item"` } type verboseUpdate struct { diff --git a/internal/ui/backup/json_test.go b/internal/ui/backup/json_test.go index 4846279b38b..b4872efd51f 100644 --- a/internal/ui/backup/json_test.go +++ b/internal/ui/backup/json_test.go @@ -17,11 +17,11 @@ func createJSONProgress() (*ui.MockTerminal, ProgressPrinter) { func TestJSONError(t *testing.T) { term, printer := createJSONProgress() test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil) - test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":\"error \\\"message\\\"\",\"during\":\"archival\",\"item\":\"/path\"}\n"}, term.Errors) + test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":{\"message\":\"error \\\"message\\\"\"},\"during\":\"archival\",\"item\":\"/path\"}\n"}, term.Errors) } func TestJSONScannerError(t *testing.T) { term, printer := createJSONProgress() test.Equals(t, printer.ScannerError("/path", errors.New("error \"message\"")), nil) - test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":\"error \\\"message\\\"\",\"during\":\"scan\",\"item\":\"/path\"}\n"}, term.Errors) + test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":{\"message\":\"error \\\"message\\\"\"},\"during\":\"scan\",\"item\":\"/path\"}\n"}, term.Errors) } diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go index 4135dd6675e..72cc38a6e13 100644 --- a/internal/ui/restore/json.go +++ b/internal/ui/restore/json.go @@ -48,7 +48,7 @@ func (t *jsonPrinter) Update(p State, duration time.Duration) { func (t *jsonPrinter) Error(item string, err error) error { t.error(errorUpdate{ MessageType: "error", - Error: err.Error(), + Error: errorObject{err.Error()}, During: "restore", Item: item, }) @@ -113,11 +113,15 @@ type statusUpdate struct { BytesSkipped uint64 `json:"bytes_skipped,omitempty"` } +type errorObject struct { + Message string `json:"message"` +} + type errorUpdate struct { - MessageType string `json:"message_type"` // "error" - Error string `json:"error"` - During string `json:"during"` - Item string `json:"item"` + MessageType string `json:"message_type"` // "error" + Error errorObject `json:"error"` + During string `json:"during"` + Item string `json:"item"` } type verboseUpdate struct { diff --git a/internal/ui/restore/json_test.go b/internal/ui/restore/json_test.go index 1e0f80a4f7f..917a48070dd 100644 --- a/internal/ui/restore/json_test.go +++ b/internal/ui/restore/json_test.go @@ -66,5 +66,5 @@ func TestJSONPrintCompleteItem(t *testing.T) { func TestJSONError(t *testing.T) { term, printer := createJSONProgress() test.Equals(t, printer.Error("/path", errors.New("error \"message\"")), nil) - test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":\"error \\\"message\\\"\",\"during\":\"restore\",\"item\":\"/path\"}\n"}, term.Errors) + test.Equals(t, []string{"{\"message_type\":\"error\",\"error\":{\"message\":\"error \\\"message\\\"\"},\"during\":\"restore\",\"item\":\"/path\"}\n"}, term.Errors) } From 
73c9780321ebc8024b1df3d295818af3fbe79de7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 3 Aug 2024 19:10:11 +0200 Subject: [PATCH 517/893] backup: store but warn if extended metadata for item is incomplete Files were not included in the backup if the extended metadata for the file could not be read. This is rather drastic. Instead settle on returning a warning but still including the file in the backup. --- changelog/unreleased/pull-4977 | 15 +++++++++++++++ internal/archiver/archiver.go | 3 ++- internal/archiver/archiver_test.go | 26 ++++++++++++++++++++++++++ 3 files changed, 43 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/pull-4977 diff --git a/changelog/unreleased/pull-4977 b/changelog/unreleased/pull-4977 new file mode 100644 index 00000000000..702df29a7c4 --- /dev/null +++ b/changelog/unreleased/pull-4977 @@ -0,0 +1,15 @@ +Change: let `backup` store files with incomplete metadata + +If restic failed to read the extended metadata for a file or folder while +creating a backup, then the file or folder was not included in the resulting +snapshot. Instead, only a warning message was printed along with exiting +with exit code 3. + +Now, restic also includes items for which the extended metadata could not +be read in a snapshot. The warning message has been changed to read +``` +incomplete metadata for /path/to/file: details on error +``` + +https://github.com/restic/restic/issues/4953 +https://github.com/restic/restic/pull/4977 diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index d9f089e81a1..e441512982d 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -263,7 +263,8 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, // overwrite name to match that within the snapshot node.Name = path.Base(snPath) if err != nil { - return node, fmt.Errorf("nodeFromFileInfo %v: %w", filename, err) + err = fmt.Errorf("incomplete metadata for %v: %w", filename, err) + return node, arch.error(filename, err) } return node, err } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index f38d5b0de3b..b519387db4e 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -3,6 +3,7 @@ package archiver import ( "bytes" "context" + "fmt" "io" "os" "path/filepath" @@ -2338,3 +2339,28 @@ func TestRacyFileSwap(t *testing.T) { t.Errorf("Save() excluded the node, that's unexpected") } } + +func TestMetadataBackupErrorFiltering(t *testing.T) { + tempdir := t.TempDir() + repo := repository.TestRepository(t) + + filename := filepath.Join(tempdir, "file") + rtest.OK(t, os.WriteFile(filename, []byte("example"), 0o600)) + fi, err := os.Stat(filename) + rtest.OK(t, err) + + arch := New(repo, fs.Local{}, Options{}) + + var filteredErr error + replacementErr := fmt.Errorf("replacement") + arch.Error = func(item string, err error) error { + filteredErr = err + return replacementErr + } + + // check that errors from reading extended metadata are properly filtered + node, err := arch.nodeFromFileInfo("file", filename+"invalid", fi, false) + rtest.Assert(t, node != nil, "node is missing") + rtest.Assert(t, err == replacementErr, "expected %v got %v", replacementErr, err) + rtest.Assert(t, filteredErr != nil, "missing inner error") +} From 56113a8da7000d3264f014cd33bb1bceaff225e4 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sat, 3 Aug 2024 16:03:30 -0600 Subject: [PATCH 518/893] Skip EA processing for 
volumes that do not support EA --- internal/fs/ea_windows.go | 40 +++++++++++++++++++++++++++++++++ internal/restic/node_windows.go | 20 +++++++++++++++-- 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go index 08466c33f0e..e69f595a80f 100644 --- a/internal/fs/ea_windows.go +++ b/internal/fs/ea_windows.go @@ -53,6 +53,15 @@ var ( errInvalidEaBuffer = errors.New("invalid extended attribute buffer") errEaNameTooLarge = errors.New("extended attribute name too large") errEaValueTooLarge = errors.New("extended attribute value too large") + + kernel32dll = syscall.NewLazyDLL("kernel32.dll") + + procGetVolumeInformationW = kernel32dll.NewProc("GetVolumeInformationW") +) + +const ( + // fileSupportsExtendedAttributes is a bitmask that indicates whether the file system supports extended attributes. + fileSupportsExtendedAttributes = 0x00000004 ) // ExtendedAttribute represents a single Windows EA. @@ -283,3 +292,34 @@ func setFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen ui status = ntStatus(r0) return } + +// PathSupportsExtendedAttributes returns true if the path supports extended attributes. +func PathSupportsExtendedAttributes(path string) (bool, error) { + var ( + volumeName [syscall.MAX_PATH + 1]uint16 + fsName [syscall.MAX_PATH + 1]uint16 + volumeSerial uint32 + maxComponentLen uint32 + fileSystemFlags uint32 + ) + utf16Path, err := windows.UTF16PtrFromString(path) + if err != nil { + return false, err + } + ret, _, err := procGetVolumeInformationW.Call( + uintptr(unsafe.Pointer(utf16Path)), + uintptr(unsafe.Pointer(&volumeName[0])), + uintptr(len(volumeName)), + uintptr(unsafe.Pointer(&volumeSerial)), + uintptr(unsafe.Pointer(&maxComponentLen)), + uintptr(unsafe.Pointer(&fileSystemFlags)), + uintptr(unsafe.Pointer(&fsName[0])), + uintptr(len(fsName)), + ) + if ret == 0 { + return false, err + } + + supportsEAs := (fileSystemFlags & fileSupportsExtendedAttributes) != 0 + return supportsEAs, nil +} diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 2785e041230..ea167b96de1 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -32,6 +32,9 @@ var ( modAdvapi32 = syscall.NewLazyDLL("advapi32.dll") procEncryptFile = modAdvapi32.NewProc("EncryptFileW") procDecryptFile = modAdvapi32.NewProc("DecryptFileW") + + // eaUnsupportedVolumesMap is a map of volumes that do not support extended attributes. + eaUnsupportedVolumesMap = map[string]bool{} ) // mknod is not supported on Windows. @@ -358,7 +361,20 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT // Also do not allow processing of extended attributes for ADS. return false, nil } - if !strings.HasSuffix(filepath.Clean(path), `\`) { + if strings.HasSuffix(filepath.Clean(path), `\`) { + // This path is a volume + supportsEAs, err := fs.PathSupportsExtendedAttributes(path) + if err != nil { + return false, err + } + if supportsEAs { + delete(eaUnsupportedVolumesMap, filepath.VolumeName(path)) + } else { + // Add the volume to the map of volumes that do not support extended attributes. + eaUnsupportedVolumesMap[filepath.VolumeName(path)] = true + } + return supportsEAs, nil + } else { // Do not process file attributes and created time for windows directories like // C:, D: // Filepath.Clean(path) ends with '\' for Windows root drives only. 
@@ -375,8 +391,8 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT FileAttributes: &stat.FileAttributes, SecurityDescriptor: sd, }) + return !eaUnsupportedVolumesMap[filepath.VolumeName(path)], err } - return true, err } // windowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection From 041c0705e43003be2c0d100ca98379d0126b3935 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sat, 3 Aug 2024 16:19:59 -0600 Subject: [PATCH 519/893] Add changelog --- changelog/unreleased/pull-4980 | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 changelog/unreleased/pull-4980 diff --git a/changelog/unreleased/pull-4980 b/changelog/unreleased/pull-4980 new file mode 100644 index 00000000000..afa8a740681 --- /dev/null +++ b/changelog/unreleased/pull-4980 @@ -0,0 +1,10 @@ +Bugfix: Skip EA processing in Windows for volumes that do not support EA + +Restic was failing to backup files on some windows paths like network +drives because of errors while fetching ExtendedAttributes. +Either they return error codes like windows.E_NOT_SET or +windows.ERROR_INVALID_FUNCTION or it results in slower backups. +Restic now completely skips the attempt to fetch Extended Attributes +for such volumes where it is not supported. + +https://github.com/restic/restic/pull/4980 From 8c8a066c0e0988d840d5a0fb9f4d383615791ec0 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sat, 3 Aug 2024 18:06:47 -0600 Subject: [PATCH 520/893] Correct the bitmask for fileSupportsExtendedAttributes and add link --- internal/fs/ea_windows.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go index e69f595a80f..b5d1e3cc877 100644 --- a/internal/fs/ea_windows.go +++ b/internal/fs/ea_windows.go @@ -61,7 +61,8 @@ var ( const ( // fileSupportsExtendedAttributes is a bitmask that indicates whether the file system supports extended attributes. - fileSupportsExtendedAttributes = 0x00000004 + // https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntifs/ns-ntifs-_file_fs_attribute_information + fileSupportsExtendedAttributes = 0x00800000 ) // ExtendedAttribute represents a single Windows EA. From 9dedba6dfc91eec3edfa569a775b161a4981d5e6 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 4 Aug 2024 10:23:39 -0600 Subject: [PATCH 521/893] Address review comments --- internal/fs/ea_windows.go | 38 +++------------- internal/restic/node_windows.go | 78 +++++++++++++++++++-------------- 2 files changed, 52 insertions(+), 64 deletions(-) diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go index b5d1e3cc877..d19a1ee6ae8 100644 --- a/internal/fs/ea_windows.go +++ b/internal/fs/ea_windows.go @@ -53,16 +53,6 @@ var ( errInvalidEaBuffer = errors.New("invalid extended attribute buffer") errEaNameTooLarge = errors.New("extended attribute name too large") errEaValueTooLarge = errors.New("extended attribute value too large") - - kernel32dll = syscall.NewLazyDLL("kernel32.dll") - - procGetVolumeInformationW = kernel32dll.NewProc("GetVolumeInformationW") -) - -const ( - // fileSupportsExtendedAttributes is a bitmask that indicates whether the file system supports extended attributes. 
- // https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntifs/ns-ntifs-_file_fs_attribute_information - fileSupportsExtendedAttributes = 0x00800000 ) // ExtendedAttribute represents a single Windows EA. @@ -295,32 +285,16 @@ func setFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen ui } // PathSupportsExtendedAttributes returns true if the path supports extended attributes. -func PathSupportsExtendedAttributes(path string) (bool, error) { - var ( - volumeName [syscall.MAX_PATH + 1]uint16 - fsName [syscall.MAX_PATH + 1]uint16 - volumeSerial uint32 - maxComponentLen uint32 - fileSystemFlags uint32 - ) +func PathSupportsExtendedAttributes(path string) (supported bool, err error) { + var fileSystemFlags uint32 utf16Path, err := windows.UTF16PtrFromString(path) if err != nil { return false, err } - ret, _, err := procGetVolumeInformationW.Call( - uintptr(unsafe.Pointer(utf16Path)), - uintptr(unsafe.Pointer(&volumeName[0])), - uintptr(len(volumeName)), - uintptr(unsafe.Pointer(&volumeSerial)), - uintptr(unsafe.Pointer(&maxComponentLen)), - uintptr(unsafe.Pointer(&fileSystemFlags)), - uintptr(unsafe.Pointer(&fsName[0])), - uintptr(len(fsName)), - ) - if ret == 0 { + err = windows.GetVolumeInformation(utf16Path, nil, 0, nil, nil, &fileSystemFlags, nil, 0) + if err != nil { return false, err } - - supportsEAs := (fileSystemFlags & fileSupportsExtendedAttributes) != 0 - return supportsEAs, nil + supported = (fileSystemFlags & windows.FILE_SUPPORTS_EXTENDED_ATTRIBUTES) != 0 + return supported, nil } diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index ea167b96de1..0d72ecb3643 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -8,6 +8,7 @@ import ( "reflect" "runtime" "strings" + "sync" "syscall" "unsafe" @@ -33,8 +34,8 @@ var ( procEncryptFile = modAdvapi32.NewProc("EncryptFileW") procDecryptFile = modAdvapi32.NewProc("DecryptFileW") - // eaUnsupportedVolumesMap is a map of volumes that do not support extended attributes. - eaUnsupportedVolumesMap = map[string]bool{} + // eaSupportedVolumesMap is a map of volumes to boolean values indicating if they support extended attributes. + eaSupportedVolumesMap = sync.Map{} ) // mknod is not supported on Windows. @@ -354,45 +355,58 @@ func decryptFile(pathPointer *uint16) error { } // fillGenericAttributes fills in the generic attributes for windows like File Attributes, -// Created time etc. +// Created time and Security Descriptors. +// It also checks if the volume supports extended attributes and stores the result in a map +// so that it does not have to be checked again for subsequent calls for paths in the same volume. func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT) (allowExtended bool, err error) { if strings.Contains(filepath.Base(path), ":") { - //Do not process for Alternate Data Streams in Windows + // Do not process for Alternate Data Streams in Windows // Also do not allow processing of extended attributes for ADS. 
return false, nil } + volumeName := filepath.VolumeName(path) + allowExtended, err = checkAndStoreEASupport(volumeName) + if err != nil { + return false, err + } if strings.HasSuffix(filepath.Clean(path), `\`) { - // This path is a volume - supportsEAs, err := fs.PathSupportsExtendedAttributes(path) - if err != nil { - return false, err - } - if supportsEAs { - delete(eaUnsupportedVolumesMap, filepath.VolumeName(path)) - } else { - // Add the volume to the map of volumes that do not support extended attributes. - eaUnsupportedVolumesMap[filepath.VolumeName(path)] = true - } - return supportsEAs, nil - } else { - // Do not process file attributes and created time for windows directories like - // C:, D: - // Filepath.Clean(path) ends with '\' for Windows root drives only. - var sd *[]byte - if node.Type == "file" || node.Type == "dir" { - if sd, err = fs.GetSecurityDescriptor(path); err != nil { - return true, err - } + // filepath.Clean(path) ends with '\' for Windows root volume paths only + // Do not process file attributes, created time and sd for windows root volume paths + // Security descriptors are not supported for root volume paths. + // Though file attributes and created time are supported for root volume paths, + // we ignore them and we do not want to replace them during every restore. + return allowExtended, nil + } + + var sd *[]byte + if node.Type == "file" || node.Type == "dir" { + if sd, err = fs.GetSecurityDescriptor(path); err != nil { + return true, err } + } + // Add Windows attributes + node.GenericAttributes, err = WindowsAttrsToGenericAttributes(WindowsAttributes{ + CreationTime: getCreationTime(fi, path), + FileAttributes: &stat.FileAttributes, + SecurityDescriptor: sd, + }) + return allowExtended, err +} + +// checkAndStoreEASupport checks if a volume supports extended attributes and stores the result in a map +// If the result is already in the map, it returns the result from the map. +func checkAndStoreEASupport(volumeName string) (isEASupportedVolume bool, err error) { + eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeName) + if exists { + return eaSupportedValue.(bool), nil + } - // Add Windows attributes - node.GenericAttributes, err = WindowsAttrsToGenericAttributes(WindowsAttributes{ - CreationTime: getCreationTime(fi, path), - FileAttributes: &stat.FileAttributes, - SecurityDescriptor: sd, - }) - return !eaUnsupportedVolumesMap[filepath.VolumeName(path)], err + // Add backslash to the volume name to ensure it is a valid path + isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeName + `\`) + if err == nil { + eaSupportedVolumesMap.Store(volumeName, isEASupportedVolume) } + return isEASupportedVolume, err } // windowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection From 89712f66406c779e469e2060621d2a50184fdf99 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 4 Aug 2024 10:36:13 -0600 Subject: [PATCH 522/893] Formatted --- internal/restic/node_windows.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 0d72ecb3643..a0c2ebe8522 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -364,11 +364,13 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT // Also do not allow processing of extended attributes for ADS. 
return false, nil } + volumeName := filepath.VolumeName(path) allowExtended, err = checkAndStoreEASupport(volumeName) if err != nil { return false, err } + if strings.HasSuffix(filepath.Clean(path), `\`) { // filepath.Clean(path) ends with '\' for Windows root volume paths only // Do not process file attributes, created time and sd for windows root volume paths From c13725b5d042e87ee59756236715a6008ecb8044 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 4 Aug 2024 11:05:40 -0600 Subject: [PATCH 523/893] Check EA support only for volumes, files and dirs --- internal/restic/node_windows.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index a0c2ebe8522..0ecaa5a680b 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -365,23 +365,26 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT return false, nil } - volumeName := filepath.VolumeName(path) - allowExtended, err = checkAndStoreEASupport(volumeName) - if err != nil { - return false, err - } - if strings.HasSuffix(filepath.Clean(path), `\`) { // filepath.Clean(path) ends with '\' for Windows root volume paths only // Do not process file attributes, created time and sd for windows root volume paths // Security descriptors are not supported for root volume paths. // Though file attributes and created time are supported for root volume paths, // we ignore them and we do not want to replace them during every restore. + allowExtended, err = checkAndStoreEASupport(filepath.VolumeName(path)) + if err != nil { + return false, err + } return allowExtended, nil } var sd *[]byte if node.Type == "file" || node.Type == "dir" { + // Check EA support and get security descriptor for file/dir only + allowExtended, err = checkAndStoreEASupport(filepath.VolumeName(path)) + if err != nil { + return false, err + } if sd, err = fs.GetSecurityDescriptor(path); err != nil { return true, err } From 85639f5159d3bb22174b7a249aab5dd60f114e9f Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 4 Aug 2024 13:19:13 -0600 Subject: [PATCH 524/893] Add handling for relative paths, vss paths, UNC paths --- internal/restic/node_windows.go | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 0ecaa5a680b..27dbbf9a308 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -398,9 +398,27 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT return allowExtended, err } -// checkAndStoreEASupport checks if a volume supports extended attributes and stores the result in a map +// checkAndStoreEASupport checks if the volume of the path supports extended attributes and stores the result in a map // If the result is already in the map, it returns the result from the map. -func checkAndStoreEASupport(volumeName string) (isEASupportedVolume bool, err error) { +func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { + // Check if it's a UNC path and format it correctly + if strings.HasPrefix(path, `\\?\UNC\`) { + // Convert \\?\UNC\ path to standard path to get the volume name correctly + path = `\\` + strings.TrimPrefix(path, `\\?\UNC\`) + } else if strings.HasPrefix(path, `\\?\GLOBALROOT`) { + // EAs are not supported for \\?\GLOBALROOT i.e. 
VSS snapshots + return false, nil + } else { + // Use the absolute path + path, err = filepath.Abs(path) + if err != nil { + return false, fmt.Errorf("failed to get absolute path: %w", err) + } + } + volumeName := filepath.VolumeName(path) + if volumeName == "" { + return false, nil + } eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeName) if exists { return eaSupportedValue.(bool), nil From 71632a8197a4862e0787a9651995a8caaad3ce3b Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Mon, 5 Aug 2024 16:03:43 -0600 Subject: [PATCH 525/893] Handle extended length paths --- internal/restic/node_windows.go | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 27dbbf9a308..34df4e9c161 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -38,6 +38,12 @@ var ( eaSupportedVolumesMap = sync.Map{} ) +const ( + extendedPathPrefix = `\\?\` + uncPathPrefix = `\\?\UNC\` + globalRootPrefix = `\\?\GLOBALROOT\` +) + // mknod is not supported on Windows. func mknod(_ string, _ uint32, _ uint64) (err error) { return errors.New("device nodes cannot be created on windows") @@ -371,7 +377,7 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT // Security descriptors are not supported for root volume paths. // Though file attributes and created time are supported for root volume paths, // we ignore them and we do not want to replace them during every restore. - allowExtended, err = checkAndStoreEASupport(filepath.VolumeName(path)) + allowExtended, err = checkAndStoreEASupport(path) if err != nil { return false, err } @@ -381,7 +387,7 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT var sd *[]byte if node.Type == "file" || node.Type == "dir" { // Check EA support and get security descriptor for file/dir only - allowExtended, err = checkAndStoreEASupport(filepath.VolumeName(path)) + allowExtended, err = checkAndStoreEASupport(path) if err != nil { return false, err } @@ -401,11 +407,14 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT // checkAndStoreEASupport checks if the volume of the path supports extended attributes and stores the result in a map // If the result is already in the map, it returns the result from the map. func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { - // Check if it's a UNC path and format it correctly - if strings.HasPrefix(path, `\\?\UNC\`) { - // Convert \\?\UNC\ path to standard path to get the volume name correctly - path = `\\` + strings.TrimPrefix(path, `\\?\UNC\`) - } else if strings.HasPrefix(path, `\\?\GLOBALROOT`) { + // Check if it's an extended length path + if strings.HasPrefix(path, uncPathPrefix) { + // Convert \\?\UNC\ extended path to standard path to get the volume name correctly + path = `\\` + path[len(uncPathPrefix):] + } else if strings.HasPrefix(path, extendedPathPrefix) { + //Extended length path prefix needs to be trimmed to get the volume name correctly + path = path[len(extendedPathPrefix):] + } else if strings.HasPrefix(path, globalRootPrefix) { // EAs are not supported for \\?\GLOBALROOT i.e. 
VSS snapshots return false, nil } else { From 853a686994de7c46be92f7f29d88baa62161800d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 10 Aug 2024 18:06:24 +0200 Subject: [PATCH 526/893] backend: return correct error on upload/request timeout --- internal/backend/watchdog_roundtriper.go | 3 +++ internal/backend/watchdog_roundtriper_test.go | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/internal/backend/watchdog_roundtriper.go b/internal/backend/watchdog_roundtriper.go index e3e10d7fedf..dc270b974f3 100644 --- a/internal/backend/watchdog_roundtriper.go +++ b/internal/backend/watchdog_roundtriper.go @@ -65,6 +65,9 @@ func (w *watchdogRoundtripper) RoundTrip(req *http.Request) (*http.Response, err resp, err := w.rt.RoundTrip(req) if err != nil { + if isTimeout(err) { + err = errRequestTimeout + } return nil, err } diff --git a/internal/backend/watchdog_roundtriper_test.go b/internal/backend/watchdog_roundtriper_test.go index bc43447e192..a852029ca17 100644 --- a/internal/backend/watchdog_roundtriper_test.go +++ b/internal/backend/watchdog_roundtriper_test.go @@ -135,7 +135,7 @@ func TestUploadTimeout(t *testing.T) { rtest.OK(t, err) resp, err := rt.RoundTrip(req) - rtest.Equals(t, context.Canceled, err) + rtest.Equals(t, errRequestTimeout, err) // make linter happy if resp != nil { rtest.OK(t, resp.Body.Close()) @@ -162,7 +162,7 @@ func TestProcessingTimeout(t *testing.T) { rtest.OK(t, err) resp, err := rt.RoundTrip(req) - rtest.Equals(t, context.Canceled, err) + rtest.Equals(t, errRequestTimeout, err) // make linter happy if resp != nil { rtest.OK(t, resp.Body.Close()) From 18e9d71d7abe152324cb6d0cb364d03e3d9979d5 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sat, 10 Aug 2024 10:38:04 -0600 Subject: [PATCH 527/893] Fix review comments --- changelog/unreleased/pull-4980 | 6 ++++-- internal/fs/sd_windows.go | 3 +++ internal/restic/node_windows.go | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/changelog/unreleased/pull-4980 b/changelog/unreleased/pull-4980 index afa8a740681..264f347faec 100644 --- a/changelog/unreleased/pull-4980 +++ b/changelog/unreleased/pull-4980 @@ -1,10 +1,12 @@ Bugfix: Skip EA processing in Windows for volumes that do not support EA Restic was failing to backup files on some windows paths like network -drives because of errors while fetching ExtendedAttributes. +drives because of errors while fetching extended attributes. Either they return error codes like windows.E_NOT_SET or windows.ERROR_INVALID_FUNCTION or it results in slower backups. -Restic now completely skips the attempt to fetch Extended Attributes +Restic now completely skips the attempt to fetch extended attributes for such volumes where it is not supported. 
https://github.com/restic/restic/pull/4980 +https://github.com/restic/restic/issues/4955 +https://github.com/restic/restic/issues/4950 diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 2da1c5df452..0a73cbe53c6 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -11,6 +11,7 @@ import ( "unsafe" "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" "golang.org/x/sys/windows" ) @@ -60,6 +61,8 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err if err != nil { return nil, fmt.Errorf("get low-level named security info failed with: %w", err) } + } else if errors.Is(err, windows.ERROR_NOT_SUPPORTED) { + return nil, nil } else { return nil, fmt.Errorf("get named security info failed with: %w", err) } diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 34df4e9c161..ceb304d0c72 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -392,7 +392,7 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT return false, err } if sd, err = fs.GetSecurityDescriptor(path); err != nil { - return true, err + return allowExtended, err } } // Add Windows attributes From fa35e72214652de639dc6d9f2685e56905281dfd Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 10 Aug 2024 19:07:59 +0200 Subject: [PATCH 528/893] backend: tweak timeouts to make watchdog timeout test less flaky --- internal/backend/watchdog_roundtriper_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/backend/watchdog_roundtriper_test.go b/internal/backend/watchdog_roundtriper_test.go index bc43447e192..723a311cb95 100644 --- a/internal/backend/watchdog_roundtriper_test.go +++ b/internal/backend/watchdog_roundtriper_test.go @@ -190,7 +190,7 @@ func TestDownloadTimeout(t *testing.T) { })) defer srv.Close() - rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024) + rt := newWatchdogRoundtripper(http.DefaultTransport, 25*time.Millisecond, 1024) req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(bytes.NewReader(msg))) rtest.OK(t, err) From ad48751adbdbf8a48365a753b6dfd748de7bb4f6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 26 Jul 2024 19:07:14 +0200 Subject: [PATCH 529/893] bump required go version to 1.21 --- .github/workflows/tests.yml | 10 ---------- build.go | 2 +- doc/020_installation.rst | 3 +-- go.mod | 2 +- internal/backend/rclone/backend.go | 2 +- internal/backend/rest/rest_test.go | 3 --- internal/backend/rest/rest_unix_test.go | 4 ++-- internal/backend/sftp/sftp.go | 2 +- internal/backend/util/errdot_119.go | 20 -------------------- internal/backend/util/errdot_old.go | 13 ------------- internal/backend/util/foreground.go | 3 +++ 11 files changed, 10 insertions(+), 54 deletions(-) delete mode 100644 internal/backend/util/errdot_119.go delete mode 100644 internal/backend/util/errdot_old.go diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3ca7a9edbce..e3277625adc 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -49,16 +49,6 @@ jobs: os: ubuntu-latest test_fuse: true - - job_name: Linux - go: 1.20.x - os: ubuntu-latest - test_fuse: true - - - job_name: Linux - go: 1.19.x - os: ubuntu-latest - test_fuse: true - name: ${{ matrix.job_name }} Go ${{ matrix.go }} runs-on: ${{ matrix.os }} diff --git a/build.go b/build.go index b3b7f5eee13..5a4baf1c645 100644 --- a/build.go +++ 
b/build.go @@ -58,7 +58,7 @@ var config = Config{ Main: "./cmd/restic", // package name for the main package DefaultBuildTags: []string{"selfupdate"}, // specify build tags which are always used Tests: []string{"./..."}, // tests to run - MinVersion: GoVersion{Major: 1, Minor: 18, Patch: 0}, // minimum Go version supported + MinVersion: GoVersion{Major: 1, Minor: 21, Patch: 0}, // minimum Go version supported } // Config configures the build. diff --git a/doc/020_installation.rst b/doc/020_installation.rst index 17b581a87bb..8566c109eb6 100644 --- a/doc/020_installation.rst +++ b/doc/020_installation.rst @@ -284,8 +284,7 @@ From Source *********** restic is written in the Go programming language and you need at least -Go version 1.19. Building for Solaris requires at least Go version 1.20. -Building restic may also work with older versions of Go, +Go version 1.21. Building restic may also work with older versions of Go, but that's not supported. See the `Getting started `__ guide of the Go project for instructions how to install Go. diff --git a/go.mod b/go.mod index 366d89a7f2f..bf547299e3a 100644 --- a/go.mod +++ b/go.mod @@ -86,4 +86,4 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect ) -go 1.19 +go 1.21 diff --git a/internal/backend/rclone/backend.go b/internal/backend/rclone/backend.go index 25082598f8c..8294aa8c419 100644 --- a/internal/backend/rclone/backend.go +++ b/internal/backend/rclone/backend.go @@ -94,7 +94,7 @@ func run(command string, args ...string) (*StdioConn, *sync.WaitGroup, chan stru err = errW } if err != nil { - if util.IsErrDot(err) { + if errors.Is(err, exec.ErrDot) { return nil, nil, nil, nil, errors.Errorf("cannot implicitly run relative executable %v found in current directory, use -o rclone.program=./ to override", cmd.Path) } return nil, nil, nil, nil, err diff --git a/internal/backend/rest/rest_test.go b/internal/backend/rest/rest_test.go index 93b9a103e81..891f60a874a 100644 --- a/internal/backend/rest/rest_test.go +++ b/internal/backend/rest/rest_test.go @@ -1,6 +1,3 @@ -//go:build go1.20 -// +build go1.20 - package rest_test import ( diff --git a/internal/backend/rest/rest_unix_test.go b/internal/backend/rest/rest_unix_test.go index 85ef7a73d9b..c4f08df0ef4 100644 --- a/internal/backend/rest/rest_unix_test.go +++ b/internal/backend/rest/rest_unix_test.go @@ -1,5 +1,5 @@ -//go:build !windows && go1.20 -// +build !windows,go1.20 +//go:build !windows +// +build !windows package rest_test diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index efbd0c8d5b7..d766591b76f 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -88,7 +88,7 @@ func startClient(cfg Config) (*SFTP, error) { bg, err := util.StartForeground(cmd) if err != nil { - if util.IsErrDot(err) { + if errors.Is(err, exec.ErrDot) { return nil, errors.Errorf("cannot implicitly run relative executable %v found in current directory, use -o sftp.command=./ to override", cmd.Path) } return nil, err diff --git a/internal/backend/util/errdot_119.go b/internal/backend/util/errdot_119.go deleted file mode 100644 index e20ed47b7df..00000000000 --- a/internal/backend/util/errdot_119.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build go1.19 -// +build go1.19 - -// This file provides a function to check whether an error from cmd.Start() is -// exec.ErrDot which was introduced in Go 1.19. -// This function is needed so that we can perform this check only for Go 1.19 and -// up, whereas for older versions we use a dummy/stub in the file errdot_old.go. 
-// Once the minimum Go version restic supports is 1.19, remove this file and -// replace any calls to it with the corresponding code as per below. - -package util - -import ( - "errors" - "os/exec" -) - -func IsErrDot(err error) bool { - return errors.Is(err, exec.ErrDot) -} diff --git a/internal/backend/util/errdot_old.go b/internal/backend/util/errdot_old.go deleted file mode 100644 index 4f7a0b40b3c..00000000000 --- a/internal/backend/util/errdot_old.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !go1.19 -// +build !go1.19 - -// This file provides a stub for IsErrDot() for Go versions below 1.19. -// See the corresponding file errdot_119.go for more information. -// Once the minimum Go version restic supports is 1.19, remove this file -// and perform the actions listed in errdot_119.go. - -package util - -func IsErrDot(err error) bool { - return false -} diff --git a/internal/backend/util/foreground.go b/internal/backend/util/foreground.go index 35cbada1aae..477fc890022 100644 --- a/internal/backend/util/foreground.go +++ b/internal/backend/util/foreground.go @@ -11,6 +11,9 @@ import ( // to the previous process group. // // The command's environment has all RESTIC_* variables removed. +// +// Return exec.ErrDot if it would implicitly run an executable from the current +// directory. func StartForeground(cmd *exec.Cmd) (bg func() error, err error) { env := os.Environ() // Returns a copy that we can modify. From fbecc9db664d0dad2a4f2cd25e5366e79276157f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 26 Jul 2024 19:08:03 +0200 Subject: [PATCH 530/893] upgrade all direct dependencies --- go.mod | 39 ++++++++++++------------ go.sum | 93 +++++++++++++++++++++++++++++----------------------------- 2 files changed, 64 insertions(+), 68 deletions(-) diff --git a/go.mod b/go.mod index bf547299e3a..043e72ed965 100644 --- a/go.mod +++ b/go.mod @@ -2,9 +2,9 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.43.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.3.1 github.com/cenkalti/backoff/v4 v4.3.0 @@ -14,7 +14,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/klauspost/compress v1.17.9 - github.com/minio/minio-go/v7 v7.0.66 + github.com/minio/minio-go/v7 v7.0.74 github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 github.com/peterbourgon/unixtransport v0.0.4 @@ -26,31 +26,33 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.24.0 - golang.org/x/net v0.26.0 + golang.org/x/crypto v0.25.0 + golang.org/x/net v0.27.0 golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.7.0 golang.org/x/sys v0.22.0 golang.org/x/term v0.22.0 golang.org/x/text v0.16.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.187.0 + google.golang.org/api v0.189.0 ) require ( cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.6.1 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect - cloud.google.com/go/iam v1.1.8 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 // indirect + cloud.google.com/go/auth v0.7.2 // indirect + 
cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/iam v1.1.10 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -60,29 +62,24 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.5 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/kr/fs v0.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect + google.golang.org/genproto v0.0.0-20240722135656-d784300faade // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade // indirect google.golang.org/grpc v1.64.1 // indirect google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 2743f20038f..052c9fef01b 100644 --- a/go.sum +++ b/go.sum @@ -1,26 +1,28 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.6.1 h1:T0Zw1XM5c1GlpN2HYr2s+m3vr1p2wy+8VN+Z1FKxW38= -cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/compute/metadata v0.3.0 
h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= -cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= -cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE= +cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs= +cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= +cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/iam v1.1.10 h1:ZSAr64oEhQSClwBL670MsJAW5/RLiC6kfw3Bqmd5ZDI= +cloud.google.com/go/iam v1.1.10/go.mod h1:iEgMq62sg8zx446GCaijmA2Miwg5o3UbO+nI47WHJps= +cloud.google.com/go/longrunning v0.5.9 h1:haH9pAuXdPAMqHvzX0zlWQigXT7B0+CL4/2nXXdBo5k= +cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 h1:1nGuui+4POelzDwI7RG56yfQJHCnKvwfMoU7VsEp+Zg= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 h1:H+U3Gk9zY56G3u872L82bk4thcsy2Gghb9ExT4Zvm1o= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax62XQTd0N4aAqSE5E0DulSpVYK7vc= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 h1:Be6KInmFEKV81c0pOAEbRYehLMwmmGI1exuFj248AMk= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod h1:WCPBHsOXfBVnivScjs2ypRfimjEW0qPVLGgJkZlrIOA= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= 
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= @@ -67,13 +69,17 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -101,8 +107,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= @@ -120,13 +126,11 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/klauspost/compress v1.17.9 
h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -142,15 +146,10 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= -github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= +github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0= +github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -172,6 +171,7 @@ github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6k github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/restic/chunker v0.4.0 h1:YUPYCUn70MYP7VO4yllypp2SjmsRhRJaad3xKu1QFRw= github.com/restic/chunker v0.4.0/go.mod h1:z0cH2BejpW636LXw0R/BGyv+Ey8+m9QGiOanDHItzyw= @@ -180,12 +180,11 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -200,6 +199,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -215,6 +215,7 @@ go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= @@ -224,8 +225,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= @@ -248,8 +249,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod 
h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -272,7 +273,6 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -308,19 +308,19 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.187.0 h1:Mxs7VATVC2v7CY+7Xwm4ndkX71hpElcvx0D1Ji/p1eo= -google.golang.org/api v0.187.0/go.mod h1:KIHlTc4x7N7gKKuVsdmfBXN13yEEWXWFURWY6SBp2gk= +google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI= +google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls= -google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= -google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d 
h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20240722135656-d784300faade h1:lKFsS7wpngDgSCeFn7MoLy+wBDQZ1UQIJD4UNM1Qvkg= +google.golang.org/genproto v0.0.0-20240722135656-d784300faade/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade h1:oCRSWfwGXQsqlVdErcyTt4A93Y8fo0/9D4b1gnI++qo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -343,9 +343,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 0b19f6cf5ab65bb6efb856b761a96ca52631e865 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 26 Jul 2024 19:10:25 +0200 Subject: [PATCH 531/893] Switch back to sha256 from the std library The std library now also supports the sha assembly instructions on ARM64. Thus, sha256-simd no longer provides a performance benefit. 
--- cmd/restic/cmd_stats.go | 2 +- go.mod | 1 - go.sum | 2 -- internal/backend/test/tests.go | 2 +- internal/fuse/snapshots_dirstruct.go | 3 +-- internal/repository/check.go | 2 +- internal/repository/packer_manager.go | 3 +-- internal/restic/id.go | 3 +-- 8 files changed, 6 insertions(+), 12 deletions(-) diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 60ab1e5bcb4..5d105e31cb8 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -2,6 +2,7 @@ package main import ( "context" + "crypto/sha256" "encoding/json" "fmt" "path/filepath" @@ -16,7 +17,6 @@ import ( "github.com/restic/restic/internal/ui/table" "github.com/restic/restic/internal/walker" - "github.com/minio/sha256-simd" "github.com/spf13/cobra" ) diff --git a/go.mod b/go.mod index 043e72ed965..e1c92c2fa06 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,6 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/klauspost/compress v1.17.9 github.com/minio/minio-go/v7 v7.0.74 - github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 github.com/peterbourgon/unixtransport v0.0.4 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 052c9fef01b..d75b70d1425 100644 --- a/go.sum +++ b/go.sum @@ -148,8 +148,6 @@ github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0= github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= diff --git a/internal/backend/test/tests.go b/internal/backend/test/tests.go index e2879a3bdeb..800d1f8c060 100644 --- a/internal/backend/test/tests.go +++ b/internal/backend/test/tests.go @@ -3,6 +3,7 @@ package test import ( "bytes" "context" + "crypto/sha256" "fmt" "io" "math/rand" @@ -12,7 +13,6 @@ import ( "testing" "time" - "github.com/minio/sha256-simd" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" diff --git a/internal/fuse/snapshots_dirstruct.go b/internal/fuse/snapshots_dirstruct.go index 049319c6f74..03ff5319301 100644 --- a/internal/fuse/snapshots_dirstruct.go +++ b/internal/fuse/snapshots_dirstruct.go @@ -6,6 +6,7 @@ package fuse import ( "bytes" "context" + "crypto/sha256" "fmt" "path" "sort" @@ -15,8 +16,6 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" - - "github.com/minio/sha256-simd" ) type MetaDirData struct { diff --git a/internal/repository/check.go b/internal/repository/check.go index 1eeea58dc1f..4e57a7c1c25 100644 --- a/internal/repository/check.go +++ b/internal/repository/check.go @@ -4,12 +4,12 @@ import ( "bufio" "bytes" "context" + "crypto/sha256" "fmt" "io" "sort" "github.com/klauspost/compress/zstd" - "github.com/minio/sha256-simd" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" diff --git a/internal/repository/packer_manager.go b/internal/repository/packer_manager.go index 213a3df4138..8dd8f71c1b3 100644 --- 
a/internal/repository/packer_manager.go +++ b/internal/repository/packer_manager.go @@ -3,6 +3,7 @@ package repository import ( "bufio" "context" + "crypto/sha256" "io" "os" "runtime" @@ -17,8 +18,6 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository/pack" - - "github.com/minio/sha256-simd" ) // packer holds a pack.packer together with a hash writer. diff --git a/internal/restic/id.go b/internal/restic/id.go index e71c6d71b33..0742cd6f1d0 100644 --- a/internal/restic/id.go +++ b/internal/restic/id.go @@ -2,11 +2,10 @@ package restic import ( "crypto/rand" + "crypto/sha256" "encoding/hex" "fmt" "io" - - "github.com/minio/sha256-simd" ) // Hash returns the ID for data. From 84c79f14565338d9d833fbd3b1d732232cf8d8f7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 26 Jul 2024 19:12:39 +0200 Subject: [PATCH 532/893] bump required go version to 1.21 --- changelog/unreleased/pull-4938 | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 changelog/unreleased/pull-4938 diff --git a/changelog/unreleased/pull-4938 b/changelog/unreleased/pull-4938 new file mode 100644 index 00000000000..0fa876ca094 --- /dev/null +++ b/changelog/unreleased/pull-4938 @@ -0,0 +1,7 @@ +Change: Update dependencies and require Go 1.21 or newer + +We have updated all dependencies. Since some libraries require newer Go standard +library features, support for Go 1.19 and 1.20 has been dropped, which means that +restic now requires at least Go 1.21 to build. + +https://github.com/restic/restic/pull/4938 From 400ae5594078643fc9bdf35b7db3c65dfa4098f4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 10 Aug 2024 19:34:49 +0200 Subject: [PATCH 533/893] replace deprecated usages of math/rand --- internal/backend/cache/file_test.go | 21 +++++---- internal/backend/test/tests.go | 37 ++++++++-------- internal/repository/prune_test.go | 12 +++-- internal/repository/repack_test.go | 56 ++++++++++++------------ internal/repository/repair_index_test.go | 10 ++++- internal/repository/repair_pack_test.go | 18 ++++---- internal/restic/blob_set_test.go | 4 +- 7 files changed, 88 insertions(+), 70 deletions(-) diff --git a/internal/backend/cache/file_test.go b/internal/backend/cache/file_test.go index 331e3251daa..ed2cd295abc 100644 --- a/internal/backend/cache/file_test.go +++ b/internal/backend/cache/file_test.go @@ -19,10 +19,10 @@ import ( "golang.org/x/sync/errgroup" ) -func generateRandomFiles(t testing.TB, tpe backend.FileType, c *Cache) restic.IDSet { +func generateRandomFiles(t testing.TB, random *rand.Rand, tpe backend.FileType, c *Cache) restic.IDSet { ids := restic.NewIDSet() - for i := 0; i < rand.Intn(15)+10; i++ { - buf := rtest.Random(rand.Int(), 1<<19) + for i := 0; i < random.Intn(15)+10; i++ { + buf := rtest.Random(random.Int(), 1<<19) id := restic.Hash(buf) h := backend.Handle{Type: tpe, Name: id.String()} @@ -88,7 +88,7 @@ func clearFiles(t testing.TB, c *Cache, tpe restic.FileType, valid restic.IDSet) func TestFiles(t *testing.T) { seed := time.Now().Unix() t.Logf("seed is %v", seed) - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) c := TestNewCache(t) @@ -100,7 +100,7 @@ func TestFiles(t *testing.T) { for _, tpe := range tests { t.Run(tpe.String(), func(t *testing.T) { - ids := generateRandomFiles(t, tpe, c) + ids := generateRandomFiles(t, random, tpe, c) id := randomID(ids) h := backend.Handle{Type: tpe, Name: id.String()} @@ -140,12 +140,12 @@ func TestFiles(t *testing.T) { func TestFileLoad(t 
*testing.T) { seed := time.Now().Unix() t.Logf("seed is %v", seed) - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) c := TestNewCache(t) // save about 5 MiB of data in the cache - data := rtest.Random(rand.Int(), 5234142) + data := rtest.Random(random.Int(), 5234142) id := restic.ID{} copy(id[:], data) h := backend.Handle{ @@ -223,6 +223,10 @@ func TestFileSaveConcurrent(t *testing.T) { t.Skip("may not work due to FILE_SHARE_DELETE issue") } + seed := time.Now().Unix() + t.Logf("seed is %v", seed) + random := rand.New(rand.NewSource(seed)) + const nproc = 40 var ( @@ -231,7 +235,8 @@ func TestFileSaveConcurrent(t *testing.T) { g errgroup.Group id restic.ID ) - rand.Read(id[:]) + + random.Read(id[:]) h := backend.Handle{ Type: restic.PackFile, diff --git a/internal/backend/test/tests.go b/internal/backend/test/tests.go index 800d1f8c060..add2f531afc 100644 --- a/internal/backend/test/tests.go +++ b/internal/backend/test/tests.go @@ -21,10 +21,11 @@ import ( "github.com/restic/restic/internal/backend" ) -func seedRand(t testing.TB) { +func seedRand(t testing.TB) *rand.Rand { seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand initialized with seed %d", seed) + return random } func beTest(ctx context.Context, be backend.Backend, h backend.Handle) (bool, error) { @@ -128,7 +129,7 @@ func (s *Suite[C]) TestConfig(t *testing.T) { // TestLoad tests the backend's Load function. func (s *Suite[C]) TestLoad(t *testing.T) { - seedRand(t) + random := seedRand(t) b := s.open(t) defer s.close(t, b) @@ -140,7 +141,7 @@ func (s *Suite[C]) TestLoad(t *testing.T) { test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize non-existing blob: %v", err) test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize non-existing blob: %v", err) - length := rand.Intn(1<<24) + 2000 + length := random.Intn(1<<24) + 2000 data := test.Random(23, length) id := restic.Hash(data) @@ -173,8 +174,8 @@ func (s *Suite[C]) TestLoad(t *testing.T) { } for i := 0; i < loadTests; i++ { - l := rand.Intn(length + 2000) - o := rand.Intn(length + 2000) + l := random.Intn(length + 2000) + o := random.Intn(length + 2000) d := data if o < len(d) { @@ -186,7 +187,7 @@ func (s *Suite[C]) TestLoad(t *testing.T) { getlen := l if l >= len(d) { - if rand.Float32() >= 0.5 { + if random.Float32() >= 0.5 { getlen = 0 } else { getlen = len(d) @@ -254,9 +255,9 @@ type setter interface { // TestList makes sure that the backend implements List() pagination correctly. func (s *Suite[C]) TestList(t *testing.T) { - seedRand(t) + random := seedRand(t) - numTestFiles := rand.Intn(20) + 20 + numTestFiles := random.Intn(20) + 20 b := s.open(t) defer s.close(t, b) @@ -277,7 +278,7 @@ func (s *Suite[C]) TestList(t *testing.T) { list1 := make(map[restic.ID]int64) for i := 0; i < numTestFiles; i++ { - data := test.Random(rand.Int(), rand.Intn(100)+55) + data := test.Random(random.Int(), random.Intn(100)+55) id := restic.Hash(data) h := backend.Handle{Type: backend.PackFile, Name: id.String()} err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher())) @@ -353,8 +354,6 @@ func (s *Suite[C]) TestList(t *testing.T) { // TestListCancel tests that the context is respected and the error is returned by List. func (s *Suite[C]) TestListCancel(t *testing.T) { - seedRand(t) - numTestFiles := 5 b := s.open(t) @@ -498,7 +497,7 @@ func (ec errorCloser) Rewind() error { // TestSave tests saving data in the backend. 
func (s *Suite[C]) TestSave(t *testing.T) { - seedRand(t) + random := seedRand(t) b := s.open(t) defer s.close(t, b) @@ -510,7 +509,7 @@ func (s *Suite[C]) TestSave(t *testing.T) { } for i := 0; i < saveTests; i++ { - length := rand.Intn(1<<23) + 200000 + length := random.Intn(1<<23) + 200000 data := test.Random(23, length) id = sha256.Sum256(data) @@ -554,7 +553,7 @@ func (s *Suite[C]) TestSave(t *testing.T) { t.Fatal(err) } - length := rand.Intn(1<<23) + 200000 + length := random.Intn(1<<23) + 200000 data := test.Random(23, length) id = sha256.Sum256(data) @@ -614,7 +613,7 @@ func (r *incompleteByteReader) Length() int64 { // TestSaveError tests saving data in the backend. func (s *Suite[C]) TestSaveError(t *testing.T) { - seedRand(t) + random := seedRand(t) b := s.open(t) defer func() { @@ -623,7 +622,7 @@ func (s *Suite[C]) TestSaveError(t *testing.T) { _ = b.Close() }() - length := rand.Intn(1<<23) + 200000 + length := random.Intn(1<<23) + 200000 data := test.Random(24, length) var id restic.ID copy(id[:], data) @@ -653,7 +652,7 @@ func (b *wrongByteReader) Hash() []byte { // TestSaveWrongHash tests that uploads with a wrong hash fail func (s *Suite[C]) TestSaveWrongHash(t *testing.T) { - seedRand(t) + random := seedRand(t) b := s.open(t) defer s.close(t, b) @@ -662,7 +661,7 @@ func (s *Suite[C]) TestSaveWrongHash(t *testing.T) { return } - length := rand.Intn(1<<23) + 200000 + length := random.Intn(1<<23) + 200000 data := test.Random(25, length) var id restic.ID copy(id[:], data) diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index 02eefc46321..94d0dfa94ee 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -3,7 +3,9 @@ package repository_test import ( "context" "math" + "math/rand" "testing" + "time" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/repository" @@ -14,10 +16,14 @@ import ( ) func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { + seed := time.Now().UnixNano() + random := rand.New(rand.NewSource(seed)) + t.Logf("rand initialized with seed %d", seed) + repo, be := repository.TestRepositoryWithVersion(t, 0) - createRandomBlobs(t, repo, 4, 0.5, true) - createRandomBlobs(t, repo, 5, 0.5, true) - keep, _ := selectBlobs(t, repo, 0.5) + createRandomBlobs(t, random, repo, 4, 0.5, true) + createRandomBlobs(t, random, repo, 5, 0.5, true) + keep, _ := selectBlobs(t, random, repo, 0.5) var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index 476e63b4721..59bafe84cb7 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -14,11 +14,11 @@ import ( "golang.org/x/sync/errgroup" ) -func randomSize(min, max int) int { - return rand.Intn(max-min) + min +func randomSize(random *rand.Rand, min, max int) int { + return random.Intn(max-min) + min } -func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32, smallBlobs bool) { +func createRandomBlobs(t testing.TB, random *rand.Rand, repo restic.Repository, blobs int, pData float32, smallBlobs bool) { var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) @@ -28,20 +28,20 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl length int ) - if rand.Float32() < pData { + if random.Float32() < pData { tpe = restic.DataBlob if smallBlobs { - length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB of data + length = 
randomSize(random, 1*1024, 20*1024) // 1KiB to 20KiB of data } else { - length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + length = randomSize(random, 10*1024, 1024*1024) // 10KiB to 1MiB of data } } else { tpe = restic.TreeBlob - length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB + length = randomSize(random, 1*1024, 20*1024) // 1KiB to 20KiB } buf := make([]byte, length) - rand.Read(buf) + random.Read(buf) id, exists, _, err := repo.SaveBlob(context.TODO(), tpe, buf, restic.ID{}, false) if err != nil { @@ -66,10 +66,10 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl } } -func createRandomWrongBlob(t testing.TB, repo restic.Repository) restic.BlobHandle { - length := randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data +func createRandomWrongBlob(t testing.TB, random *rand.Rand, repo restic.Repository) restic.BlobHandle { + length := randomSize(random, 10*1024, 1024*1024) // 10KiB to 1MiB of data buf := make([]byte, length) - rand.Read(buf) + random.Read(buf) id := restic.Hash(buf) // invert first data byte buf[0] ^= 0xff @@ -89,7 +89,7 @@ func createRandomWrongBlob(t testing.TB, repo restic.Repository) restic.BlobHand // selectBlobs splits the list of all blobs randomly into two lists. A blob // will be contained in the firstone with probability p. -func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) { +func selectBlobs(t *testing.T, random *rand.Rand, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) { list1 = restic.NewBlobSet() list2 = restic.NewBlobSet() @@ -109,7 +109,7 @@ func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 } blobs.Insert(h) - if rand.Float32() <= p { + if random.Float32() <= p { list1.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type}) } else { list2.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type}) @@ -189,12 +189,12 @@ func testRepack(t *testing.T, version uint) { repo, _ := repository.TestRepositoryWithVersion(t, version) seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand seed is %v", seed) // add a small amount of blobs twice to create multiple pack files - createRandomBlobs(t, repo, 10, 0.7, false) - createRandomBlobs(t, repo, 10, 0.7, false) + createRandomBlobs(t, random, repo, 10, 0.7, false) + createRandomBlobs(t, random, repo, 10, 0.7, false) packsBefore := listPacks(t, repo) @@ -208,7 +208,7 @@ func testRepack(t *testing.T, version uint) { packsBefore, packsAfter) } - removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2) + removeBlobs, keepBlobs := selectBlobs(t, random, repo, 0.2) removePacks := findPacksForBlobs(t, repo, removeBlobs) @@ -269,14 +269,14 @@ func testRepackCopy(t *testing.T, version uint) { dstRepoWrapped := &oneConnectionRepo{dstRepo} seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand seed is %v", seed) // add a small amount of blobs twice to create multiple pack files - createRandomBlobs(t, repo, 10, 0.7, false) - createRandomBlobs(t, repo, 10, 0.7, false) + createRandomBlobs(t, random, repo, 10, 0.7, false) + createRandomBlobs(t, random, repo, 10, 0.7, false) - _, keepBlobs := selectBlobs(t, repo, 0.2) + _, keepBlobs := selectBlobs(t, random, repo, 0.2) copyPacks := findPacksForBlobs(t, repo, keepBlobs) _, err := repository.Repack(context.TODO(), repoWrapped, dstRepoWrapped, copyPacks, keepBlobs, nil) @@ -308,14 +308,14 @@ func testRepackWrongBlob(t *testing.T, version uint) { 
repo, _ := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 5, 0.7, false) - createRandomWrongBlob(t, repo) + createRandomBlobs(t, random, repo, 5, 0.7, false) + createRandomWrongBlob(t, random, repo) // just keep all blobs, but also rewrite every pack - _, keepBlobs := selectBlobs(t, repo, 0) + _, keepBlobs := selectBlobs(t, random, repo, 0) rewritePacks := findPacksForBlobs(t, repo, keepBlobs) _, err := repository.Repack(context.TODO(), repo, repo, rewritePacks, keepBlobs, nil) @@ -334,12 +334,12 @@ func testRepackBlobFallback(t *testing.T, version uint) { repo, _ := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand seed is %v", seed) - length := randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + length := randomSize(random, 10*1024, 1024*1024) // 10KiB to 1MiB of data buf := make([]byte, length) - rand.Read(buf) + random.Read(buf) id := restic.Hash(buf) // corrupted copy diff --git a/internal/repository/repair_index_test.go b/internal/repository/repair_index_test.go index ac47d59ff7f..3b0af4e224b 100644 --- a/internal/repository/repair_index_test.go +++ b/internal/repository/repair_index_test.go @@ -2,7 +2,9 @@ package repository_test import ( "context" + "math/rand" "testing" + "time" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/checker" @@ -17,9 +19,13 @@ func listIndex(t *testing.T, repo restic.Lister) restic.IDSet { } func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, repo *repository.Repository, be backend.Backend)) { + seed := time.Now().UnixNano() + random := rand.New(rand.NewSource(seed)) + t.Logf("rand initialized with seed %d", seed) + repo, be := repository.TestRepositoryWithVersion(t, 0) - createRandomBlobs(t, repo, 4, 0.5, true) - createRandomBlobs(t, repo, 5, 0.5, true) + createRandomBlobs(t, random, repo, 4, 0.5, true) + createRandomBlobs(t, random, repo, 5, 0.5, true) indexes := listIndex(t, repo) t.Logf("old indexes %v", indexes) diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index 0d6d340f493..e5f7a7f2265 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -38,25 +38,25 @@ func TestRepairBrokenPack(t *testing.T) { func testRepairBrokenPack(t *testing.T, version uint) { tests := []struct { name string - damage func(t *testing.T, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) + damage func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) }{ { "valid pack", - func(t *testing.T, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { return packsBefore, restic.NewBlobSet() }, }, { "broken pack", - func(t *testing.T, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { - wrongBlob := createRandomWrongBlob(t, repo) + func(t *testing.T, random *rand.Rand, repo 
*repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + wrongBlob := createRandomWrongBlob(t, random, repo) damagedPacks := findPacksForBlobs(t, repo, restic.NewBlobSet(wrongBlob)) return damagedPacks, restic.NewBlobSet(wrongBlob) }, }, { "partially broken pack", - func(t *testing.T, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { // damage one of the pack files damagedID := packsBefore.List()[0] replaceFile(t, be, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, @@ -79,7 +79,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { }, }, { "truncated pack", - func(t *testing.T, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { + func(t *testing.T, random *rand.Rand, repo *repository.Repository, be backend.Backend, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) { // damage one of the pack files damagedID := packsBefore.List()[0] replaceFile(t, be, backend.Handle{Type: backend.PackFile, Name: damagedID.String()}, @@ -106,14 +106,14 @@ func testRepairBrokenPack(t *testing.T, version uint) { repo, be := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true}) seed := time.Now().UnixNano() - rand.Seed(seed) + random := rand.New(rand.NewSource(seed)) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 5, 0.7, true) + createRandomBlobs(t, random, repo, 5, 0.7, true) packsBefore := listPacks(t, repo) blobsBefore := listBlobs(repo) - toRepair, damagedBlobs := test.damage(t, repo, be, packsBefore) + toRepair, damagedBlobs := test.damage(t, random, repo, be, packsBefore) rtest.OK(t, repository.RepairPacks(context.TODO(), repo, toRepair, &progress.NoopPrinter{})) // reload index diff --git a/internal/restic/blob_set_test.go b/internal/restic/blob_set_test.go index e26b48fe964..4e0961aa506 100644 --- a/internal/restic/blob_set_test.go +++ b/internal/restic/blob_set_test.go @@ -9,6 +9,8 @@ import ( ) func TestBlobSetString(t *testing.T) { + random := rand.New(rand.NewSource(42)) + s := NewBlobSet() rtest.Equals(t, "{}", s.String()) @@ -21,7 +23,7 @@ func TestBlobSetString(t *testing.T) { var h BlobHandle for i := 0; i < 100; i++ { h.Type = DataBlob - _, _ = rand.Read(h.ID[:]) + _, _ = random.Read(h.ID[:]) s.Insert(h) } From ecd03b4fc6ed7f542e07aaa0ad2e1a165292127e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Aug 2024 17:58:50 +0000 Subject: [PATCH 534/893] build(deps): bump golang.org/x/crypto from 0.25.0 to 0.26.0 Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.25.0 to 0.26.0. - [Commits](https://github.com/golang/crypto/compare/v0.25.0...v0.26.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index e1c92c2fa06..c0258d05f78 100644 --- a/go.mod +++ b/go.mod @@ -25,13 +25,13 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.25.0 + golang.org/x/crypto v0.26.0 golang.org/x/net v0.27.0 golang.org/x/oauth2 v0.21.0 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.22.0 - golang.org/x/term v0.22.0 - golang.org/x/text v0.16.0 + golang.org/x/sync v0.8.0 + golang.org/x/sys v0.23.0 + golang.org/x/term v0.23.0 + golang.org/x/text v0.17.0 golang.org/x/time v0.5.0 google.golang.org/api v0.189.0 ) diff --git a/go.sum b/go.sum index d75b70d1425..4e3fb28e75d 100644 --- a/go.sum +++ b/go.sum @@ -223,8 +223,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= @@ -258,8 +258,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -276,21 +276,21 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From f2e2e5f5ab0f135d95921ff79766993323af0145 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Aug 2024 17:58:58 +0000 Subject: [PATCH 535/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore Bumps [github.com/Azure/azure-sdk-for-go/sdk/azcore](https://github.com/Azure/azure-sdk-for-go) from 1.13.0 to 1.14.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.13.0...sdk/azcore/v1.14.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azcore dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e1c92c2fa06..306106780ea 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.43.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 github.com/Backblaze/blazer v0.6.1 diff --git a/go.sum b/go.sum index d75b70d1425..500e333a8d5 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,8 @@ cloud.google.com/go/longrunning v0.5.9 h1:haH9pAuXdPAMqHvzX0zlWQigXT7B0+CL4/2nXX cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= From 049186371f7ffad3100a5276d9afac1191b52b76 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Aug 2024 18:17:20 +0000 Subject: [PATCH 536/893] build(deps): bump google.golang.org/api from 0.189.0 to 0.191.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.189.0 to 0.191.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.189.0...v0.191.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 21 ++++++++++----------- go.sum | 44 ++++++++++++++++++++++---------------------- 2 files changed, 32 insertions(+), 33 deletions(-) diff --git a/go.mod b/go.mod index c0258d05f78..fe0748b11ae 100644 --- a/go.mod +++ b/go.mod @@ -27,21 +27,21 @@ require ( go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.26.0 golang.org/x/net v0.27.0 - golang.org/x/oauth2 v0.21.0 + golang.org/x/oauth2 v0.22.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.23.0 golang.org/x/term v0.23.0 golang.org/x/text v0.17.0 - golang.org/x/time v0.5.0 - google.golang.org/api v0.189.0 + golang.org/x/time v0.6.0 + google.golang.org/api v0.191.0 ) require ( cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.7.2 // indirect + cloud.google.com/go/auth v0.7.3 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.10 // indirect + cloud.google.com/go/iam v1.1.12 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect @@ -54,12 +54,11 @@ require ( github.com/goccy/go-json v0.10.3 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect - github.com/google/s2a-go v0.1.7 // indirect + github.com/google/s2a-go v0.1.8 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.5 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/kr/fs v0.1.0 // indirect @@ -74,9 +73,9 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - google.golang.org/genproto v0.0.0-20240722135656-d784300faade // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade // indirect + google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect google.golang.org/grpc v1.64.1 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 4e3fb28e75d..5ca6df16a8d 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE= -cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs= +cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= +cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= cloud.google.com/go/auth/oauth2adapt v0.2.3 
h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.10 h1:ZSAr64oEhQSClwBL670MsJAW5/RLiC6kfw3Bqmd5ZDI= -cloud.google.com/go/iam v1.1.10/go.mod h1:iEgMq62sg8zx446GCaijmA2Miwg5o3UbO+nI47WHJps= -cloud.google.com/go/longrunning v0.5.9 h1:haH9pAuXdPAMqHvzX0zlWQigXT7B0+CL4/2nXXdBo5k= -cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c= +cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= +cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= +cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= +cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= @@ -112,15 +112,15 @@ github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1 github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= -github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -250,8 +250,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -291,8 +291,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -306,19 +306,19 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI= -google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8= +google.golang.org/api v0.191.0 h1:cJcF09Z+4HAB2t5qTQM1ZtfL/PemsLFkcFG67qq2afk= +google.golang.org/api v0.191.0/go.mod h1:tD5dsFGxFza0hnQveGfVk9QQYKcfp+VzgRqyXFxE0+E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240722135656-d784300faade h1:lKFsS7wpngDgSCeFn7MoLy+wBDQZ1UQIJD4UNM1Qvkg= -google.golang.org/genproto v0.0.0-20240722135656-d784300faade/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240722135656-d784300faade h1:oCRSWfwGXQsqlVdErcyTt4A93Y8fo0/9D4b1gnI++qo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= +google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From 1d392a36f9df04840364dce29f553a41b449f3c8 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 11 Aug 2024 01:23:47 -0600 Subject: [PATCH 537/893] Fix extended attributes handling for VSS snapshots --- internal/fs/ea_windows.go | 18 +++++++ internal/restic/node_windows.go | 84 +++++++++++++++++++++++---------- 2 files changed, 77 insertions(+), 25 deletions(-) diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go index d19a1ee6ae8..bf7b02fd407 100644 --- a/internal/fs/ea_windows.go +++ b/internal/fs/ea_windows.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "errors" "fmt" + "strings" "syscall" "unsafe" @@ -298,3 +299,20 @@ func PathSupportsExtendedAttributes(path string) (supported bool, err error) { supported = (fileSystemFlags & windows.FILE_SUPPORTS_EXTENDED_ATTRIBUTES) != 0 return supported, nil } + +// GetVolumePathName returns the volume path name for the given path. +func GetVolumePathName(path string) (volumeName string, err error) { + utf16Path, err := windows.UTF16PtrFromString(path) + if err != nil { + return "", err + } + // Get the volume path (e.g., "D:") + var volumePath [windows.MAX_PATH + 1]uint16 + err = windows.GetVolumePathName(utf16Path, &volumePath[0], windows.MAX_PATH+1) + if err != nil { + return "", err + } + // Trim any trailing backslashes + volumeName = strings.TrimRight(windows.UTF16ToString(volumePath[:]), "\\") + return volumeName, nil +} diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index ceb304d0c72..6adb51f0d44 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -407,38 +407,72 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT // checkAndStoreEASupport checks if the volume of the path supports extended attributes and stores the result in a map // If the result is already in the map, it returns the result from the map. 
func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { - // Check if it's an extended length path - if strings.HasPrefix(path, uncPathPrefix) { - // Convert \\?\UNC\ extended path to standard path to get the volume name correctly - path = `\\` + path[len(uncPathPrefix):] - } else if strings.HasPrefix(path, extendedPathPrefix) { - //Extended length path prefix needs to be trimmed to get the volume name correctly - path = path[len(extendedPathPrefix):] - } else if strings.HasPrefix(path, globalRootPrefix) { - // EAs are not supported for \\?\GLOBALROOT i.e. VSS snapshots - return false, nil - } else { - // Use the absolute path - path, err = filepath.Abs(path) + var volumeName string + volumeName, err = prepareVolumeName(path) + if err != nil { + return false, err + } + + if volumeName != "" { + // First check if the manually prepared volume name is already in the map + eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeName) + if exists { + return eaSupportedValue.(bool), nil + } + // If not found, check if EA is supported with manually prepared volume name + isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeName + `\`) if err != nil { - return false, fmt.Errorf("failed to get absolute path: %w", err) + return false, err } } - volumeName := filepath.VolumeName(path) - if volumeName == "" { - return false, nil + // If an entry is not found, get the actual volume name using the GetVolumePathName function + volumeNameActual, err := fs.GetVolumePathName(path) + if err != nil { + return false, err } - eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeName) - if exists { - return eaSupportedValue.(bool), nil + if volumeNameActual != volumeName { + // If the actual volume name is different, check cache for the actual volume name + eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeNameActual) + if exists { + return eaSupportedValue.(bool), nil + } + // If the actual volume name is different and is not in the map, again check if the new volume supports extended attributes with the actual volume name + isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeNameActual + `\`) + if err != nil { + return false, err + } } + eaSupportedVolumesMap.Store(volumeNameActual, isEASupportedVolume) + return isEASupportedVolume, err +} - // Add backslash to the volume name to ensure it is a valid path - isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeName + `\`) - if err == nil { - eaSupportedVolumesMap.Store(volumeName, isEASupportedVolume) +// prepareVolumeName prepares the volume name for different cases in Windows +func prepareVolumeName(path string) (volumeName string, err error) { + // Check if it's an extended length path + if strings.HasPrefix(path, globalRootPrefix) { + // Extract the VSS snapshot volume name eg. 
`\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX` + if parts := strings.SplitN(path, `\`, 7); len(parts) >= 6 { + volumeName = strings.Join(parts[:6], `\`) + } else { + volumeName = filepath.VolumeName(path) + } + } else { + if strings.HasPrefix(path, uncPathPrefix) { + // Convert \\?\UNC\ extended path to standard path to get the volume name correctly + path = `\\` + path[len(uncPathPrefix):] + } else if strings.HasPrefix(path, extendedPathPrefix) { + //Extended length path prefix needs to be trimmed to get the volume name correctly + path = path[len(extendedPathPrefix):] + } else { + // Use the absolute path + path, err = filepath.Abs(path) + if err != nil { + return "", fmt.Errorf("failed to get absolute path: %w", err) + } + } + volumeName = filepath.VolumeName(path) } - return isEASupportedVolume, err + return volumeName, nil } // windowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection From b5b5c1fe8e2ffe17d5b836af2d3a54326a28a945 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 11 Aug 2024 01:32:55 -0600 Subject: [PATCH 538/893] Add changelog --- changelog/unreleased/pull-4998 | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 changelog/unreleased/pull-4998 diff --git a/changelog/unreleased/pull-4998 b/changelog/unreleased/pull-4998 new file mode 100644 index 00000000000..23ff3dbd2fd --- /dev/null +++ b/changelog/unreleased/pull-4998 @@ -0,0 +1,8 @@ +Bugfix: Fix extended attributes handling for VSS snapshots + +Restic was failing to backup extended attributes for VSS snapshots +after the fix for https://github.com/restic/restic/pull/4980. +Restic now correctly handles extended attributes for VSS snapshots. + +https://github.com/restic/restic/pull/4998 +https://github.com/restic/restic/pull/4980 From 849c4414552528d326c7adc053670abbfd820485 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 11 Aug 2024 01:48:25 -0600 Subject: [PATCH 539/893] Gracefully handle invalid prepared volume names --- internal/restic/node_windows.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 6adb51f0d44..2ca7e42e6d3 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -417,11 +417,13 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { // First check if the manually prepared volume name is already in the map eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeName) if exists { + // Cache hit, immediately return the cached value return eaSupportedValue.(bool), nil } // If not found, check if EA is supported with manually prepared volume name isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeName + `\`) - if err != nil { + // If the prepared volume name is not valid, we will next fetch the actual volume name. 
+ if err != nil && !errors.Is(err, windows.DNS_ERROR_INVALID_NAME) { return false, err } } @@ -434,6 +436,7 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { // If the actual volume name is different, check cache for the actual volume name eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeNameActual) if exists { + // Cache hit, immediately return the cached value return eaSupportedValue.(bool), nil } // If the actual volume name is different and is not in the map, again check if the new volume supports extended attributes with the actual volume name From 2d250a9135c4018ed0eb63de446156cd8a9932ae Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Sun, 28 Jul 2024 08:36:19 -0400 Subject: [PATCH 540/893] version: add message_type in --json mode --- cmd/restic/cmd_version.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/cmd/restic/cmd_version.go b/cmd/restic/cmd_version.go index cd32e24704d..daf984a8532 100644 --- a/cmd/restic/cmd_version.go +++ b/cmd/restic/cmd_version.go @@ -25,17 +25,19 @@ Exit status is 1 if there was any error. Run: func(_ *cobra.Command, _ []string) { if globalOptions.JSON { type jsonVersion struct { - Version string `json:"version"` - GoVersion string `json:"go_version"` - GoOS string `json:"go_os"` - GoArch string `json:"go_arch"` + MessageType string `json:"message_type"` // version + Version string `json:"version"` + GoVersion string `json:"go_version"` + GoOS string `json:"go_os"` + GoArch string `json:"go_arch"` } jsonS := jsonVersion{ - Version: version, - GoVersion: runtime.Version(), - GoOS: runtime.GOOS, - GoArch: runtime.GOARCH, + MessageType: "version", + Version: version, + GoVersion: runtime.Version(), + GoOS: runtime.GOOS, + GoArch: runtime.GOARCH, } err := json.NewEncoder(globalOptions.stdout).Encode(jsonS) From 6283915f862814e73f32763e6527c378e20dd493 Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Sun, 28 Jul 2024 09:08:10 -0400 Subject: [PATCH 541/893] main: format exit errors as JSON when using --json --- changelog/unreleased/issue-4948 | 6 +++++ cmd/restic/main.go | 42 ++++++++++++++++++++++++++++----- 2 files changed, 42 insertions(+), 6 deletions(-) create mode 100644 changelog/unreleased/issue-4948 diff --git a/changelog/unreleased/issue-4948 b/changelog/unreleased/issue-4948 new file mode 100644 index 00000000000..3fd350d0d7c --- /dev/null +++ b/changelog/unreleased/issue-4948 @@ -0,0 +1,6 @@ +Enhancement: Format exit errors as JSON with --json + +Restic now prints any exit error messages as JSON when requested. 
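As a rough illustration (the error text and exit code here are invented, not taken from a real run), a failing `--json` invocation now ends with a single JSON line on stderr shaped like the `jsonExitError` struct introduced in cmd/restic/main.go below:

    {"message_type": "exit_error", "code": 1, "message": "Fatal: unable to open repository"}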
+ +https://github.com/restic/restic/issues/4948 +https://github.com/restic/restic/pull/4952 diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 5818221a587..fda53ca0e8d 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "context" + "encoding/json" "fmt" "log" "os" @@ -102,6 +103,30 @@ func tweakGoGC() { } } +func printExitError(code int, message string) { + if globalOptions.JSON { + type jsonExitError struct { + MessageType string `json:"message_type"` // exit_error + Code int `json:"code"` + Message string `json:"message"` + } + + jsonS := jsonExitError{ + MessageType: "exit_error", + Code: code, + Message: message, + } + + err := json.NewEncoder(globalOptions.stderr).Encode(jsonS) + if err != nil { + Warnf("JSON encode failed: %v\n", err) + return + } + } else { + fmt.Fprintf(globalOptions.stderr, "%v\n", message) + } +} + func main() { tweakGoGC() // install custom global logger into a buffer, if an error occurs @@ -131,21 +156,22 @@ func main() { err = nil } + var exitMessage string switch { case restic.IsAlreadyLocked(err): - fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err) + exitMessage = fmt.Sprintf("%v\nthe `unlock` command can be used to remove stale locks", err) case err == ErrInvalidSourceData: - fmt.Fprintf(os.Stderr, "Warning: %v\n", err) + exitMessage = fmt.Sprintf("Warning: %v", err) case errors.IsFatal(err): - fmt.Fprintf(os.Stderr, "%v\n", err) + exitMessage = err.Error() case err != nil: - fmt.Fprintf(os.Stderr, "%+v\n", err) + exitMessage = fmt.Sprintf("%+v", err) if logBuffer.Len() > 0 { - fmt.Fprintf(os.Stderr, "also, the following messages were logged by a library:\n") + exitMessage += "also, the following messages were logged by a library:\n" sc := bufio.NewScanner(logBuffer) for sc.Scan() { - fmt.Fprintln(os.Stderr, sc.Text()) + exitMessage += fmt.Sprintln(sc.Text()) } } } @@ -165,5 +191,9 @@ func main() { default: exitCode = 1 } + + if exitCode != 0 { + printExitError(exitCode, exitMessage) + } Exit(exitCode) } From d3f9c05312b28e7a5b55b229f289625119b09314 Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Sat, 3 Aug 2024 16:20:56 -0400 Subject: [PATCH 542/893] docs: update scripting documentation --- doc/075_scripting.rst | 51 +++++++++++++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index fa7fa1b6ec1..90bf111d7c3 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -83,12 +83,33 @@ JSON output of most restic commands are documented here. list of allowed values is documented may be extended at any time. +Exit errors +----------- + +Fatal errors will result in a final JSON message on ``stderr`` before the process exits. +It will hold the error message and the exit code. + +.. note:: + Some errors cannot be caught and reported this way, + such as Go runtime errors or command line parsing errors. + ++----------------------+-------------------------------------------+ +| ``message_type`` | Always "exit_error" | ++----------------------+-------------------------------------------+ +| ``code`` | Exit code (see above chart) | ++----------------------+-------------------------------------------+ +| ``error`` | Error message | ++----------------------+-------------------------------------------+ + Output formats -------------- -Currently only the output on ``stdout`` is JSON formatted. Errors printed on ``stderr`` -are still printed as plain text messages. 
The generated JSON output uses one of the -following two formats. +Commands print their main JSON output on ``stdout``. +The generated JSON output uses one of the following two formats. + +.. note:: + Not all messages and errors have been converted to JSON yet. + Feel free to submit a pull request! Single JSON document ^^^^^^^^^^^^^^^^^^^^ @@ -136,6 +157,8 @@ Status Error ^^^^^ +These errors are printed on ``stderr``. + +----------------------+-------------------------------------------+ | ``message_type`` | Always "error" | +----------------------+-------------------------------------------+ @@ -542,6 +565,8 @@ Status Error ^^^^^ +These errors are printed on ``stderr``. + +----------------------+-------------------------------------------+ | ``message_type`` | Always "error" | +----------------------+-------------------------------------------+ @@ -691,12 +716,14 @@ version The version command returns a single JSON object. -+----------------+--------------------+ -| ``version`` | restic version | -+----------------+--------------------+ -| ``go_version`` | Go compile version | -+----------------+--------------------+ -| ``go_os`` | Go OS | -+----------------+--------------------+ -| ``go_arch`` | Go architecture | -+----------------+--------------------+ ++------------------+--------------------+ +| ``message_type`` | Always "version" | ++------------------+--------------------+ +| ``version`` | restic version | ++------------------+--------------------+ +| ``go_version`` | Go compile version | ++------------------+--------------------+ +| ``go_os`` | Go OS | ++------------------+--------------------+ +| ``go_arch`` | Go architecture | ++------------------+--------------------+ From f66624f5bf6a7c45b05c5b08b765ddd0500c767f Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 11 Aug 2024 01:42:13 +0530 Subject: [PATCH 543/893] cache: backend add List method and a cache clear functionality * removes files which are no longer in the repository, including index files, snapshot files and pack files from the cache. cache: fix ids set initialisation with NewIDSet() --- internal/backend/cache/backend.go | 40 +++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/internal/backend/cache/backend.go b/internal/backend/cache/backend.go index 94f648cf4d5..92cca4d0ed3 100644 --- a/internal/backend/cache/backend.go +++ b/internal/backend/cache/backend.go @@ -2,11 +2,14 @@ package cache import ( "context" + "fmt" "io" + "os" "sync" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" ) // Backend wraps a restic.Backend and adds a cache. @@ -215,3 +218,40 @@ func (b *Backend) IsNotExist(err error) bool { func (b *Backend) Unwrap() backend.Backend { return b.Backend } + +func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backend.FileInfo) error) error { + if !b.Cache.canBeCached(t) { + return b.Backend.List(ctx, t, fn) + } + + // will contain the IDs of the files that are in the repository + ids := restic.NewIDSet() + + // wrap the original function to also add the file to the ids set + wrapFn := func(f backend.FileInfo) error { + id, err := restic.ParseID(f.Name) + if err != nil { + // returning error here since, if we cannot parse the ID, the file + // is invalid and the list must exit. 
+ return err + } + + ids.Insert(id) + + // execute the original function + return fn(f) + } + + err := b.Backend.List(ctx, t, wrapFn) + if err != nil { + return err + } + + // clear the cache for files that are not in the repo anymore, ignore errors + err = b.Cache.Clear(t, ids) + if err != nil { + fmt.Fprintf(os.Stderr, "error clearing %s files in cache: %v\n", t.String(), err) + } + + return nil +} From a23e7bfb82984fd88c42aa09365b25333089d071 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 11 Aug 2024 15:43:03 +0530 Subject: [PATCH 544/893] cache: check for context cancellation before clearing cache --- internal/backend/cache/backend.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/backend/cache/backend.go b/internal/backend/cache/backend.go index 92cca4d0ed3..58b03dd3862 100644 --- a/internal/backend/cache/backend.go +++ b/internal/backend/cache/backend.go @@ -247,6 +247,10 @@ func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backen return err } + if ctx.Err() != nil { + return ctx.Err() + } + // clear the cache for files that are not in the repo anymore, ignore errors err = b.Cache.Clear(t, ids) if err != nil { From 720609f8ba6dcf44b7fe51cd9b543ee44bbbaf38 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 11 Aug 2024 15:58:27 +0530 Subject: [PATCH 545/893] repository: removed redundant prepareCache method from Repository * remove the prepareCache method from the Repository * changed the signature of the SetIndex function to no longer return an error --- internal/checker/checker.go | 6 +---- internal/repository/repair_index.go | 6 ++--- internal/repository/repository.go | 37 ++--------------------------- internal/restic/repository.go | 2 +- 4 files changed, 6 insertions(+), 45 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 031e13807f0..d5e7fd1f818 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -146,11 +146,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e return hints, append(errs, err) } - err = c.repo.SetIndex(c.masterIndex) - if err != nil { - debug.Log("SetIndex returned error: %v", err) - errs = append(errs, err) - } + c.repo.SetIndex(c.masterIndex) // compute pack size using index entries c.packs, err = pack.Size(ctx, c.repo, false) diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index 77080925461..c72dcfd00f4 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -52,10 +52,8 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, return err } - err = repo.SetIndex(mi) - if err != nil { - return err - } + repo.SetIndex(mi) + packSizeFromIndex, err = pack.Size(ctx, repo, false) if err != nil { return err diff --git a/internal/repository/repository.go b/internal/repository/repository.go index f7fd65c71d4..3dc248c5eab 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "math" - "os" "runtime" "sort" "sync" @@ -586,9 +585,8 @@ func (r *Repository) ListPacksFromIndex(ctx context.Context, packs restic.IDSet) } // SetIndex instructs the repository to use the given index. 
-func (r *Repository) SetIndex(i restic.MasterIndex) error { +func (r *Repository) SetIndex(i restic.MasterIndex) { r.idx = i.(*index.MasterIndex) - return r.prepareCache() } func (r *Repository) clearIndex() { @@ -628,12 +626,8 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { return errors.New("index uses feature not supported by repository version 1") } } - if ctx.Err() != nil { - return ctx.Err() - } - // remove index files from the cache which have been removed in the repo - return r.prepareCache() + return ctx.Err() } // createIndexFromPacks creates a new index by reading all given pack files (with sizes). @@ -699,33 +693,6 @@ func (r *Repository) createIndexFromPacks(ctx context.Context, packsize map[rest return invalid, nil } -// prepareCache initializes the local cache. indexIDs is the list of IDs of -// index files still present in the repo. -func (r *Repository) prepareCache() error { - if r.Cache == nil { - return nil - } - - indexIDs := r.idx.IDs() - debug.Log("prepare cache with %d index files", len(indexIDs)) - - // clear old index files - err := r.Cache.Clear(restic.IndexFile, indexIDs) - if err != nil { - fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err) - } - - packs := r.idx.Packs(restic.NewIDSet()) - - // clear old packs - err = r.Cache.Clear(restic.PackFile, packs) - if err != nil { - fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err) - } - - return nil -} - // SearchKey finds a key with the supplied password, afterwards the config is // read and parsed. It tries at most maxKeys key files in the repo. func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int, keyHint string) error { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index b18b036a733..ce8401b37d7 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -22,7 +22,7 @@ type Repository interface { Key() *crypto.Key LoadIndex(ctx context.Context, p *progress.Counter) error - SetIndex(mi MasterIndex) error + SetIndex(mi MasterIndex) LookupBlob(t BlobType, id ID) []PackedBlob LookupBlobSize(t BlobType, id ID) (size uint, exists bool) From 506e07127f1430b3aa1c7a1d98e2efdf58c57c65 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 11 Aug 2024 16:07:38 +0530 Subject: [PATCH 546/893] changelog: add unrelease changelog --- changelog/unreleased/issue-4934 | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 changelog/unreleased/issue-4934 diff --git a/changelog/unreleased/issue-4934 b/changelog/unreleased/issue-4934 new file mode 100644 index 00000000000..03194168e2d --- /dev/null +++ b/changelog/unreleased/issue-4934 @@ -0,0 +1,9 @@ +Enhancement: Clear removed snapshots, index and pack files from the local cache + +Restic did not clear removed snapshots from the cache after the `forget` +operation; only indexes and pack files were removed automatically. +Restic now automatically clears removed indexes, packs and snapshots from the +local cache. 
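A minimal sketch of the resulting behaviour, using the cache backend test helpers shown below (`be` is the raw backend, `wbe` the cache-wrapped backend, `c` the cache, and `h1` a handle whose data is already cached):

    // delete the file from the repository, bypassing the cache
    remove(t, be, h1)
    // listing through the wrapped backend notices the file is gone ...
    list(t, wbe, func(_ backend.FileInfo) error { return nil })
    // ... and the stale copy is dropped from the local cache
    rtest.Assert(t, !c.Has(h1), "cache should no longer contain file1 after list")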
+ +https://github.com/restic/restic/issues/4934 +https://github.com/restic/restic/pull/4981 \ No newline at end of file From 5fd984ba6f2bb0f5a92ec14eef36eb28da6e99a2 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 11 Aug 2024 16:44:43 +0530 Subject: [PATCH 547/893] cache: add test for the automated cache clear to cache backend --- internal/backend/cache/backend_test.go | 58 ++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/internal/backend/cache/backend_test.go b/internal/backend/cache/backend_test.go index 7addc275d5a..dca51c2bf2b 100644 --- a/internal/backend/cache/backend_test.go +++ b/internal/backend/cache/backend_test.go @@ -57,6 +57,13 @@ func randomData(n int) (backend.Handle, []byte) { return h, data } +func list(t testing.TB, be backend.Backend, fn func(backend.FileInfo) error) { + err := be.List(context.TODO(), backend.IndexFile, fn) + if err != nil { + t.Fatal(err) + } +} + func TestBackend(t *testing.T) { be := mem.New() c := TestNewCache(t) @@ -238,3 +245,54 @@ func TestErrorBackend(t *testing.T) { wg.Wait() } + +func TestAutomaticCacheClear(t *testing.T) { + be := mem.New() + c := TestNewCache(t) + wbe := c.Wrap(be) + + // add two handles h1 and h2 + h1, data := randomData(2000) + // save h1 directly to the backend + save(t, be, h1, data) + if c.Has(h1) { + t.Errorf("cache has file1 too early") + } + + h2, data2 := randomData(3000) + + // save h2 directly to the backend + save(t, be, h2, data2) + if c.Has(h2) { + t.Errorf("cache has file2 too early") + } + + loadAndCompare(t, wbe, h1, data) + if !c.Has(h1) { + t.Errorf("cache doesn't have file1 after load") + } + + loadAndCompare(t, wbe, h2, data2) + if !c.Has(h2) { + t.Errorf("cache doesn't have file2 after load") + } + + // remove h1 directly from the backend + remove(t, be, h1) + if !c.Has(h1) { + t.Errorf("file1 not in cache any more, should be removed from cache only after list") + } + + // list all files in the backend + list(t, wbe, func(_ backend.FileInfo) error { return nil }) + + // h1 should be removed from the cache + if c.Has(h1) { + t.Errorf("cache has file1 after remove") + } + + // h2 should still be in the cache + if !c.Has(h2) { + t.Errorf("cache doesn't have file2 after list") + } +} From 4401265e36c7ddf836884666a051a11c19ac92a9 Mon Sep 17 00:00:00 2001 From: Andreas Deininger Date: Sun, 11 Aug 2024 21:38:15 +0200 Subject: [PATCH 548/893] Fix typos --- doc/040_backup.rst | 2 +- internal/restorer/fileswriter_test.go | 2 +- internal/restorer/restorer_windows.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 25c5a8ad110..f1f355c531e 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -585,7 +585,7 @@ Sometimes, it can be useful to directly save the output of a program, for exampl ``mysqldump`` so that the SQL can later be restored. Restic supports this mode of operation; just supply the option ``--stdin-from-command`` when using the ``backup`` action, and write the command in place of the files/directories. To prevent -restic from interpreting the arguments for the commmand, make sure to add ``--`` before +restic from interpreting the arguments for the command, make sure to add ``--`` before the command starts: .. 
code-block:: console diff --git a/internal/restorer/fileswriter_test.go b/internal/restorer/fileswriter_test.go index c6984792703..9ea8767b871 100644 --- a/internal/restorer/fileswriter_test.go +++ b/internal/restorer/fileswriter_test.go @@ -49,7 +49,7 @@ func TestFilesWriterRecursiveOverwrite(t *testing.T) { // must error if recursive delete is not allowed w := newFilesWriter(1, false) err := w.writeToFile(path, []byte{1}, 0, 2, false) - rtest.Assert(t, errors.Is(err, notEmptyDirError()), "unexepected error got %v", err) + rtest.Assert(t, errors.Is(err, notEmptyDirError()), "unexpected error got %v", err) rtest.Equals(t, 0, len(w.buckets[0].files)) // must replace directory diff --git a/internal/restorer/restorer_windows.go b/internal/restorer/restorer_windows.go index 72337d8ae80..9ddc0a932e1 100644 --- a/internal/restorer/restorer_windows.go +++ b/internal/restorer/restorer_windows.go @@ -8,6 +8,6 @@ import "strings" // toComparableFilename returns a filename suitable for equality checks. On Windows, it returns the // uppercase version of the string. On all other systems, it returns the unmodified filename. func toComparableFilename(path string) string { - // apparently NTFS internally uppercases filenames for comparision + // apparently NTFS internally uppercases filenames for comparison return strings.ToUpper(path) } From f1407afd1fbf2dd99e2ba98a7fc7c79227a512fc Mon Sep 17 00:00:00 2001 From: Shivashis Padhi Date: Sun, 11 Aug 2024 22:25:21 +0200 Subject: [PATCH 549/893] restore: Add progress bar to 'restore --verify' --- changelog/unreleased/issue-4795 | 7 +++ cmd/restic/cmd_restore.go | 5 +- internal/restorer/restorer.go | 25 +++++--- internal/restorer/restorer_test.go | 71 ++++++++++++++-------- internal/restorer/restorer_unix_test.go | 10 +-- internal/restorer/restorer_windows_test.go | 6 +- 6 files changed, 83 insertions(+), 41 deletions(-) create mode 100644 changelog/unreleased/issue-4795 diff --git a/changelog/unreleased/issue-4795 b/changelog/unreleased/issue-4795 new file mode 100644 index 00000000000..084335f510c --- /dev/null +++ b/changelog/unreleased/issue-4795 @@ -0,0 +1,7 @@ +Enhancement: `restore --verify` shows progress with a progress bar + +If restore command was run with `--verify` restic didn't show any progress indication, now it shows a progress bar while 'verification' is running. +The progress bar is text only for now and doesn't respect `--json` flag. 
+ +https://github.com/restic/restic/issues/4795 +https://github.com/restic/restic/pull/4989 diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 89942f4cf93..b3c247fdd3b 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -221,7 +221,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, msg.P("restoring %s to %s\n", res.Snapshot(), opts.Target) } - err = res.RestoreTo(ctx, opts.Target) + countRestoredFiles, err := res.RestoreTo(ctx, opts.Target) if err != nil { return err } @@ -238,7 +238,8 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, } var count int t0 := time.Now() - count, err = res.VerifyFiles(ctx, opts.Target) + bar := newTerminalProgressMax(!gopts.Quiet && !gopts.JSON && stdoutIsTerminal(), 0, "files verified", term) + count, err = res.VerifyFiles(ctx, opts.Target, countRestoredFiles, bar) if err != nil { return err } diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 00da4e18e82..0e30b82f8fb 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -12,6 +12,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" restoreui "github.com/restic/restic/internal/ui/restore" "golang.org/x/sync/errgroup" @@ -333,12 +334,13 @@ func (res *Restorer) ensureDir(target string) error { // RestoreTo creates the directories and files in the snapshot below dst. // Before an item is created, res.Filter is called. -func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { +func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error) { + restoredFileCount := uint64(0) var err error if !filepath.IsAbs(dst) { dst, err = filepath.Abs(dst) if err != nil { - return errors.Wrap(err, "Abs") + return restoredFileCount, errors.Wrap(err, "Abs") } } @@ -346,7 +348,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { // ensure that the target directory exists and is actually a directory // Using ensureDir is too aggressive here as it also removes unexpected files if err := fs.MkdirAll(dst, 0700); err != nil { - return fmt.Errorf("cannot create target directory: %w", err) + return restoredFileCount, fmt.Errorf("cannot create target directory: %w", err) } } @@ -406,19 +408,22 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { } } res.trackFile(location, updateMetadataOnly) + if !updateMetadataOnly { + restoredFileCount++ + } return nil }) return err }, }) if err != nil { - return err + return 0, err } if !res.opts.DryRun { err = filerestorer.restoreFiles(ctx) if err != nil { - return err + return 0, err } } @@ -466,7 +471,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { return err }, }) - return err + return restoredFileCount, err } func (res *Restorer) removeUnexpectedFiles(ctx context.Context, target, location string, expectedFilenames []string) error { @@ -587,7 +592,7 @@ const nVerifyWorkers = 8 // have been successfully written to dst. It stops when it encounters an // error. It returns that error and the number of files it has successfully // verified. 
-func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { +func (res *Restorer) VerifyFiles(ctx context.Context, dst string, countRestoredFiles uint64, p *progress.Counter) (int, error) { type mustCheck struct { node *restic.Node path string @@ -598,6 +603,11 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { work = make(chan mustCheck, 2*nVerifyWorkers) ) + if p != nil { + p.SetMax(countRestoredFiles) + defer p.Done() + } + g, ctx := errgroup.WithContext(ctx) // Traverse tree and send jobs to work. @@ -632,6 +642,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { if err != nil || ctx.Err() != nil { break } + p.Add(1) atomic.AddUint64(&nchecked, 1) } return err diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 191f3b8ef48..20ac2e72f1a 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -22,6 +22,7 @@ import ( "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" restoreui "github.com/restic/restic/internal/ui/restore" "golang.org/x/sync/errgroup" ) @@ -403,13 +404,13 @@ func TestRestorer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) if err != nil { t.Fatal(err) } if len(test.ErrorsMust)+len(test.ErrorsMay) == 0 { - _, err = res.VerifyFiles(ctx, tempdir) + _, err = res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) rtest.OK(t, err) } @@ -501,13 +502,18 @@ func TestRestorerRelative(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, "restore") + countRestoredFiles, err := res.RestoreTo(ctx, "restore") if err != nil { t.Fatal(err) } - nverified, err := res.VerifyFiles(ctx, "restore") + p := progress.NewCounter(time.Second, countRestoredFiles, func(value uint64, total uint64, runtime time.Duration, final bool) {}) + defer p.Done() + nverified, err := res.VerifyFiles(ctx, "restore", countRestoredFiles, p) rtest.OK(t, err) rtest.Equals(t, len(test.Files), nverified) + counterValue, maxValue := p.Get() + rtest.Equals(t, counterValue, uint64(2)) + rtest.Equals(t, maxValue, uint64(2)) for filename, err := range errors { t.Errorf("unexpected error for %v found: %v", filename, err) @@ -524,6 +530,13 @@ func TestRestorerRelative(t *testing.T) { t.Errorf("file %v has wrong content: want %q, got %q", filename, content, data) } } + + // verify that restoring the same snapshot again results in countRestoredFiles == 0 + countRestoredFiles, err = res.RestoreTo(ctx, "restore") + if err != nil { + t.Fatal(err) + } + rtest.Equals(t, uint64(0), countRestoredFiles) }) } } @@ -835,7 +848,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) var testPatterns = []struct { @@ -872,9 +885,9 @@ func TestVerifyCancel(t *testing.T) { tempdir := rtest.TempDir(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - - rtest.OK(t, res.RestoreTo(ctx, tempdir)) - err := os.WriteFile(filepath.Join(tempdir, "foo"), []byte("bar"), 0644) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + err = 
os.WriteFile(filepath.Join(tempdir, "foo"), []byte("bar"), 0644) rtest.OK(t, err) var errs []error @@ -883,7 +896,7 @@ func TestVerifyCancel(t *testing.T) { return err } - nverified, err := res.VerifyFiles(ctx, tempdir) + nverified, err := res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) rtest.Equals(t, 0, nverified) rtest.Assert(t, err != nil, "nil error from VerifyFiles") rtest.Equals(t, 1, len(errs)) @@ -915,7 +928,7 @@ func TestRestorerSparseFiles(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err = res.RestoreTo(ctx, tempdir) + _, err = res.RestoreTo(ctx, tempdir) rtest.OK(t, err) filename := filepath.Join(tempdir, "zeros") @@ -952,15 +965,17 @@ func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSna t.Logf("base snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, baseOptions) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) // overwrite snapshot sn, id = saveSnapshot(t, repo, overwriteSnapshot, noopGetGenericAttributes) t.Logf("overwrite snapshot saved as %v", id.Str()) res = NewRestorer(repo, sn, overwriteOptions) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) - _, err := res.VerifyFiles(ctx, tempdir) + _, err = res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) rtest.OK(t, err) return tempdir @@ -1238,8 +1253,9 @@ func TestRestoreModified(t *testing.T) { t.Logf("snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, Options{Overwrite: OverwriteIfChanged}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) - n, err := res.VerifyFiles(ctx, tempdir) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + n, err := res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) rtest.OK(t, err) rtest.Equals(t, 2, n, "unexpected number of verified files") } @@ -1264,7 +1280,8 @@ func TestRestoreIfChanged(t *testing.T) { t.Logf("snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, Options{}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) // modify file but maintain size and timestamp path := filepath.Join(tempdir, "foo") @@ -1283,7 +1300,8 @@ func TestRestoreIfChanged(t *testing.T) { for _, overwrite := range []OverwriteBehavior{OverwriteIfChanged, OverwriteAlways} { res = NewRestorer(repo, sn, Options{Overwrite: overwrite}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) data, err := os.ReadFile(path) rtest.OK(t, err) if overwrite == OverwriteAlways { @@ -1319,9 +1337,10 @@ func TestRestoreDryRun(t *testing.T) { t.Logf("snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, Options{DryRun: true}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) - _, err := os.Stat(tempdir) + _, err = os.Stat(tempdir) rtest.Assert(t, errors.Is(err, os.ErrNotExist), "expected no file to be created, got %v", err) } @@ -1345,7 +1364,8 @@ func TestRestoreDryRunDelete(t *testing.T) { sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) res := NewRestorer(repo, sn, Options{DryRun: true, Delete: true}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err = res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) _, err = os.Stat(tempfile) rtest.Assert(t, err == nil, "expected file to still exist, got error %v", err) @@ -1463,14 +1483,14 @@ func TestRestoreDelete(t *testing.T) { ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) res = NewRestorer(repo, deleteSn, Options{Delete: true}) if test.selectFilter != nil { res.SelectFilter = test.selectFilter } - err = res.RestoreTo(ctx, tempdir) + _, err = res.RestoreTo(ctx, tempdir) rtest.OK(t, err) for fn, shouldExist := range test.fileState { @@ -1503,7 +1523,7 @@ func TestRestoreToFile(t *testing.T) { sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) res := NewRestorer(repo, sn, Options{}) - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.Assert(t, strings.Contains(err.Error(), "cannot create target directory"), "unexpected error %v", err) } @@ -1535,7 +1555,8 @@ func TestRestorerLongPath(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rtest.OK(t, res.RestoreTo(ctx, tmp)) - _, err = res.VerifyFiles(ctx, tmp) + countRestoredFiles, err := res.RestoreTo(ctx, tmp) + rtest.OK(t, err) + _, err = res.VerifyFiles(ctx, tmp, countRestoredFiles, nil) rtest.OK(t, err) } diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index 27d990af4be..c4e8149b247 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -37,7 +37,7 @@ func TestRestorerRestoreEmptyHardlinkedFields(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) f1, err := os.Stat(filepath.Join(tempdir, "dirtest/file1")) @@ -96,7 +96,7 @@ func testRestorerProgressBar(t *testing.T, dryRun bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) progress.Finish() @@ -126,7 +126,8 @@ func TestRestorePermissions(t *testing.T) { t.Logf("snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, Options{}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) for _, overwrite := range []OverwriteBehavior{OverwriteIfChanged, OverwriteAlways} { // tamper with permissions @@ -134,7 +135,8 @@ func TestRestorePermissions(t *testing.T) { rtest.OK(t, os.Chmod(path, 0o700)) res = NewRestorer(repo, sn, Options{Overwrite: overwrite}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) fi, err := os.Stat(path) rtest.OK(t, err) rtest.Equals(t, fs.FileMode(0o600), fi.Mode().Perm(), "unexpected permissions") diff --git a/internal/restorer/restorer_windows_test.go b/internal/restorer/restorer_windows_test.go index 3f6c8472ba0..4764bed2d4c 100644 --- a/internal/restorer/restorer_windows_test.go +++ b/internal/restorer/restorer_windows_test.go @@ -181,7 +181,7 @@ func runAttributeTests(t *testing.T, fileInfo NodeInfo, existingFileAttr FileAtt ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, testDir) + _, err := res.RestoreTo(ctx, testDir) rtest.OK(t, err) mainFilePath := path.Join(testDir, fileInfo.parentDir, fileInfo.name) @@ -562,11 +562,11 @@ func TestRestoreDeleteCaseInsensitive(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) res = NewRestorer(repo, deleteSn, Options{Delete: true}) - err = 
res.RestoreTo(ctx, tempdir) + _, err = res.RestoreTo(ctx, tempdir) rtest.OK(t, err) // anotherfile must still exist From 19f487750ee2d8a19bcc4c8be9963ec1b97210af Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 11 Aug 2024 19:25:58 -0600 Subject: [PATCH 550/893] Add test cases and handle volume GUID paths Gracefully handle errors while checking for EA and add debug logs. --- changelog/unreleased/pull-4980 | 1 + changelog/unreleased/pull-4998 | 8 -- internal/fs/ea_windows_test.go | 76 +++++++++++ internal/restic/node_windows.go | 49 ++++--- internal/restic/node_windows_test.go | 196 +++++++++++++++++++++++++++ 5 files changed, 306 insertions(+), 24 deletions(-) delete mode 100644 changelog/unreleased/pull-4998 diff --git a/changelog/unreleased/pull-4980 b/changelog/unreleased/pull-4980 index 264f347faec..5713db7a2b1 100644 --- a/changelog/unreleased/pull-4980 +++ b/changelog/unreleased/pull-4980 @@ -8,5 +8,6 @@ Restic now completely skips the attempt to fetch extended attributes for such volumes where it is not supported. https://github.com/restic/restic/pull/4980 +https://github.com/restic/restic/pull/4998 https://github.com/restic/restic/issues/4955 https://github.com/restic/restic/issues/4950 diff --git a/changelog/unreleased/pull-4998 b/changelog/unreleased/pull-4998 deleted file mode 100644 index 23ff3dbd2fd..00000000000 --- a/changelog/unreleased/pull-4998 +++ /dev/null @@ -1,8 +0,0 @@ -Bugfix: Fix extended attributes handling for VSS snapshots - -Restic was failing to backup extended attributes for VSS snapshots -after the fix for https://github.com/restic/restic/pull/4980. -Restic now correctly handles extended attributes for VSS snapshots. - -https://github.com/restic/restic/pull/4998 -https://github.com/restic/restic/pull/4980 diff --git a/internal/fs/ea_windows_test.go b/internal/fs/ea_windows_test.go index b249f43c405..74afd7aa56a 100644 --- a/internal/fs/ea_windows_test.go +++ b/internal/fs/ea_windows_test.go @@ -10,6 +10,7 @@ import ( "os" "path/filepath" "reflect" + "strings" "syscall" "testing" "unsafe" @@ -245,3 +246,78 @@ func testSetGetEA(t *testing.T, path string, handle windows.Handle, testEAs []Ex t.Fatalf("EAs read from path %s don't match", path) } } + +func TestPathSupportsExtendedAttributes(t *testing.T) { + testCases := []struct { + name string + path string + expected bool + }{ + { + name: "System drive", + path: os.Getenv("SystemDrive") + `\`, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + supported, err := PathSupportsExtendedAttributes(tc.path) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if supported != tc.expected { + t.Errorf("Expected %v, got %v for path %s", tc.expected, supported, tc.path) + } + }) + } + + // Test with an invalid path + _, err := PathSupportsExtendedAttributes("Z:\\NonExistentPath-UAS664da5s4dyu56das45f5as") + if err == nil { + t.Error("Expected an error for non-existent path, but got nil") + } +} + +func TestGetVolumePathName(t *testing.T) { + tempDirVolume := filepath.VolumeName(os.TempDir()) + testCases := []struct { + name string + path string + expectedPrefix string + }{ + { + name: "Root directory", + path: os.Getenv("SystemDrive") + `\`, + expectedPrefix: os.Getenv("SystemDrive"), + }, + { + name: "Nested directory", + path: os.Getenv("SystemDrive") + `\Windows\System32`, + expectedPrefix: os.Getenv("SystemDrive"), + }, + { + name: "Temp directory", + path: os.TempDir() + `\`, + expectedPrefix: tempDirVolume, + }, 
+ } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + volumeName, err := GetVolumePathName(tc.path) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if !strings.HasPrefix(volumeName, tc.expectedPrefix) { + t.Errorf("Expected volume name to start with %s, but got %s", tc.expectedPrefix, volumeName) + } + }) + } + + // Test with an invalid path + _, err := GetVolumePathName("Z:\\NonExistentPath") + if err == nil { + t.Error("Expected an error for non-existent path, but got nil") + } +} diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 2ca7e42e6d3..bce01ccad04 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -42,6 +42,7 @@ const ( extendedPathPrefix = `\\?\` uncPathPrefix = `\\?\UNC\` globalRootPrefix = `\\?\GLOBALROOT\` + volumeGUIDPrefix = `\\?\Volume{` ) // mknod is not supported on Windows. @@ -422,15 +423,21 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { } // If not found, check if EA is supported with manually prepared volume name isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeName + `\`) - // If the prepared volume name is not valid, we will next fetch the actual volume name. + // If the prepared volume name is not valid, we will fetch the actual volume name next. if err != nil && !errors.Is(err, windows.DNS_ERROR_INVALID_NAME) { - return false, err + debug.Log("Error checking if extended attributes are supported for prepared volume name %s: %v", volumeName, err) + // There can be multiple errors like path does not exist, bad network path, etc. + // We just gracefully disallow extended attributes for such cases. + return false, nil + } } // If an entry is not found, get the actual volume name using the GetVolumePathName function volumeNameActual, err := fs.GetVolumePathName(path) if err != nil { - return false, err + debug.Log("Error getting actual volume name %s for path %s: %v", volumeName, path, err) + // There can be multiple errors like path does not exist, bad network path, etc. + // We just gracefully disallow extended attributes for such cases. + return false, nil + } if volumeNameActual != volumeName { // If the actual volume name is different, check cache for the actual volume name @@ -441,11 +448,19 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { } // If the actual volume name is different and is not in the map, again check if the new volume supports extended attributes with the actual volume name isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeNameActual + `\`) + // Debug log for cases where the prepared volume name is not valid if err != nil { - return false, err + debug.Log("Error checking if extended attributes are supported for actual volume name %s: %v", volumeNameActual, err) + // There can be multiple errors like path does not exist, bad network path, etc. + // We just gracefully disallow extended attributes for such cases. + return false, nil + } else { + debug.Log("Checking extended attributes. 
Prepared volume name: %s, actual volume name: %s, isEASupportedVolume: %v, err: %v", volumeName, volumeNameActual, isEASupportedVolume, err) } } - eaSupportedVolumesMap.Store(volumeNameActual, isEASupportedVolume) + if volumeNameActual != "" { + eaSupportedVolumesMap.Store(volumeNameActual, isEASupportedVolume) + } return isEASupportedVolume, err } @@ -460,17 +475,19 @@ func prepareVolumeName(path string) (volumeName string, err error) { volumeName = filepath.VolumeName(path) } } else { - if strings.HasPrefix(path, uncPathPrefix) { - // Convert \\?\UNC\ extended path to standard path to get the volume name correctly - path = `\\` + path[len(uncPathPrefix):] - } else if strings.HasPrefix(path, extendedPathPrefix) { - //Extended length path prefix needs to be trimmed to get the volume name correctly - path = path[len(extendedPathPrefix):] - } else { - // Use the absolute path - path, err = filepath.Abs(path) - if err != nil { - return "", fmt.Errorf("failed to get absolute path: %w", err) + if !strings.HasPrefix(path, volumeGUIDPrefix) { // Handle volume GUID path + if strings.HasPrefix(path, uncPathPrefix) { + // Convert \\?\UNC\ extended path to standard path to get the volume name correctly + path = `\\` + path[len(uncPathPrefix):] + } else if strings.HasPrefix(path, extendedPathPrefix) { + //Extended length path prefix needs to be trimmed to get the volume name correctly + path = path[len(extendedPathPrefix):] + } else { + // Use the absolute path + path, err = filepath.Abs(path) + if err != nil { + return "", fmt.Errorf("failed to get absolute path: %w", err) + } } } volumeName = filepath.VolumeName(path) diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go index 4fd57bbb757..6ba25559b7a 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/restic/node_windows_test.go @@ -12,6 +12,7 @@ import ( "strings" "syscall" "testing" + "time" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" @@ -329,3 +330,198 @@ func TestRestoreExtendedAttributes(t *testing.T) { } } } + +func TestPrepareVolumeName(t *testing.T) { + currentVolume := filepath.VolumeName(func() string { + // Get the current working directory + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get current working directory: %v", err) + } + return pwd + }()) + // Create a temporary directory for the test + tempDir, err := os.MkdirTemp("", "restic_test_"+time.Now().Format("20060102150405")) + if err != nil { + t.Fatalf("Failed to create temp directory: %v", err) + } + defer os.RemoveAll(tempDir) + + // Create a long file name + longFileName := `\Very\Long\Path\That\Exceeds\260\Characters\` + strings.Repeat(`\VeryLongFolderName`, 20) + `\\LongFile.txt` + longFilePath := filepath.Join(tempDir, longFileName) + + tempDirVolume := filepath.VolumeName(tempDir) + // Create the file + content := []byte("This is a test file with a very long name.") + err = os.MkdirAll(filepath.Dir(longFilePath), 0755) + test.OK(t, err) + if err != nil { + t.Fatalf("Failed to create long folder: %v", err) + } + err = os.WriteFile(longFilePath, content, 0644) + test.OK(t, err) + if err != nil { + t.Fatalf("Failed to create long file: %v", err) + } + osVolumeGUIDPath := getOSVolumeGUIDPath(t) + osVolumeGUIDVolume := filepath.VolumeName(osVolumeGUIDPath) + + testCases := []struct { + name string + path string + expectedVolume string + expectError bool + expectedEASupported bool + isRealPath bool + }{ + { + name: "Network drive path", + path: `Z:\Shared\Documents`, + 
expectedVolume: `Z:`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Subst drive path", + path: `X:\Virtual\Folder`, + expectedVolume: `X:`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Windows reserved path", + path: `\\.\` + os.Getenv("SystemDrive") + `\System32\drivers\etc\hosts`, + expectedVolume: `\\.\` + os.Getenv("SystemDrive"), + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Long UNC path", + path: `\\?\UNC\LongServerName\VeryLongShareName\DeepPath\File.txt`, + expectedVolume: `\\LongServerName\VeryLongShareName`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Volume GUID path", + path: osVolumeGUIDPath, + expectedVolume: osVolumeGUIDVolume, + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Volume GUID path with subfolder", + path: osVolumeGUIDPath + `\Windows`, + expectedVolume: osVolumeGUIDVolume, + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Standard path", + path: os.Getenv("SystemDrive") + `\Users\`, + expectedVolume: os.Getenv("SystemDrive"), + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Extended length path", + path: longFilePath, + expectedVolume: tempDirVolume, + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "UNC path", + path: `\\server\share\folder`, + expectedVolume: `\\server\share`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Extended UNC path", + path: `\\?\UNC\server\share\folder`, + expectedVolume: `\\server\share`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Volume Shadow Copy path", + path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1\Users\test`, + expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Relative path", + path: `folder\subfolder`, + + expectedVolume: currentVolume, // Get current volume + expectError: false, + expectedEASupported: true, + }, + { + name: "Empty path", + path: ``, + expectedVolume: currentVolume, + expectError: false, + expectedEASupported: true, + isRealPath: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + isEASupported, err := checkAndStoreEASupport(tc.path) + test.OK(t, err) + test.Equals(t, tc.expectedEASupported, isEASupported) + + volume, err := prepareVolumeName(tc.path) + + if tc.expectError { + test.Assert(t, err != nil, "Expected an error, but got none") + } else { + test.OK(t, err) + } + test.Equals(t, tc.expectedVolume, volume) + + if tc.isRealPath { + isEASupportedVolume, err := fs.PathSupportsExtendedAttributes(volume + `\`) + // If the prepared volume name is not valid, we will next fetch the actual volume name. 
+ test.OK(t, err) + + test.Equals(t, tc.expectedEASupported, isEASupportedVolume) + + actualVolume, err := fs.GetVolumePathName(tc.path) + test.OK(t, err) + test.Equals(t, tc.expectedVolume, actualVolume) + } + }) + } +} + +func getOSVolumeGUIDPath(t *testing.T) string { + // Get the path of the OS drive (usually C:\) + osDrive := os.Getenv("SystemDrive") + "\\" + + // Convert to a volume GUID path + volumeName, err := windows.UTF16PtrFromString(osDrive) + test.OK(t, err) + if err != nil { + return "" + } + + var volumeGUID [windows.MAX_PATH]uint16 + err = windows.GetVolumeNameForVolumeMountPoint(volumeName, &volumeGUID[0], windows.MAX_PATH) + test.OK(t, err) + if err != nil { + return "" + } + + return windows.UTF16ToString(volumeGUID[:]) +} From 56f28c9bd5a06620a82373969a698d452c0ec31f Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Tue, 30 Jul 2024 19:06:18 -0400 Subject: [PATCH 551/893] main: return an exit code (12) for "bad password" errors --- changelog/unreleased/pull-4959 | 6 ++++++ cmd/restic/cmd_backup.go | 1 + cmd/restic/cmd_cat.go | 1 + cmd/restic/cmd_check.go | 1 + cmd/restic/cmd_copy.go | 1 + cmd/restic/cmd_debug.go | 1 + cmd/restic/cmd_diff.go | 1 + cmd/restic/cmd_dump.go | 1 + cmd/restic/cmd_find.go | 1 + cmd/restic/cmd_forget.go | 1 + cmd/restic/cmd_key_add.go | 1 + cmd/restic/cmd_key_list.go | 1 + cmd/restic/cmd_key_passwd.go | 1 + cmd/restic/cmd_key_remove.go | 1 + cmd/restic/cmd_list.go | 1 + cmd/restic/cmd_ls.go | 1 + cmd/restic/cmd_migrate.go | 1 + cmd/restic/cmd_mount.go | 1 + cmd/restic/cmd_prune.go | 1 + cmd/restic/cmd_recover.go | 1 + cmd/restic/cmd_repair_index.go | 1 + cmd/restic/cmd_repair_packs.go | 1 + cmd/restic/cmd_repair_snapshots.go | 1 + cmd/restic/cmd_restore.go | 1 + cmd/restic/cmd_rewrite.go | 1 + cmd/restic/cmd_self_update.go | 1 + cmd/restic/cmd_snapshots.go | 1 + cmd/restic/cmd_stats.go | 1 + cmd/restic/cmd_tag.go | 1 + cmd/restic/global.go | 2 +- cmd/restic/main.go | 5 +++++ doc/075_scripting.rst | 2 ++ 32 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/pull-4959 diff --git a/changelog/unreleased/pull-4959 b/changelog/unreleased/pull-4959 new file mode 100644 index 00000000000..120527e22b4 --- /dev/null +++ b/changelog/unreleased/pull-4959 @@ -0,0 +1,6 @@ +Enhancement: Return exit code 12 for "bad password" + +Restic now returns exit code 12 when it can't open the repository +because of a bad password. + +https://github.com/restic/restic/pull/4959 diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 9957b578493..28b6c7feb4a 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -43,6 +43,7 @@ Exit status is 1 if there was a fatal error (no snapshot created). Exit status is 3 if some source data could not be read (incomplete snapshot created). Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, PreRun: func(_ *cobra.Command, _ []string) { if backupOptions.Host == "" { diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index d3e98b2ffd1..ac03798d259 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -27,6 +27,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 9cccc0609fc..b0749e0221b 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -39,6 +39,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index d7761174a00..40015b13c57 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -38,6 +38,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, RunE: func(cmd *cobra.Command, args []string) error { return runCopy(cmd.Context(), copyOptions, globalOptions, args) diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 74c21df24d4..18b4b763175 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -47,6 +47,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 6793184b136..24f445b6492 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -43,6 +43,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 9c0fe535e68..a5794ad3087 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -38,6 +38,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index aebca594e56..f84ad43c335 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -37,6 +37,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 27b8f4f7490..01fe0e606d0 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -39,6 +39,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. 
+Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index c9f0ef23319..2737410a05b 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, } diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index ae751a48751..1c70cce8a74 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -27,6 +27,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index 723acaaab65..9bb1417494d 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, } diff --git a/cmd/restic/cmd_key_remove.go b/cmd/restic/cmd_key_remove.go index c4c24fdb706..3cb2e0bd789 100644 --- a/cmd/restic/cmd_key_remove.go +++ b/cmd/restic/cmd_key_remove.go @@ -24,6 +24,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 060bca8718d..4aa9f43bb17 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 76e192b6c9e..7c712e48168 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -43,6 +43,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index 7e472ff121c..2cc44bff0a5 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -26,6 +26,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 3e0b159bec0..0b79afe45e1 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -68,6 +68,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 7e706ccf899..e19c2e04b16 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -32,6 +32,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 4e8b8c07763..0ff6e2d66ee 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -26,6 +26,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index e6b6e9fa500..83c1bfa7f9b 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index b0afefb2de1..290c3734e74 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -27,6 +27,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index fc221ebea29..385854312ad 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -41,6 +41,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index d71cb768387..eb437a11de7 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -36,6 +36,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. 
Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 463720ee146..d1088d00bd0 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -42,6 +42,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_self_update.go b/cmd/restic/cmd_self_update.go index 0fce41241dc..09c86bf2ca9 100644 --- a/cmd/restic/cmd_self_update.go +++ b/cmd/restic/cmd_self_update.go @@ -28,6 +28,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 826ab55ec36..442c5737519 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -27,6 +27,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 5d105e31cb8..552eaff6d3e 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -53,6 +53,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index ea73955f012..47e3c02ad58 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -29,6 +29,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 846f3339fe1..22aa8a2904e 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -493,7 +493,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } } if err != nil { - if errors.IsFatal(err) { + if errors.IsFatal(err) || errors.Is(err, repository.ErrNoKeyFound) { return nil, err } return nil, errors.Fatalf("%s", err) diff --git a/cmd/restic/main.go b/cmd/restic/main.go index fda53ca0e8d..b366a6ad3f8 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -18,6 +18,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/options" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) @@ -164,6 +165,8 @@ func main() { exitMessage = fmt.Sprintf("Warning: %v", err) case errors.IsFatal(err): exitMessage = err.Error() + case errors.Is(err, repository.ErrNoKeyFound): + exitMessage = fmt.Sprintf("Fatal: %v", err) case err != nil: exitMessage = fmt.Sprintf("%+v", err) @@ -186,6 +189,8 @@ func main() { exitCode = 10 case restic.IsAlreadyLocked(err): exitCode = 11 + case errors.Is(err, repository.ErrNoKeyFound): + exitCode = 12 case errors.Is(err, context.Canceled): exitCode = 130 default: diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 90bf111d7c3..92eaaaf4a6f 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -63,6 +63,8 @@ a more specific description. +-----+----------------------------------------------------+ | 11 | Failed to lock repository (since restic 0.17.0) | +-----+----------------------------------------------------+ +| 12 | Wrong password (since restic 0.17.1) | ++-----+----------------------------------------------------+ | 130 | Restic was interrupted using SIGINT or SIGSTOP | +-----+----------------------------------------------------+ From b7d014b68528e84bc817c0299c5dd04731b1e84b Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 17 Aug 2024 00:18:13 +0530 Subject: [PATCH 552/893] Revert "repository: removed redundant prepareCache method from Repository" This reverts commit 720609f8ba6dcf44b7fe51cd9b543ee44bbbaf38. 
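Note on the exit-status change above: the sketch below is illustrative only and not part of these patches. It shows how a calling program could react to the new exit code 12 (wrong repository password, since restic 0.17.1) as opposed to other failures; it uses only the Go standard library, and the invoked command ("restic snapshots") is just an example.

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// Run restic; any subcommand that opens the repository can return exit code 12.
	cmd := exec.Command("restic", "snapshots")
	err := cmd.Run()

	var exitErr *exec.ExitError
	switch {
	case err == nil:
		fmt.Println("restic finished successfully")
	case errors.As(err, &exitErr) && exitErr.ExitCode() == 12:
		fmt.Println("wrong repository password")
	case errors.As(err, &exitErr):
		fmt.Printf("restic failed with exit code %d\n", exitErr.ExitCode())
	default:
		fmt.Printf("could not start restic: %v\n", err)
	}
}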
--- internal/checker/checker.go | 6 ++++- internal/repository/repair_index.go | 6 +++-- internal/repository/repository.go | 37 +++++++++++++++++++++++++++-- internal/restic/repository.go | 2 +- 4 files changed, 45 insertions(+), 6 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index d5e7fd1f818..031e13807f0 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -146,7 +146,11 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e return hints, append(errs, err) } - c.repo.SetIndex(c.masterIndex) + err = c.repo.SetIndex(c.masterIndex) + if err != nil { + debug.Log("SetIndex returned error: %v", err) + errs = append(errs, err) + } // compute pack size using index entries c.packs, err = pack.Size(ctx, c.repo, false) diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index c72dcfd00f4..77080925461 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -52,8 +52,10 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, return err } - repo.SetIndex(mi) - + err = repo.SetIndex(mi) + if err != nil { + return err + } packSizeFromIndex, err = pack.Size(ctx, repo, false) if err != nil { return err diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 3dc248c5eab..f7fd65c71d4 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "math" + "os" "runtime" "sort" "sync" @@ -585,8 +586,9 @@ func (r *Repository) ListPacksFromIndex(ctx context.Context, packs restic.IDSet) } // SetIndex instructs the repository to use the given index. -func (r *Repository) SetIndex(i restic.MasterIndex) { +func (r *Repository) SetIndex(i restic.MasterIndex) error { r.idx = i.(*index.MasterIndex) + return r.prepareCache() } func (r *Repository) clearIndex() { @@ -626,8 +628,12 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { return errors.New("index uses feature not supported by repository version 1") } } + if ctx.Err() != nil { + return ctx.Err() + } - return ctx.Err() + // remove index files from the cache which have been removed in the repo + return r.prepareCache() } // createIndexFromPacks creates a new index by reading all given pack files (with sizes). @@ -693,6 +699,33 @@ func (r *Repository) createIndexFromPacks(ctx context.Context, packsize map[rest return invalid, nil } +// prepareCache initializes the local cache. indexIDs is the list of IDs of +// index files still present in the repo. +func (r *Repository) prepareCache() error { + if r.Cache == nil { + return nil + } + + indexIDs := r.idx.IDs() + debug.Log("prepare cache with %d index files", len(indexIDs)) + + // clear old index files + err := r.Cache.Clear(restic.IndexFile, indexIDs) + if err != nil { + fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err) + } + + packs := r.idx.Packs(restic.NewIDSet()) + + // clear old packs + err = r.Cache.Clear(restic.PackFile, packs) + if err != nil { + fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err) + } + + return nil +} + // SearchKey finds a key with the supplied password, afterwards the config is // read and parsed. It tries at most maxKeys key files in the repo. 
func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int, keyHint string) error { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index ce8401b37d7..b18b036a733 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -22,7 +22,7 @@ type Repository interface { Key() *crypto.Key LoadIndex(ctx context.Context, p *progress.Counter) error - SetIndex(mi MasterIndex) + SetIndex(mi MasterIndex) error LookupBlob(t BlobType, id ID) []PackedBlob LookupBlobSize(t BlobType, id ID) (size uint, exists bool) From 88174cd0a480c6cd0be97771c8c82c7678444790 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 17 Aug 2024 00:21:49 +0530 Subject: [PATCH 553/893] cache: remove redundant index file cleanup addressing code review comments --- internal/repository/repository.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index f7fd65c71d4..d408e310570 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -706,19 +706,10 @@ func (r *Repository) prepareCache() error { return nil } - indexIDs := r.idx.IDs() - debug.Log("prepare cache with %d index files", len(indexIDs)) - - // clear old index files - err := r.Cache.Clear(restic.IndexFile, indexIDs) - if err != nil { - fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err) - } - packs := r.idx.Packs(restic.NewIDSet()) // clear old packs - err = r.Cache.Clear(restic.PackFile, packs) + err := r.Cache.Clear(restic.PackFile, packs) if err != nil { fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err) } From c9097994b97991cf2070b482cce1f92b5321c89c Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 17 Aug 2024 00:24:19 +0530 Subject: [PATCH 554/893] changelog: update changelog --- changelog/unreleased/issue-4934 | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/changelog/unreleased/issue-4934 b/changelog/unreleased/issue-4934 index 03194168e2d..6891ca20419 100644 --- a/changelog/unreleased/issue-4934 +++ b/changelog/unreleased/issue-4934 @@ -1,9 +1,8 @@ -Enhancement: Clear removed snapshots, index and pack files from the local cache +Enhancement: Clear removed snapshots from local cache of the current host -Restic did not clear removed snapshots from the cache after the `forget` -operation; only indexes and pack files were removed automatically. -Restic now automatically clears removed indexes, packs and snapshots from the -local cache. +Restic only removed snapshots from the cache on the host that runs the `forget` command. +On other hosts that use the same repository, the old snapshots remained in the cache. +Restic now automatically clears old snapshots from the local cache of the current host. 
https://github.com/restic/restic/issues/4934 https://github.com/restic/restic/pull/4981 \ No newline at end of file From ceb45d9816ea946a588f780d66166483e0ecf2de Mon Sep 17 00:00:00 2001 From: Andreas Deininger Date: Sat, 17 Aug 2024 12:39:41 +0200 Subject: [PATCH 555/893] GitHub test actions: fix warnings 'Restore cache failed' --- .github/workflows/tests.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e3277625adc..bf4e63bb7d6 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -56,6 +56,9 @@ jobs: GOPROXY: https://proxy.golang.org steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go ${{ matrix.go }} uses: actions/setup-go@v5 with: @@ -129,9 +132,6 @@ jobs: echo $Env:USERPROFILE\tar\bin >> $Env:GITHUB_PATH if: matrix.os == 'windows-latest' - - name: Check out code - uses: actions/checkout@v4 - - name: Build with build.go run: | go run build.go @@ -220,14 +220,14 @@ jobs: name: Cross Compile for subset ${{ matrix.subset }} steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go ${{ env.latest_go }} uses: actions/setup-go@v5 with: go-version: ${{ env.latest_go }} - - name: Check out code - uses: actions/checkout@v4 - - name: Cross-compile for subset ${{ matrix.subset }} run: | mkdir build-output build-output-debug @@ -242,14 +242,14 @@ jobs: # allow annotating code in the PR checks: write steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go ${{ env.latest_go }} uses: actions/setup-go@v5 with: go-version: ${{ env.latest_go }} - - name: Check out code - uses: actions/checkout@v4 - - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: From 97df01b9b861980056cd237bb75a8fc0b956d207 Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Sat, 17 Aug 2024 15:00:39 -0400 Subject: [PATCH 556/893] docs: correct wrong exit_error message field name --- doc/075_scripting.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 92eaaaf4a6f..94749e6fb14 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -100,7 +100,7 @@ It will hold the error message and the exit code. +----------------------+-------------------------------------------+ | ``code`` | Exit code (see above chart) | +----------------------+-------------------------------------------+ -| ``error`` | Error message | +| ``message`` | Error message | +----------------------+-------------------------------------------+ Output formats From 1b8ca32e7dac3d4187b600e463b2f23784e334e9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 18 Aug 2024 19:41:58 +0200 Subject: [PATCH 557/893] Mention RESTIC_HOST environment variable in docs --- doc/040_backup.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index f1f355c531e..b53ae8d095d 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -704,6 +704,7 @@ environment variables. 
The following lists these environment variables: RESTIC_TLS_CLIENT_CERT Location of TLS client certificate and private key (replaces --tls-client-cert) RESTIC_CACHE_DIR Location of the cache directory RESTIC_COMPRESSION Compression mode (only available for repository format version 2) + RESTIC_HOST Only consider snapshots for this host / Set the hostname for the snapshot manually (replaces --host) RESTIC_PROGRESS_FPS Frames per second by which the progress bar is updated RESTIC_PACK_SIZE Target size for pack files RESTIC_READ_CONCURRENCY Concurrency for file reads From 551dfee707d4aff803f23b23aff56169aedb6d5b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 18 Aug 2024 19:45:54 +0200 Subject: [PATCH 558/893] Improve description for no password on secondary repo --- cmd/restic/secondary_repo.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/secondary_repo.go b/cmd/restic/secondary_repo.go index 9a3eb5fe2c6..44621afa18f 100644 --- a/cmd/restic/secondary_repo.go +++ b/cmd/restic/secondary_repo.go @@ -50,7 +50,7 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo f.StringVarP(&opts.PasswordFile, "from-password-file", "", "", "`file` to read the source repository password from (default: $RESTIC_FROM_PASSWORD_FILE)") f.StringVarP(&opts.KeyHint, "from-key-hint", "", "", "key ID of key to try decrypting the source repository first (default: $RESTIC_FROM_KEY_HINT)") f.StringVarP(&opts.PasswordCommand, "from-password-command", "", "", "shell `command` to obtain the source repository password from (default: $RESTIC_FROM_PASSWORD_COMMAND)") - f.BoolVar(&opts.InsecureNoPassword, "from-insecure-no-password", false, "use an empty password for the source repository, must be passed to every restic command (insecure)") + f.BoolVar(&opts.InsecureNoPassword, "from-insecure-no-password", false, "use an empty password for the source repository (insecure)") opts.Repo = os.Getenv("RESTIC_FROM_REPOSITORY") opts.RepositoryFile = os.Getenv("RESTIC_FROM_REPOSITORY_FILE") From 5988d825b7bc9d7fed1389f30850e615801278ff Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Aug 2024 23:48:45 +0200 Subject: [PATCH 559/893] group commands and make features/options visible --- cmd/restic/cmd_backup.go | 1 + cmd/restic/cmd_cache.go | 1 + cmd/restic/cmd_cat.go | 1 + cmd/restic/cmd_check.go | 1 + cmd/restic/cmd_copy.go | 1 + cmd/restic/cmd_debug.go | 5 +++-- cmd/restic/cmd_diff.go | 1 + cmd/restic/cmd_dump.go | 1 + cmd/restic/cmd_features.go | 2 +- cmd/restic/cmd_find.go | 1 + cmd/restic/cmd_forget.go | 1 + cmd/restic/cmd_init.go | 1 + cmd/restic/cmd_key.go | 1 + cmd/restic/cmd_list.go | 1 + cmd/restic/cmd_ls.go | 1 + cmd/restic/cmd_migrate.go | 1 + cmd/restic/cmd_mount.go | 1 + cmd/restic/cmd_options.go | 2 +- cmd/restic/cmd_prune.go | 1 + cmd/restic/cmd_recover.go | 1 + cmd/restic/cmd_repair.go | 5 +++-- cmd/restic/cmd_restore.go | 1 + cmd/restic/cmd_rewrite.go | 1 + cmd/restic/cmd_snapshots.go | 1 + cmd/restic/cmd_stats.go | 1 + cmd/restic/cmd_tag.go | 1 + cmd/restic/cmd_unlock.go | 1 + cmd/restic/main.go | 16 ++++++++++++++++ 28 files changed, 47 insertions(+), 6 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 28b6c7feb4a..562108a3370 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -55,6 +55,7 @@ Exit status is 12 if the password is incorrect. 
backupOptions.Host = hostname } }, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go index e71d383656f..e54c734518e 100644 --- a/cmd/restic/cmd_cache.go +++ b/cmd/restic/cmd_cache.go @@ -28,6 +28,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { return runCache(cacheOptions, globalOptions, args) diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index ac03798d259..6160c54dfc2 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -29,6 +29,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runCat(cmd.Context(), globalOptions, args) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index b0749e0221b..dcf7f27df5a 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -41,6 +41,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 40015b13c57..cfe574d3527 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -40,6 +40,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runCopy(cmd.Context(), copyOptions, globalOptions, args) }, diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 18b4b763175..2a48762d106 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -29,8 +29,9 @@ import ( ) var cmdDebug = &cobra.Command{ - Use: "debug", - Short: "Debug commands", + Use: "debug", + Short: "Debug commands", + GroupID: cmdGroupDefault, } var cmdDebugDump = &cobra.Command{ diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 24f445b6492..594e387e895 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -45,6 +45,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runDiff(cmd.Context(), diffOptions, globalOptions, args) diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index a5794ad3087..7d6652e1744 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -40,6 +40,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. 
`, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runDump(cmd.Context(), dumpOptions, globalOptions, args) diff --git a/cmd/restic/cmd_features.go b/cmd/restic/cmd_features.go index 497013696ea..a2f04be311c 100644 --- a/cmd/restic/cmd_features.go +++ b/cmd/restic/cmd_features.go @@ -31,7 +31,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. `, - Hidden: true, + GroupID: cmdGroupAdvanced, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { if len(args) != 0 { diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index f84ad43c335..cb5c0e5e080 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -39,6 +39,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runFind(cmd.Context(), findOptions, globalOptions, args) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 01fe0e606d0..58a9d25b71d 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -41,6 +41,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go index 3c0319e55d8..2a2aae1dc85 100644 --- a/cmd/restic/cmd_init.go +++ b/cmd/restic/cmd_init.go @@ -26,6 +26,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runInit(cmd.Context(), initOptions, globalOptions, args) diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go index c687eca53d6..80e892f20c6 100644 --- a/cmd/restic/cmd_key.go +++ b/cmd/restic/cmd_key.go @@ -11,6 +11,7 @@ var cmdKey = &cobra.Command{ The "key" command allows you to set multiple access keys or passwords per repository. `, + GroupID: cmdGroupDefault, } func init() { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 4aa9f43bb17..1a4791e311d 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -26,6 +26,7 @@ Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runList(cmd.Context(), globalOptions, args) }, diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 7c712e48168..69e2781037e 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -46,6 +46,7 @@ Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runLs(cmd.Context(), lsOptions, globalOptions, args) }, diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index 2cc44bff0a5..5c3e425edfc 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -29,6 +29,7 @@ Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() defer cancel() diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 0b79afe45e1..2f57a6d1f7d 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -71,6 +71,7 @@ Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runMount(cmd.Context(), mountOptions, globalOptions, args) }, diff --git a/cmd/restic/cmd_options.go b/cmd/restic/cmd_options.go index 4cd574b6811..9c07b262674 100644 --- a/cmd/restic/cmd_options.go +++ b/cmd/restic/cmd_options.go @@ -20,7 +20,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. `, - Hidden: true, + GroupID: cmdGroupAdvanced, DisableAutoGenTag: true, Run: func(_ *cobra.Command, _ []string) { fmt.Printf("All Extended Options:\n") diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index e19c2e04b16..e8473bd6f2f 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -34,6 +34,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { term, cancel := setupTermstatus() diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 0ff6e2d66ee..a6ef59cc205 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -28,6 +28,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return runRecover(cmd.Context(), globalOptions) diff --git a/cmd/restic/cmd_repair.go b/cmd/restic/cmd_repair.go index aefe02f3c46..65a903a49c2 100644 --- a/cmd/restic/cmd_repair.go +++ b/cmd/restic/cmd_repair.go @@ -5,8 +5,9 @@ import ( ) var cmdRepair = &cobra.Command{ - Use: "repair", - Short: "Repair the repository", + Use: "repair", + Short: "Repair the repository", + GroupID: cmdGroupDefault, } func init() { diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index eb437a11de7..c58b0b80df2 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -38,6 +38,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index d1088d00bd0..7788016b78e 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -44,6 +44,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. 
`, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runRewrite(cmd.Context(), rewriteOptions, globalOptions, args) diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 442c5737519..42677918fcf 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -29,6 +29,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runSnapshots(cmd.Context(), snapshotOptions, globalOptions, args) diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 552eaff6d3e..dd51175d310 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -55,6 +55,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runStats(cmd.Context(), statsOptions, globalOptions, args) diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 47e3c02ad58..c7bf725e92d 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -31,6 +31,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runTag(cmd.Context(), tagOptions, globalOptions, args) diff --git a/cmd/restic/cmd_unlock.go b/cmd/restic/cmd_unlock.go index 96eef7e02db..d87cde065e6 100644 --- a/cmd/restic/cmd_unlock.go +++ b/cmd/restic/cmd_unlock.go @@ -19,6 +19,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return runUnlock(cmd.Context(), unlockOptions, globalOptions) diff --git a/cmd/restic/main.go b/cmd/restic/main.go index b366a6ad3f8..4cb135c4840 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -84,6 +84,22 @@ The full documentation can be found at https://restic.readthedocs.io/ . }, } +var cmdGroupDefault = "default" +var cmdGroupAdvanced = "advanced" + +func init() { + cmdRoot.AddGroup( + &cobra.Group{ + ID: cmdGroupDefault, + Title: "Available Commands:", + }, + &cobra.Group{ + ID: cmdGroupAdvanced, + Title: "Advanced Options:", + }, + ) +} + // Distinguish commands that need the password from those that work without, // so we don't run $RESTIC_PASSWORD_COMMAND for no reason (it might prompt the // user for authentication). From 7739aa685c16acf9aac325e96f24ae5db92d4c1e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Aug 2024 23:49:20 +0200 Subject: [PATCH 560/893] Add missing DisableAutoGenTag flag for commands --- cmd/restic/cmd_copy.go | 1 + cmd/restic/cmd_debug.go | 1 + cmd/restic/cmd_key.go | 1 + cmd/restic/cmd_repair.go | 1 + 4 files changed, 4 insertions(+) diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index cfe574d3527..cd92193ac38 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -41,6 +41,7 @@ Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. 
`, GroupID: cmdGroupDefault, + DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runCopy(cmd.Context(), copyOptions, globalOptions, args) }, diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 2a48762d106..b9219249220 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -32,6 +32,7 @@ var cmdDebug = &cobra.Command{ Use: "debug", Short: "Debug commands", GroupID: cmdGroupDefault, + DisableAutoGenTag: true, } var cmdDebugDump = &cobra.Command{ diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go index 80e892f20c6..a94caa0d843 100644 --- a/cmd/restic/cmd_key.go +++ b/cmd/restic/cmd_key.go @@ -11,6 +11,7 @@ var cmdKey = &cobra.Command{ The "key" command allows you to set multiple access keys or passwords per repository. `, + DisableAutoGenTag: true, GroupID: cmdGroupDefault, } diff --git a/cmd/restic/cmd_repair.go b/cmd/restic/cmd_repair.go index 65a903a49c2..6a1a1f9dc34 100644 --- a/cmd/restic/cmd_repair.go +++ b/cmd/restic/cmd_repair.go @@ -8,6 +8,7 @@ var cmdRepair = &cobra.Command{ Use: "repair", Short: "Repair the repository", GroupID: cmdGroupDefault, + DisableAutoGenTag: true, } func init() { From e7db5febcf482f4088c5efcd37fa5688f0c865ef Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Aug 2024 23:52:21 +0200 Subject: [PATCH 561/893] update docs --- doc/manual_rest.rst | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index a7a0f96e0c3..031f4fc528c 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -28,8 +28,6 @@ Usage help is available: dump Print a backed-up file to stdout find Find a file, a directory or restic IDs forget Remove snapshots from the repository - generate Generate manual pages and auto-completion files (bash, fish, zsh, powershell) - help Help about any command init Initialize a new repository key Manage keys (passwords) list List objects in the repository @@ -41,11 +39,19 @@ Usage help is available: repair Repair the repository restore Extract the data from a snapshot rewrite Rewrite snapshots to exclude unwanted files - self-update Update the restic binary snapshots List all snapshots stats Scan the repository and show basic statistics tag Modify tags on snapshots unlock Remove locks other processes created + + Advanced Options: + features Print list of feature flags + options Print list of extended options + + Additional Commands: + generate Generate manual pages and auto-completion files (bash, fish, zsh, powershell) + help Help about any command + self-update Update the restic binary version Print version information Flags: From 2e55209b347008f3ead8bb3cb07bfc91ad199256 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 15 Aug 2024 21:17:49 +0200 Subject: [PATCH 562/893] restic: restore timestamps after extended attributes restoring the xattr containing resource forks on macOS apparently modifies the file modification timestamps. Thus, restore the timestamp after xattrs. 
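A minimal sketch of the resulting ordering, mirroring the reordering in the node.go hunk below but using simplified helper names and an early return instead of restic's collect-first-error handling: attributes are applied first and timestamps last, so that writing the resource-fork xattr cannot clobber the freshly restored modification time.

```
package main

import "fmt"

// node stands in for restic's Node type; the helper names below are
// illustrative only and do not match restic's internal API exactly.
type node struct{ path string }

func (n node) restoreExtendedAttributes() error { fmt.Println("xattrs:", n.path); return nil }
func (n node) restoreGenericAttributes() error  { fmt.Println("generic attrs:", n.path); return nil }
func (n node) restoreTimestamps() error         { fmt.Println("timestamps:", n.path); return nil }

// restoreMetadata applies metadata so that later steps cannot be invalidated
// by earlier ones: on macOS, writing the com.apple.ResourceFork xattr updates
// the file's modification time, therefore timestamps are restored last.
func (n node) restoreMetadata() error {
	steps := []func() error{
		n.restoreExtendedAttributes,
		n.restoreGenericAttributes,
		n.restoreTimestamps,
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = node{path: "/tmp/example"}.restoreMetadata()
}
```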
--- changelog/unreleased/issue-4969 | 7 +++++++ internal/restic/node.go | 12 ++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) create mode 100644 changelog/unreleased/issue-4969 diff --git a/changelog/unreleased/issue-4969 b/changelog/unreleased/issue-4969 new file mode 100644 index 00000000000..ce76a738987 --- /dev/null +++ b/changelog/unreleased/issue-4969 @@ -0,0 +1,7 @@ +Bugfix: Correctly restore timestamp for files with resource forks on macOS + +On macOS, timestamps were incorrectly restored for files with resource forks. +This has been fixed. + +https://github.com/restic/restic/issues/4969 +https://github.com/restic/restic/pull/5006 diff --git a/internal/restic/node.go b/internal/restic/node.go index 7c19882276a..6afdff64a09 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -249,22 +249,22 @@ func (node Node) restoreMetadata(path string, warn func(msg string)) error { firsterr = errors.WithStack(err) } - if err := node.RestoreTimestamps(path); err != nil { - debug.Log("error restoring timestamps for dir %v: %v", path, err) + if err := node.restoreExtendedAttributes(path); err != nil { + debug.Log("error restoring extended attributes for %v: %v", path, err) if firsterr == nil { firsterr = err } } - if err := node.restoreExtendedAttributes(path); err != nil { - debug.Log("error restoring extended attributes for %v: %v", path, err) + if err := node.restoreGenericAttributes(path, warn); err != nil { + debug.Log("error restoring generic attributes for %v: %v", path, err) if firsterr == nil { firsterr = err } } - if err := node.restoreGenericAttributes(path, warn); err != nil { - debug.Log("error restoring generic attributes for %v: %v", path, err) + if err := node.RestoreTimestamps(path); err != nil { + debug.Log("error restoring timestamps for %v: %v", path, err) if firsterr == nil { firsterr = err } From 43b36ad2b0215cf29be54b9aa7e093528de13949 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Aug 2024 23:13:54 +0200 Subject: [PATCH 563/893] restore: test timestamps for macOS resource forks are restored correctly --- internal/restic/node_test.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index 7991d33e0cb..642beadc58c 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -197,6 +197,20 @@ var nodeTests = []Node{ {"user.foo", []byte("bar")}, }, }, + { + Name: "testXattrFileMacOSResourceFork", + Type: "file", + Content: IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []ExtendedAttribute{ + {"com.apple.ResourceFork", []byte("bar")}, + }, + }, } func TestNodeRestoreAt(t *testing.T) { @@ -216,6 +230,11 @@ func TestNodeRestoreAt(t *testing.T) { extAttrArr[i].Name = strings.ToUpper(extAttrArr[i].Name) } } + for _, attr := range test.ExtendedAttributes { + if strings.HasPrefix(attr.Name, "com.apple.") && runtime.GOOS != "darwin" { + t.Skipf("attr %v only relevant on macOS", attr.Name) + } + } // tempdir might be backed by a filesystem that does not support // extended attributes From 311b27ced8a3af600de9d89a21017ff59eece106 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Aug 2024 23:14:39 +0200 Subject: [PATCH 564/893] restic: cleanup redundant code in test case --- internal/restic/node_test.go | 4 ---- 1 file changed, 4 
deletions(-) diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index 642beadc58c..ab7f66e5b50 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -248,10 +248,6 @@ func TestNodeRestoreAt(t *testing.T) { rtest.OK(t, test.CreateAt(context.TODO(), nodePath, nil)) rtest.OK(t, test.RestoreMetadata(nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) - if test.Type == "dir" { - rtest.OK(t, test.RestoreTimestamps(nodePath)) - } - fi, err := os.Lstat(nodePath) rtest.OK(t, err) From ff9ef08f65e04cfbcede42009cb24bba7b0fc46d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Aug 2024 23:52:33 +0200 Subject: [PATCH 565/893] doc/backup: link to exit code for scripting section --- doc/040_backup.rst | 7 ++++--- doc/075_scripting.rst | 2 ++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index b53ae8d095d..4c9a44c748f 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -778,11 +778,12 @@ environment variables and configuration files; see their respective manuals. Exit status codes ***************** -Restic returns one of the following exit status codes after the backup command is run: +Restic returns an exit status code after the backup command is run: * 0 when the backup was successful (snapshot with all source files created) * 1 when there was a fatal error (no snapshot created) * 3 when some source files could not be read (incomplete snapshot with remaining files created) +* further exit codes are documented in :ref:`exit-codes`. Fatal errors occur for example when restic is unable to write to the backup destination, when there are network connectivity issues preventing successful communication, or when an invalid @@ -795,5 +796,5 @@ file read errors that occurred while running the backup. If there are errors of restic will still try to complete the backup run with all the other files, and create a snapshot that then contains all but the unreadable files. -One can use these exit status codes in scripts and other automation tools, to make them aware of -the outcome of the backup run. To manually inspect the exit code in e.g. Linux, run ``echo $?``. +For use of these exit status codes in scripts and other automation tools, see :ref:`exit-codes`. +To manually inspect the exit code in e.g. Linux, run ``echo $?``. diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 94749e6fb14..c619ead7ceb 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -39,6 +39,8 @@ Note that restic will also return exit code ``1`` if a different error is encoun If there are no errors, restic will return a zero exit code and print the repository metadata. +.. _exit-codes: + Exit codes ********** From 354c2c38cc1aa47469eb77ffbdf639350b5440ab Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Aug 2024 23:53:12 +0200 Subject: [PATCH 566/893] doc/backup: move exit status codes section up --- doc/040_backup.rst | 48 +++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 4c9a44c748f..696b235cce6 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -686,6 +686,30 @@ created as it would only be written at the very (successful) end of the backup operation. Previous snapshots will still be there and will still work. 
+Exit status codes +***************** + +Restic returns an exit status code after the backup command is run: + +* 0 when the backup was successful (snapshot with all source files created) +* 1 when there was a fatal error (no snapshot created) +* 3 when some source files could not be read (incomplete snapshot with remaining files created) +* further exit codes are documented in :ref:`exit-codes`. + +Fatal errors occur for example when restic is unable to write to the backup destination, when +there are network connectivity issues preventing successful communication, or when an invalid +password or command line argument is provided. When restic returns this exit status code, one +should not expect a snapshot to have been created. + +Source file read errors occur when restic fails to read one or more files or directories that +it was asked to back up, e.g. due to permission problems. Restic displays the number of source +file read errors that occurred while running the backup. If there are errors of this type, +restic will still try to complete the backup run with all the other files, and create a +snapshot that then contains all but the unreadable files. + +For use of these exit status codes in scripts and other automation tools, see :ref:`exit-codes`. +To manually inspect the exit code in e.g. Linux, run ``echo $?``. + Environment Variables ********************* @@ -774,27 +798,3 @@ See :ref:`caching` for the rules concerning cache locations when The external programs that restic may execute include ``rclone`` (for rclone backends) and ``ssh`` (for the SFTP backend). These may respond to further environment variables and configuration files; see their respective manuals. - -Exit status codes -***************** - -Restic returns an exit status code after the backup command is run: - -* 0 when the backup was successful (snapshot with all source files created) -* 1 when there was a fatal error (no snapshot created) -* 3 when some source files could not be read (incomplete snapshot with remaining files created) -* further exit codes are documented in :ref:`exit-codes`. - -Fatal errors occur for example when restic is unable to write to the backup destination, when -there are network connectivity issues preventing successful communication, or when an invalid -password or command line argument is provided. When restic returns this exit status code, one -should not expect a snapshot to have been created. - -Source file read errors occur when restic fails to read one or more files or directories that -it was asked to back up, e.g. due to permission problems. Restic displays the number of source -file read errors that occurred while running the backup. If there are errors of this type, -restic will still try to complete the backup run with all the other files, and create a -snapshot that then contains all but the unreadable files. - -For use of these exit status codes in scripts and other automation tools, see :ref:`exit-codes`. -To manually inspect the exit code in e.g. Linux, run ``echo $?``. From 9649a9c62bd7a9db182470fda1d227ca30c5357a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 12:17:43 +0200 Subject: [PATCH 567/893] doc: use regional urls for Amazon S3 and add generic s3 provider section Split description for non-Amazon S3 providers into separate section. The section now also includes the `s3.bucket-lookup` extended option. Switch to using regional URLs for Amazon S3 to replace the need for setting the region. 
--- doc/030_preparing_a_new_repo.rst | 64 ++++++++++++++++++-------------- 1 file changed, 36 insertions(+), 28 deletions(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 87975f9fad2..462a66d75a9 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -249,28 +249,22 @@ while creating the bucket. $ export AWS_ACCESS_KEY_ID= $ export AWS_SECRET_ACCESS_KEY= +When using temporary credentials make sure to include the session token via +the environment variable ``AWS_SESSION_TOKEN``. + You can then easily initialize a repository that uses your Amazon S3 as -a backend. If the bucket does not exist it will be created in the -default location: +a backend. Make sure to use the endpoint for the correct region. The example +uses ``us-east-1``. If the bucket does not exist it will be created in that region: .. code-block:: console - $ restic -r s3:s3.amazonaws.com/bucket_name init + $ restic -r s3:s3.us-east-1.amazonaws.com/bucket_name init enter password for new repository: enter password again: - created restic repository eefee03bbd at s3:s3.amazonaws.com/bucket_name + created restic repository eefee03bbd at s3:s3.us-east-1.amazonaws.com/bucket_name Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. -If needed, you can manually specify the region to use by either setting the -environment variable ``AWS_DEFAULT_REGION`` or calling restic with an option -parameter like ``-o s3.region="us-east-1"``. If the region is not specified, -the default region is used. Afterwards, the S3 server (at least for AWS, -``s3.amazonaws.com``) will redirect restic to the correct endpoint. - -When using temporary credentials make sure to include the session token via -then environment variable ``AWS_SESSION_TOKEN``. - Until version 0.8.0, restic used a default prefix of ``restic``, so the files in the bucket were placed in a directory named ``restic``. If you want to access a repository created with an older version of restic, specify the path @@ -278,25 +272,14 @@ after the bucket name like this: .. code-block:: console - $ restic -r s3:s3.amazonaws.com/bucket_name/restic [...] + $ restic -r s3:s3.us-east-1.amazonaws.com/bucket_name/restic [...] -For an S3-compatible server that is not Amazon (like Minio, see below), -or is only available via HTTP, you can specify the URL to the server -like this: ``s3:http://server:port/bucket_name``. - .. note:: restic expects `path-style URLs `__ - like for example ``s3.us-west-2.amazonaws.com/bucket_name``. + like for example ``s3.us-west-2.amazonaws.com/bucket_name`` for Amazon S3. Virtual-hosted–style URLs like ``bucket_name.s3.us-west-2.amazonaws.com``, where the bucket name is part of the hostname are not supported. These must be converted to path-style URLs instead, for example ``s3.us-west-2.amazonaws.com/bucket_name``. - -.. note:: Certain S3-compatible servers do not properly implement the - ``ListObjectsV2`` API, most notably Ceph versions before v14.2.5. On these - backends, as a temporary workaround, you can provide the - ``-o s3.list-objects-v1=true`` option to use the older - ``ListObjects`` API instead. This option may be removed in future - versions of restic. - + See below for configuration options for S3-compatible storage from other providers. Minio Server ************ @@ -321,13 +304,38 @@ this command. .. 
code-block:: console - $ ./restic -r s3:http://localhost:9000/restic init + $ restic -r s3:http://localhost:9000/restic init enter password for new repository: enter password again: created restic repository 6ad29560f5 at s3:http://localhost:9000/restic1 Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. +S3-compatible Storage +********************* + +For an S3-compatible server that is not Amazon, you can specify the URL to the server +like this: ``s3:https://server:port/bucket_name``. + +If needed, you can manually specify the region to use by either setting the +environment variable ``AWS_DEFAULT_REGION`` or calling restic with an option +parameter like ``-o s3.region="us-east-1"``. If the region is not specified, +the default region ``us-east-1`` is used. + +To select between path-style and virtual-hosted access, the extended option +``-o s3.bucket-lookup=auto`` can be used. It supports the following values: + +- ``auto``: Default behavior. Uses ``dns`` for Amazon and Google endpoints. Uses + ``path`` for all other endpoints +- ``dns``: Use virtual-hosted-style bucket access +- ``path``: Use path-style bucket access + +Certain S3-compatible servers do not properly implement the ``ListObjectsV2`` API, +most notably Ceph versions before v14.2.5. On these backends, as a temporary +workaround, you can provide the ``-o s3.list-objects-v1=true`` option to use the +older ``ListObjects`` API instead. This option may be removed in future versions +of restic. + Wasabi ************ From 7de53a51b8c30e52de590b4e7c08a94684ed9d76 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 12:21:13 +0200 Subject: [PATCH 568/893] doc: shrink wasabi / alibaba cloud example Remove descriptions for both providers and shorten the example to the minimum. --- doc/030_preparing_a_new_repo.rst | 60 ++++++-------------------------- 1 file changed, 10 insertions(+), 50 deletions(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 462a66d75a9..a169f34ccf2 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -337,73 +337,33 @@ older ``ListObjects`` API instead. This option may be removed in future versions of restic. Wasabi -************ - -`Wasabi `__ is a low cost Amazon S3 conformant object storage provider. -Due to its S3 conformance, Wasabi can be used as a storage provider for a restic repository. +****** -- Create a Wasabi bucket using the `Wasabi Console `__. -- Determine the correct Wasabi service URL for your bucket `here `__. +S3 storage from `Wasabi `__ can be used as follows. -You must first setup the following environment variables with the -credentials of your Wasabi account. +- Determine the correct Wasabi service URL for your bucket `here `__. +- Set environment variables with the necessary account credentials .. code-block:: console $ export AWS_ACCESS_KEY_ID= $ export AWS_SECRET_ACCESS_KEY= - -Now you can easily initialize restic to use Wasabi as a backend with -this command. - -.. code-block:: console - - $ ./restic -r s3:https:/// init - enter password for new repository: - enter password again: - created restic repository xxxxxxxxxx at s3:https:/// - Please note that knowledge of your password is required to access - the repository. Losing your password means that your data is irrecoverably lost. 
+ $ restic -r s3:https:/// init Alibaba Cloud (Aliyun) Object Storage System (OSS) ************************************************** -`Alibaba OSS `__ is an -encrypted, secure, cost-effective, and easy-to-use object storage -service that enables you to store, back up, and archive large amounts -of data in the cloud. - -Alibaba OSS is S3 compatible so it can be used as a storage provider -for a restic repository with a couple of extra parameters. +S3 storage from `Alibaba OSS `__ can be used as follows. -- Determine the correct `Alibaba OSS region endpoint `__ - this will be something like ``oss-eu-west-1.aliyuncs.com`` -- You'll need the region name too - this will be something like ``oss-eu-west-1`` - -You must first setup the following environment variables with the -credentials of your Alibaba OSS account. +- Determine the correct `Alibaba OSS region endpoint `__ - this will be something like ``oss-eu-west-1.aliyuncs.com`` +- You will need the region name too - this will be something like ``oss-eu-west-1`` +- Set environment variables with the necessary account credentials .. code-block:: console $ export AWS_ACCESS_KEY_ID= $ export AWS_SECRET_ACCESS_KEY= - -Now you can easily initialize restic to use Alibaba OSS as a backend with -this command. - -.. code-block:: console - - $ ./restic -o s3.bucket-lookup=dns -o s3.region= -r s3:https:/// init - enter password for new backend: - enter password again: - created restic backend xxxxxxxxxx at s3:https:/// - Please note that knowledge of your password is required to access - the repository. Losing your password means that your data is irrecoverably lost. - -For example with an actual endpoint: - -.. code-block:: console - - $ restic -o s3.bucket-lookup=dns -o s3.region=oss-eu-west-1 -r s3:https://oss-eu-west-1.aliyuncs.com/bucketname init + $ restic -o s3.bucket-lookup=dns -o s3.region= -r s3:https:/// init OpenStack Swift *************** From 8a8f5f3986944eec170a85f850ca8dd3d13334a9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 12:24:02 +0200 Subject: [PATCH 569/893] doc: fix typos --- doc/030_preparing_a_new_repo.rst | 2 +- doc/manual_rest.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index a169f34ccf2..fd5b3112748 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -307,7 +307,7 @@ this command. $ restic -r s3:http://localhost:9000/restic init enter password for new repository: enter password again: - created restic repository 6ad29560f5 at s3:http://localhost:9000/restic1 + created restic repository 6ad29560f5 at s3:http://localhost:9000/restic Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index a7a0f96e0c3..e3a02b3865d 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -8,7 +8,7 @@ Usage help is available: .. code-block:: console - $ ./restic --help + $ restic --help restic is a backup program which allows saving multiple revisions of files and directories in an encrypted repository stored on different backends. @@ -85,7 +85,7 @@ command: .. code-block:: console - $ ./restic backup --help + $ restic backup --help The "backup" command creates a new snapshot and saves the files and directories given as the arguments. 
From b8f409723d7454c7e461d0c855105e133c172309 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Aug 2024 21:52:34 +0200 Subject: [PATCH 570/893] make timeout for slow requests configurable --- changelog/unreleased/issue-4970 | 13 +++++++++++++ cmd/restic/global.go | 1 + doc/faq.rst | 14 ++++++++++++++ internal/backend/http_transport.go | 9 ++++++++- 4 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-4970 diff --git a/changelog/unreleased/issue-4970 b/changelog/unreleased/issue-4970 new file mode 100644 index 00000000000..524e91b7566 --- /dev/null +++ b/changelog/unreleased/issue-4970 @@ -0,0 +1,13 @@ +Enhancement: Make timeout for stuck requests customizable + +Restic monitors connections to the backend to detect stuck requests. If a request +does not return any data within five minutes, restic assumes the request is stuck and +retries it. However, for large repositories it sometimes takes longer than that to +collect a list of all files, causing the following error: + +`List(data) returned error, retrying after 1s: [...]: request timeout` + +It is now possible to increase the timeout using the `--stuck-request-timeout` option. + +https://github.com/restic/restic/issues/4970 +https://github.com/restic/restic/pull/5014 diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 22aa8a2904e..375b57f98bd 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -140,6 +140,7 @@ func init() { f.UintVar(&globalOptions.PackSize, "pack-size", 0, "set target pack `size` in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)") f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)") f.StringVar(&globalOptions.HTTPUserAgent, "http-user-agent", "", "set a http user agent for outgoing http requests") + f.DurationVar(&globalOptions.StuckRequestTimeout, "stuck-request-timeout", 5*time.Minute, "`duration` after which to retry stuck requests") // Use our "generate" command instead of the cobra provided "completion" command cmdRoot.CompletionOptions.DisableDefaultCmd = true diff --git a/doc/faq.rst b/doc/faq.rst index b26398f8c8b..74dd77d7143 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -228,3 +228,17 @@ Restic backup command fails to find a valid file in Windows If the name of a file in Windows contains an invalid character, Restic will not be able to read the file. To solve this issue, consider renaming the particular file. + +What can I do in case of "request timeout" errors? +-------------------------------------------------- + +Restic monitors connections to the backend to detect stuck requests. If a request +does not return any data within five minutes, restic assumes the request is stuck and +retries it. However, for large repositories it sometimes takes longer than that to +collect a list of all files, causing the following error: + +:: + + List(data) returned error, retrying after 1s: [...]: request timeout + +In this case you can increase the timeout using the ``--stuck-request-timeout`` option. 
diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go index 5162d35715e..5a3856e412e 100644 --- a/internal/backend/http_transport.go +++ b/internal/backend/http_transport.go @@ -31,6 +31,9 @@ type TransportOptions struct { // Specify Custom User-Agent for the http Client HTTPUserAgent string + + // Timeout after which to retry stuck requests + StuckRequestTimeout time.Duration } // readPEMCertKey reads a file and returns the PEM encoded certificate and key @@ -143,7 +146,11 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { } if feature.Flag.Enabled(feature.BackendErrorRedesign) { - rt = newWatchdogRoundtripper(rt, 5*time.Minute, 128*1024) + if opts.StuckRequestTimeout == 0 { + opts.StuckRequestTimeout = 5 * time.Minute + } + + rt = newWatchdogRoundtripper(rt, opts.StuckRequestTimeout, 128*1024) } // wrap in the debug round tripper (if active) From 828c8bc1e8f7732ab6777c314ba5333abc15cf0c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 14:38:32 +0200 Subject: [PATCH 571/893] doc: describe how to handle rewrite encoding error --- doc/045_working_with_repos.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 8dba8439f1d..f31e75c8427 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -305,6 +305,13 @@ In order to preview the changes which ``rewrite`` would make, you can use the modifying the repository. Instead restic will only print the actions it would perform. +.. note:: The ``rewrite`` command verifies that it does not modify snapshots in + unexpected ways and fails with an ``cannot encode tree at "[...]" without loosing information`` + error otherwise. This can occur when rewriting a snapshot created by a newer + version of restic or some third-party implementation. + + To convert a snapshot into the format expected by the ``rewrite`` command + use ``restic repair snapshots ``. Modifying metadata of snapshots =============================== From 908d097904f153b5d7e6fcf13e798b2be2bbcc85 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 14:38:45 +0200 Subject: [PATCH 572/893] doc: mark S3 layout as deprecated --- doc/design.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/design.rst b/doc/design.rst index 7fb8b71b2ea..26f1f333fcc 100644 --- a/doc/design.rst +++ b/doc/design.rst @@ -126,8 +126,8 @@ the option ``-o local.layout=default``, valid values are ``default`` and ``s3legacy``. The option for the sftp backend is named ``sftp.layout``, for the s3 backend ``s3.layout``. -S3 Legacy Layout ----------------- +S3 Legacy Layout (deprecated) +----------------------------- Unfortunately during development the Amazon S3 backend uses slightly different paths (directory names use singular instead of plural for ``key``, @@ -152,8 +152,7 @@ the ``data`` directory. The S3 Legacy repository layout looks like this: /snapshot └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec -The S3 backend understands and accepts both forms, new backends are -always created with the default layout for compatibility reasons. +Restic 0.17 is the last version that supports the legacy layout. 
Pack Format =========== From 05077eaa204a315de6776722e38bbd1764431b69 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 14:40:04 +0200 Subject: [PATCH 573/893] doc: JSON encoder must be deterministic --- doc/design.rst | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/doc/design.rst b/doc/design.rst index 26f1f333fcc..d83ac8b91b2 100644 --- a/doc/design.rst +++ b/doc/design.rst @@ -233,7 +233,9 @@ Individual files for the index, locks or snapshots are encrypted and authenticated like Data and Tree Blobs, so the outer structure is ``IV || Ciphertext || MAC`` again. In repository format version 1 the plaintext always consists of a JSON document which must either be an -object or an array. +object or an array. The JSON encoder must deterministically encode the +document and should match the behavior of the Go standard library implementation +in ``encoding/json``. Repository format version 2 adds support for compression. The plaintext now starts with a header to indicate the encoding version to distinguish @@ -472,6 +474,10 @@ A snapshot references a tree by the SHA-256 hash of the JSON string representation of its contents. Trees and data are saved in pack files in a subdirectory of the directory ``data``. +The JSON encoder must deterministically encode the document and should +match the behavior of the Go standard library implementation in ``encoding/json``. +This ensures that trees can be properly deduplicated. + The command ``restic cat blob`` can be used to inspect the tree referenced above (piping the output of the command to ``jq .`` so that the JSON is indented): From dc5d3fc4732bc1c0c20769b62084b963801245b4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 14:40:38 +0200 Subject: [PATCH 574/893] doc: full tree blob data structure is in the code --- doc/design.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/design.rst b/doc/design.rst index d83ac8b91b2..c974e997a44 100644 --- a/doc/design.rst +++ b/doc/design.rst @@ -512,12 +512,11 @@ this metadata is generated: - The name is quoted using `strconv.Quote `__ before being saved. This handles non-unicode names, but also changes the representation of names containing ``"`` or ``\``. - - The filemode saved is the mode defined by `fs.FileMode `__ masked by ``os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky`` - -When the entry references a directory, the field ``subtree`` contains the plain text -ID of another tree object. +- When the entry references a directory, the field ``subtree`` contains the plain text + ID of another tree object. +- Check the implementation for a full struct definition. When the command ``restic cat blob`` is used, the plaintext ID is needed to print a tree. The tree referenced above can be dumped as follows: From 36c4475ad9e0a44db05e20509834f98c4abea64a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 15:32:43 +0200 Subject: [PATCH 575/893] rest: improve handling of HTTP2 goaway The HTTP client can only retry HTTP2 requests after receiving a GOAWAY response if it can rewind the body. As we use a custom data type, explicitly provide an implementation of `GetBody`. 
--- changelog/unreleased/pull-5018 | 13 +++++++++++++ internal/backend/rest/rest.go | 6 ++++++ 2 files changed, 19 insertions(+) create mode 100644 changelog/unreleased/pull-5018 diff --git a/changelog/unreleased/pull-5018 b/changelog/unreleased/pull-5018 new file mode 100644 index 00000000000..1b7b9f428f0 --- /dev/null +++ b/changelog/unreleased/pull-5018 @@ -0,0 +1,13 @@ +Bugfix: Improve HTTP2 support for rest backend + +If rest-server tried to gracefully shut down an HTTP2 connection still used by the client, +this could result in the following error. + +``` +http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error +``` + +This has been fixed. + +https://github.com/restic/restic/pull/5018 +https://forum.restic.net/t/receiving-http2-goaway-messages-with-windows-restic-v0-17-0/8367 diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go index 1af88ec3f8f..d0a08175b57 100644 --- a/internal/backend/rest/rest.go +++ b/internal/backend/rest/rest.go @@ -143,6 +143,12 @@ func (b *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindR if err != nil { return errors.WithStack(err) } + req.GetBody = func() (io.ReadCloser, error) { + if err := rd.Rewind(); err != nil { + return nil, err + } + return io.NopCloser(rd), nil + } req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Accept", ContentTypeV2) From e24dd5a1627d19b412c1a81554c32e903f114c1f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 22 Aug 2024 23:16:12 +0200 Subject: [PATCH 576/893] backend/retry: don't trip circuit breaker if context is canceled When the context used for a load operation is canceled, then the result is always an error independent of whether the file could be retrieved from the backend. Do not false positively trip the circuit breaker in this case. The old behavior was problematic when trying to lock a repository. When `Lock.checkForOtherLocks` listed multiple lock files in parallel and one of them fails to load, then all other loads were canceled. This cancelation was remembered by the circuit breaker, such that locking retries would fail. --- changelog/unreleased/pull-5011 | 10 ++++++++ internal/backend/retry/backend_retry.go | 5 ++-- internal/backend/retry/backend_retry_test.go | 24 ++++++++++++++++++++ 3 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 changelog/unreleased/pull-5011 diff --git a/changelog/unreleased/pull-5011 b/changelog/unreleased/pull-5011 new file mode 100644 index 00000000000..8bd5ef5323a --- /dev/null +++ b/changelog/unreleased/pull-5011 @@ -0,0 +1,10 @@ +Bugfix: Fix rare failures to retry locking a repository + +Restic 0.17.0 could in rare cases fail to retry locking a repository if +one of the lock files failed to load. The lock operation failed with error +`unable to create lock in backend: circuit breaker open for file ` + +The error handling has been fixed to correctly retry locking the repository. 
+ +https://github.com/restic/restic/issues/5005 +https://github.com/restic/restic/pull/5011 diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index 8d0f42bfdc3..92c285c4b36 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -209,9 +209,10 @@ func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offse return be.Backend.Load(ctx, h, length, offset, consumer) }) - if feature.Flag.Enabled(feature.BackendErrorRedesign) && err != nil && !be.IsPermanentError(err) { + if feature.Flag.Enabled(feature.BackendErrorRedesign) && err != nil && ctx.Err() == nil && !be.IsPermanentError(err) { // We've exhausted the retries, the file is likely inaccessible. By excluding permanent - // errors, not found or truncated files are not recorded. + // errors, not found or truncated files are not recorded. Also ignore errors if the context + // was canceled. be.failedLoads.LoadOrStore(key, time.Now()) } diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go index fd76200d406..ffb8ae18699 100644 --- a/internal/backend/retry/backend_retry_test.go +++ b/internal/backend/retry/backend_retry_test.go @@ -357,6 +357,30 @@ func TestBackendLoadCircuitBreaker(t *testing.T) { test.Equals(t, notFound, err, "expected circuit breaker to reset, got %v") } +func TestBackendLoadCircuitBreakerCancel(t *testing.T) { + cctx, cancel := context.WithCancel(context.Background()) + be := mock.NewBackend() + be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { + cancel() + return nil, errors.New("something") + } + nilRd := func(rd io.Reader) (err error) { + return nil + } + + TestFastRetries(t) + retryBackend := New(be, 2, nil, nil) + // canceling the context should not trip the circuit breaker + err := retryBackend.Load(cctx, backend.Handle{Name: "other"}, 0, 0, nilRd) + test.Equals(t, context.Canceled, err, "unexpected error") + + // reset context and check that the cirucit breaker does not return an error + cctx, cancel = context.WithCancel(context.Background()) + defer cancel() + err = retryBackend.Load(cctx, backend.Handle{Name: "other"}, 0, 0, nilRd) + test.Equals(t, context.Canceled, err, "unexpected error") +} + func TestBackendStatNotExists(t *testing.T) { // stat should not retry if the error matches IsNotExist notFound := errors.New("not found") From 2296fdf668468797b3ca1aa3be8b5dfeba1a9780 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Aug 2024 23:24:43 +0200 Subject: [PATCH 577/893] lock: introduce short delay between failed locking retries Failed locking attempts were immediately retried up to three times without any delay between the retries. If a lock file is not found while checking for other locks, with the reworked backend retries there is no delay between those retries. This is a problem if a backend requires a few seconds to reflect file deletions in the file listings. To work around this problem, introduce a short exponentially increasing delay between the retries. The number of retries is now increased to 4. This results in delays of 5, 10 and 20 seconds between the retries. 
--- .../unreleased/{pull-5011 => issue-5005} | 3 +++ internal/restic/lock.go | 27 ++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) rename changelog/unreleased/{pull-5011 => issue-5005} (76%) diff --git a/changelog/unreleased/pull-5011 b/changelog/unreleased/issue-5005 similarity index 76% rename from changelog/unreleased/pull-5011 rename to changelog/unreleased/issue-5005 index 8bd5ef5323a..90c164b07ea 100644 --- a/changelog/unreleased/pull-5011 +++ b/changelog/unreleased/issue-5005 @@ -5,6 +5,9 @@ one of the lock files failed to load. The lock operation failed with error `unable to create lock in backend: circuit breaker open for file ` The error handling has been fixed to correctly retry locking the repository. +In addition, restic now waits a few seconds between locking retries to +increase chances of success. https://github.com/restic/restic/issues/5005 https://github.com/restic/restic/pull/5011 +https://github.com/restic/restic/pull/5012 diff --git a/internal/restic/lock.go b/internal/restic/lock.go index 49c7cedf22d..969d0593db1 100644 --- a/internal/restic/lock.go +++ b/internal/restic/lock.go @@ -103,10 +103,14 @@ func NewExclusiveLock(ctx context.Context, repo Unpacked) (*Lock, error) { var waitBeforeLockCheck = 200 * time.Millisecond +// delay increases by factor 2 on each retry +var initialWaitBetweenLockRetries = 5 * time.Second + // TestSetLockTimeout can be used to reduce the lock wait timeout for tests. func TestSetLockTimeout(t testing.TB, d time.Duration) { t.Logf("setting lock timeout to %v", d) waitBeforeLockCheck = d + initialWaitBetweenLockRetries = d } func newLock(ctx context.Context, repo Unpacked, excl bool) (*Lock, error) { @@ -170,8 +174,17 @@ func (l *Lock) checkForOtherLocks(ctx context.Context) error { if l.lockID != nil { checkedIDs.Insert(*l.lockID) } + delay := initialWaitBetweenLockRetries // retry locking a few times - for i := 0; i < 3; i++ { + for i := 0; i < 4; i++ { + if i != 0 { + // sleep between retries to give backend some time to settle + if err := cancelableDelay(ctx, delay); err != nil { + return err + } + delay *= 2 + } + // Store updates in new IDSet to prevent data races var m sync.Mutex newCheckedIDs := NewIDSet(checkedIDs.List()...) @@ -213,6 +226,18 @@ func (l *Lock) checkForOtherLocks(ctx context.Context) error { return err } +func cancelableDelay(ctx context.Context, delay time.Duration) error { + // delay next try a bit + timer := time.NewTimer(delay) + select { + case <-ctx.Done(): + timer.Stop() + return ctx.Err() + case <-timer.C: + } + return nil +} + // createLock acquires the lock by creating a file in the repository. func (l *Lock) createLock(ctx context.Context) (ID, error) { id, err := SaveJSONUnpacked(ctx, l.repo, LockFile, l) From 6fbfccc2d3a40f848dbe77e858d044cd6dfc6737 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 19:31:21 +0200 Subject: [PATCH 578/893] fs: fix race condition in get/set security descriptor Calling `Load()` twice for an atomic variable can return different values each time. This resulted in trying to read the security descriptor with high privileges, but then not entering the code path to switch to low privileges when another thread has already done so concurrently. 
--- internal/fs/sd_windows.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 0a73cbe53c6..bccf74992f4 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -48,13 +48,15 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err var sd *windows.SECURITY_DESCRIPTOR - if lowerPrivileges.Load() { + // store original value to avoid unrelated changes in the error check + useLowerPrivileges := lowerPrivileges.Load() + if useLowerPrivileges { sd, err = getNamedSecurityInfoLow(filePath) } else { sd, err = getNamedSecurityInfoHigh(filePath) } if err != nil { - if !lowerPrivileges.Load() && isHandlePrivilegeNotHeldError(err) { + if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. lowerPrivileges.Store(true) sd, err = getNamedSecurityInfoLow(filePath) @@ -109,14 +111,16 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { sacl = nil } - if lowerPrivileges.Load() { + // store original value to avoid unrelated changes in the error check + useLowerPrivileges := lowerPrivileges.Load() + if useLowerPrivileges { err = setNamedSecurityInfoLow(filePath, dacl) } else { err = setNamedSecurityInfoHigh(filePath, owner, group, dacl, sacl) } if err != nil { - if !lowerPrivileges.Load() && isHandlePrivilegeNotHeldError(err) { + if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. lowerPrivileges.Store(true) err = setNamedSecurityInfoLow(filePath, dacl) From 9c70794886930d12bce2807d0e6c07aee99c69d8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 19:36:43 +0200 Subject: [PATCH 579/893] fs: fix error handling for retried get/set of security descriptor The retry code path did not filter `ERROR_NOT_SUPPORTED`. Just call the original function a second time to correctly follow the low privilege code path. --- internal/fs/sd_windows.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index bccf74992f4..0004f1809f8 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -59,10 +59,7 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. lowerPrivileges.Store(true) - sd, err = getNamedSecurityInfoLow(filePath) - if err != nil { - return nil, fmt.Errorf("get low-level named security info failed with: %w", err) - } + return GetSecurityDescriptor(filePath) } else if errors.Is(err, windows.ERROR_NOT_SUPPORTED) { return nil, nil } else { @@ -123,10 +120,7 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. 
lowerPrivileges.Store(true) - err = setNamedSecurityInfoLow(filePath, dacl) - if err != nil { - return fmt.Errorf("set low-level named security info failed with: %w", err) - } + return SetSecurityDescriptor(filePath, securityDescriptor) } else { return fmt.Errorf("set named security info failed with: %w", err) } From 45d05eb691437a06ab8c1079e462421c75b349f3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 19:43:18 +0200 Subject: [PATCH 580/893] add changelog for security descriptor race condition --- changelog/unreleased/issue-5004 | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 changelog/unreleased/issue-5004 diff --git a/changelog/unreleased/issue-5004 b/changelog/unreleased/issue-5004 new file mode 100644 index 00000000000..529b65464f9 --- /dev/null +++ b/changelog/unreleased/issue-5004 @@ -0,0 +1,12 @@ +Bugfix: Fix spurious "A Required Privilege Is Not Held by the Client" error + +On Windows, creating a backup could sometimes print the following error + +``` +error: nodeFromFileInfo [...]: get named security info failed with: a required privilege is not held by the client. +``` + +This has been fixed. + +https://github.com/restic/restic/issues/5004 +https://github.com/restic/restic/pull/5019 From 8eff4e0e5cf4c63ffe5e6bf9d5bd2ecf71dd3efb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 29 Aug 2024 16:32:15 +0200 Subject: [PATCH 581/893] cache: correctly ignore files whose filename is no ID this can for example be the case for temporary files created by the backend implementation. --- internal/backend/cache/backend.go | 5 ++--- internal/backend/cache/backend_test.go | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/internal/backend/cache/backend.go b/internal/backend/cache/backend.go index 58b03dd3862..3754266ba96 100644 --- a/internal/backend/cache/backend.go +++ b/internal/backend/cache/backend.go @@ -231,9 +231,8 @@ func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backen wrapFn := func(f backend.FileInfo) error { id, err := restic.ParseID(f.Name) if err != nil { - // returning error here since, if we cannot parse the ID, the file - // is invalid and the list must exit. - return err + // ignore files with invalid name + return nil } ids.Insert(id) diff --git a/internal/backend/cache/backend_test.go b/internal/backend/cache/backend_test.go index dca51c2bf2b..7f83e40cbcd 100644 --- a/internal/backend/cache/backend_test.go +++ b/internal/backend/cache/backend_test.go @@ -296,3 +296,20 @@ func TestAutomaticCacheClear(t *testing.T) { t.Errorf("cache doesn't have file2 after list") } } + +func TestAutomaticCacheClearInvalidFilename(t *testing.T) { + be := mem.New() + c := TestNewCache(t) + + data := test.Random(rand.Int(), 42) + h := backend.Handle{ + Type: backend.IndexFile, + Name: "tmp12345", + } + save(t, be, h, data) + + wbe := c.Wrap(be) + + // list all files in the backend + list(t, wbe, func(_ backend.FileInfo) error { return nil }) +} From d19f706d505bec2de16a8319ea627f77e5507a90 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 29 Aug 2024 16:33:18 +0200 Subject: [PATCH 582/893] Add temporary files repositories in integration tests This is intended to catch problems with temporary files stored in the backend, even if the responsible component forgets to test for those. 
--- cmd/restic/cmd_init_integration_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/restic/cmd_init_integration_test.go b/cmd/restic/cmd_init_integration_test.go index 9b5eed6e08e..4795d5510b9 100644 --- a/cmd/restic/cmd_init_integration_test.go +++ b/cmd/restic/cmd_init_integration_test.go @@ -2,6 +2,8 @@ package main import ( "context" + "os" + "path/filepath" "testing" "github.com/restic/restic/internal/repository" @@ -16,6 +18,11 @@ func testRunInit(t testing.TB, opts GlobalOptions) { rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil)) t.Logf("repository initialized at %v", opts.Repo) + + // create temporary junk files to verify that restic does not trip over them + for _, path := range []string{"index", "snapshots", "keys", "locks", filepath.Join("data", "00")} { + rtest.OK(t, os.WriteFile(filepath.Join(opts.Repo, path, "tmp12345"), []byte("junk file"), 0o600)) + } } func TestInitCopyChunkerParams(t *testing.T) { From dd90e1926b785fba2601e1df2752094ad419506c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 29 Aug 2024 16:35:48 +0200 Subject: [PATCH 583/893] use OrderedListOnceBackend where possible --- cmd/restic/cmd_prune_integration_test.go | 5 ++--- cmd/restic/integration_test.go | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index 746eb5cc9e7..536ec40d886 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -146,10 +146,9 @@ func TestPruneWithDamagedRepository(t *testing.T) { env.gopts.backendTestHook = oldHook }() // prune should fail - rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + rtest.Equals(t, repository.ErrPacksMissing, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { return runPrune(context.TODO(), pruneDefaultOptions, env.gopts, term) - }) == repository.ErrPacksMissing, - "prune should have reported index not complete error") + }), "prune should have reported index not complete error") } // Test repos for edge cases diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 4cecec6bc06..df95031dca5 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -80,7 +80,7 @@ func TestListOnce(t *testing.T) { defer cleanup() env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { - return newListOnceBackend(r), nil + return newOrderedListOnceBackend(r), nil } pruneOpts := PruneOptions{MaxUnused: "0"} checkOpts := CheckOptions{ReadData: true, CheckUnused: true} @@ -148,7 +148,7 @@ func TestFindListOnce(t *testing.T) { defer cleanup() env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { - return newListOnceBackend(r), nil + return newOrderedListOnceBackend(r), nil } testSetupBackupData(t, env) From 4fcedb4bae9c861eb0cdbf738ca5a05c2f17e1df Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 30 Aug 2024 11:25:51 +0200 Subject: [PATCH 584/893] backup: support specifying volume instead of path on Windows "C:" (volume name) versus "C:\" (path) --- changelog/unreleased/issue-2004 | 19 ++++++++++ internal/archiver/archiver.go | 7 +++- internal/archiver/archiver_test.go | 60 ++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-2004 diff --git a/changelog/unreleased/issue-2004 b/changelog/unreleased/issue-2004 new file mode 
100644 index 00000000000..45bc07ca814 --- /dev/null +++ b/changelog/unreleased/issue-2004 @@ -0,0 +1,19 @@ +Bugfix: Correctly handle passing volume name to `backup` command + +On Windows, when the specified backup target only included the volume +name without a trailing slash, for example, `C:`, then restoring the +resulting snapshot would result in an error. Note that using `C:\` +as backup target worked correctly. + +Specifying volume names now works correctly. + +To restore snapshots created before this bugfix, use the `:` +syntax. For a snapshot with ID `12345678` and a backup of `C:`, the following +command can be used: + +``` +restic restore 12345678:/C/C:./ --target output/folder +``` + +https://github.com/restic/restic/issues/2004 +https://github.com/restic/restic/pull/5028 diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index e441512982d..e7c346d3a43 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -715,7 +715,12 @@ func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) { debug.Log("targets before resolving: %v", targets) result := make([]string, 0, len(targets)) for _, target := range targets { - target = filesys.Clean(target) + if target != "" && filesys.VolumeName(target) == target { + // special case to allow users to also specify a volume name "C:" instead of a path "C:\" + target = target + filesys.Separator() + } else { + target = filesys.Clean(target) + } pc, _ := pathComponents(filesys, target, false) if len(pc) > 0 { result = append(result, target) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index b519387db4e..c54f9ea3311 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1448,6 +1448,66 @@ func TestArchiverSnapshot(t *testing.T) { } } +func TestResolveRelativeTargetsSpecial(t *testing.T) { + var tests = []struct { + name string + targets []string + expected []string + win bool + }{ + { + name: "basic relative path", + targets: []string{filepath.FromSlash("some/path")}, + expected: []string{filepath.FromSlash("some/path")}, + }, + { + name: "partial relative path", + targets: []string{filepath.FromSlash("../some/path")}, + expected: []string{filepath.FromSlash("../some/path")}, + }, + { + name: "basic absolute path", + targets: []string{filepath.FromSlash("/some/path")}, + expected: []string{filepath.FromSlash("/some/path")}, + }, + { + name: "volume name", + targets: []string{"C:"}, + expected: []string{"C:\\"}, + win: true, + }, + { + name: "volume root path", + targets: []string{"C:\\"}, + expected: []string{"C:\\"}, + win: true, + }, + { + name: "UNC path", + targets: []string{"\\\\server\\volume"}, + expected: []string{"\\\\server\\volume\\"}, + win: true, + }, + { + name: "UNC path with trailing slash", + targets: []string{"\\\\server\\volume\\"}, + expected: []string{"\\\\server\\volume\\"}, + win: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.win && runtime.GOOS != "windows" { + t.Skip("skip test on unix") + } + + targets, err := resolveRelativeTargets(&fs.Local{}, test.targets) + rtest.OK(t, err) + rtest.Equals(t, test.expected, targets) + }) + } +} + func TestArchiverSnapshotSelect(t *testing.T) { var tests = []struct { name string From d15e693045918bd92dbf74a958127242224378bb Mon Sep 17 00:00:00 2001 From: Shivashis Padhi Date: Sun, 11 Aug 2024 22:25:21 +0200 Subject: [PATCH 585/893] restore: Add progress bar to 'restore --verify' --- 
changelog/unreleased/issue-4795 | 7 +++ cmd/restic/cmd_restore.go | 5 +- internal/restorer/restorer.go | 25 +++++--- internal/restorer/restorer_test.go | 71 ++++++++++++++-------- internal/restorer/restorer_unix_test.go | 10 +-- internal/restorer/restorer_windows_test.go | 6 +- 6 files changed, 83 insertions(+), 41 deletions(-) create mode 100644 changelog/unreleased/issue-4795 diff --git a/changelog/unreleased/issue-4795 b/changelog/unreleased/issue-4795 new file mode 100644 index 00000000000..084335f510c --- /dev/null +++ b/changelog/unreleased/issue-4795 @@ -0,0 +1,7 @@ +Enhancement: `restore --verify` shows progress with a progress bar + +If restore command was run with `--verify` restic didn't show any progress indication, now it shows a progress bar while 'verification' is running. +The progress bar is text only for now and doesn't respect `--json` flag. + +https://github.com/restic/restic/issues/4795 +https://github.com/restic/restic/pull/4989 diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index eda608802d4..d71cb768387 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -220,7 +220,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, msg.P("restoring %s to %s\n", res.Snapshot(), opts.Target) } - err = res.RestoreTo(ctx, opts.Target) + countRestoredFiles, err := res.RestoreTo(ctx, opts.Target) if err != nil { return err } @@ -237,7 +237,8 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, } var count int t0 := time.Now() - count, err = res.VerifyFiles(ctx, opts.Target) + bar := newTerminalProgressMax(!gopts.Quiet && !gopts.JSON && stdoutIsTerminal(), 0, "files verified", term) + count, err = res.VerifyFiles(ctx, opts.Target, countRestoredFiles, bar) if err != nil { return err } diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 00da4e18e82..0e30b82f8fb 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -12,6 +12,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" restoreui "github.com/restic/restic/internal/ui/restore" "golang.org/x/sync/errgroup" @@ -333,12 +334,13 @@ func (res *Restorer) ensureDir(target string) error { // RestoreTo creates the directories and files in the snapshot below dst. // Before an item is created, res.Filter is called. 
-func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { +func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error) { + restoredFileCount := uint64(0) var err error if !filepath.IsAbs(dst) { dst, err = filepath.Abs(dst) if err != nil { - return errors.Wrap(err, "Abs") + return restoredFileCount, errors.Wrap(err, "Abs") } } @@ -346,7 +348,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { // ensure that the target directory exists and is actually a directory // Using ensureDir is too aggressive here as it also removes unexpected files if err := fs.MkdirAll(dst, 0700); err != nil { - return fmt.Errorf("cannot create target directory: %w", err) + return restoredFileCount, fmt.Errorf("cannot create target directory: %w", err) } } @@ -406,19 +408,22 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { } } res.trackFile(location, updateMetadataOnly) + if !updateMetadataOnly { + restoredFileCount++ + } return nil }) return err }, }) if err != nil { - return err + return 0, err } if !res.opts.DryRun { err = filerestorer.restoreFiles(ctx) if err != nil { - return err + return 0, err } } @@ -466,7 +471,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { return err }, }) - return err + return restoredFileCount, err } func (res *Restorer) removeUnexpectedFiles(ctx context.Context, target, location string, expectedFilenames []string) error { @@ -587,7 +592,7 @@ const nVerifyWorkers = 8 // have been successfully written to dst. It stops when it encounters an // error. It returns that error and the number of files it has successfully // verified. -func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { +func (res *Restorer) VerifyFiles(ctx context.Context, dst string, countRestoredFiles uint64, p *progress.Counter) (int, error) { type mustCheck struct { node *restic.Node path string @@ -598,6 +603,11 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { work = make(chan mustCheck, 2*nVerifyWorkers) ) + if p != nil { + p.SetMax(countRestoredFiles) + defer p.Done() + } + g, ctx := errgroup.WithContext(ctx) // Traverse tree and send jobs to work. 
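The counter lifecycle visible above (create the bar with an unknown total, call `SetMax` once the number of restored files is known, `Add(1)` per verified file, `Done` at the end) can be sketched in isolation. The helper below is hypothetical and only illustrates the call sequence from this patch; `internal/ui/progress` is a restic-internal package, so the snippet compiles only inside the restic module.

```
package restorer

import (
	"time"

	"github.com/restic/restic/internal/ui/progress"
)

// verifyWithProgress mirrors the counter usage in VerifyFiles: the counter is
// created with an unknown total, the total is set once it is known, and every
// verified item bumps the counter by one.
func verifyWithProgress(items []string) uint64 {
	p := progress.NewCounter(time.Second, 0, func(value uint64, total uint64, runtime time.Duration, final bool) {
		// a real reporter would render something like "value/total files verified"
	})
	defer p.Done()

	p.SetMax(uint64(len(items)))
	for range items {
		// per-file verification would happen here
		p.Add(1)
	}

	verified, _ := p.Get()
	return verified
}
```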
@@ -632,6 +642,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { if err != nil || ctx.Err() != nil { break } + p.Add(1) atomic.AddUint64(&nchecked, 1) } return err diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index a6de50556b1..7d48950681c 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -22,6 +22,7 @@ import ( "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" restoreui "github.com/restic/restic/internal/ui/restore" "golang.org/x/sync/errgroup" ) @@ -403,13 +404,13 @@ func TestRestorer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) if err != nil { t.Fatal(err) } if len(test.ErrorsMust)+len(test.ErrorsMay) == 0 { - _, err = res.VerifyFiles(ctx, tempdir) + _, err = res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) rtest.OK(t, err) } @@ -501,13 +502,18 @@ func TestRestorerRelative(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, "restore") + countRestoredFiles, err := res.RestoreTo(ctx, "restore") if err != nil { t.Fatal(err) } - nverified, err := res.VerifyFiles(ctx, "restore") + p := progress.NewCounter(time.Second, countRestoredFiles, func(value uint64, total uint64, runtime time.Duration, final bool) {}) + defer p.Done() + nverified, err := res.VerifyFiles(ctx, "restore", countRestoredFiles, p) rtest.OK(t, err) rtest.Equals(t, len(test.Files), nverified) + counterValue, maxValue := p.Get() + rtest.Equals(t, counterValue, uint64(2)) + rtest.Equals(t, maxValue, uint64(2)) for filename, err := range errors { t.Errorf("unexpected error for %v found: %v", filename, err) @@ -524,6 +530,13 @@ func TestRestorerRelative(t *testing.T) { t.Errorf("file %v has wrong content: want %q, got %q", filename, content, data) } } + + // verify that restoring the same snapshot again results in countRestoredFiles == 0 + countRestoredFiles, err = res.RestoreTo(ctx, "restore") + if err != nil { + t.Fatal(err) + } + rtest.Equals(t, uint64(0), countRestoredFiles) }) } } @@ -835,7 +848,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) var testPatterns = []struct { @@ -872,9 +885,9 @@ func TestVerifyCancel(t *testing.T) { tempdir := rtest.TempDir(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - - rtest.OK(t, res.RestoreTo(ctx, tempdir)) - err := os.WriteFile(filepath.Join(tempdir, "foo"), []byte("bar"), 0644) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + err = os.WriteFile(filepath.Join(tempdir, "foo"), []byte("bar"), 0644) rtest.OK(t, err) var errs []error @@ -883,7 +896,7 @@ func TestVerifyCancel(t *testing.T) { return err } - nverified, err := res.VerifyFiles(ctx, tempdir) + nverified, err := res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) rtest.Equals(t, 0, nverified) rtest.Assert(t, err != nil, "nil error from VerifyFiles") rtest.Equals(t, 1, len(errs)) @@ -915,7 +928,7 @@ func TestRestorerSparseFiles(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err = 
res.RestoreTo(ctx, tempdir) + _, err = res.RestoreTo(ctx, tempdir) rtest.OK(t, err) filename := filepath.Join(tempdir, "zeros") @@ -952,15 +965,17 @@ func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSna t.Logf("base snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, baseOptions) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) // overwrite snapshot sn, id = saveSnapshot(t, repo, overwriteSnapshot, noopGetGenericAttributes) t.Logf("overwrite snapshot saved as %v", id.Str()) res = NewRestorer(repo, sn, overwriteOptions) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) - _, err := res.VerifyFiles(ctx, tempdir) + _, err = res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) rtest.OK(t, err) return tempdir @@ -1241,8 +1256,9 @@ func TestRestoreModified(t *testing.T) { t.Logf("snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, Options{Overwrite: OverwriteIfChanged}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) - n, err := res.VerifyFiles(ctx, tempdir) + countRestoredFiles, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + n, err := res.VerifyFiles(ctx, tempdir, countRestoredFiles, nil) rtest.OK(t, err) rtest.Equals(t, 2, n, "unexpected number of verified files") } @@ -1267,7 +1283,8 @@ func TestRestoreIfChanged(t *testing.T) { t.Logf("snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, Options{}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) // modify file but maintain size and timestamp path := filepath.Join(tempdir, "foo") @@ -1286,7 +1303,8 @@ func TestRestoreIfChanged(t *testing.T) { for _, overwrite := range []OverwriteBehavior{OverwriteIfChanged, OverwriteAlways} { res = NewRestorer(repo, sn, Options{Overwrite: overwrite}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) data, err := os.ReadFile(path) rtest.OK(t, err) if overwrite == OverwriteAlways { @@ -1322,9 +1340,10 @@ func TestRestoreDryRun(t *testing.T) { t.Logf("snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, Options{DryRun: true}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) - _, err := os.Stat(tempdir) + _, err = os.Stat(tempdir) rtest.Assert(t, errors.Is(err, os.ErrNotExist), "expected no file to be created, got %v", err) } @@ -1348,7 +1367,8 @@ func TestRestoreDryRunDelete(t *testing.T) { sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) res := NewRestorer(repo, sn, Options{DryRun: true, Delete: true}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err = res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) _, err = os.Stat(tempfile) rtest.Assert(t, err == nil, "expected file to still exist, got error %v", err) @@ -1466,14 +1486,14 @@ func TestRestoreDelete(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) res = NewRestorer(repo, deleteSn, Options{Delete: true}) if test.selectFilter != nil { res.SelectFilter = test.selectFilter } - err = res.RestoreTo(ctx, tempdir) + _, err = res.RestoreTo(ctx, tempdir) rtest.OK(t, err) for fn, shouldExist := range test.fileState { @@ -1506,7 +1526,7 @@ func TestRestoreToFile(t *testing.T) { sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes) res := NewRestorer(repo, sn, 
Options{}) - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.Assert(t, strings.Contains(err.Error(), "cannot create target directory"), "unexpected error %v", err) } @@ -1538,7 +1558,8 @@ func TestRestorerLongPath(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rtest.OK(t, res.RestoreTo(ctx, tmp)) - _, err = res.VerifyFiles(ctx, tmp) + countRestoredFiles, err := res.RestoreTo(ctx, tmp) + rtest.OK(t, err) + _, err = res.VerifyFiles(ctx, tmp, countRestoredFiles, nil) rtest.OK(t, err) } diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go index 27d990af4be..c4e8149b247 100644 --- a/internal/restorer/restorer_unix_test.go +++ b/internal/restorer/restorer_unix_test.go @@ -37,7 +37,7 @@ func TestRestorerRestoreEmptyHardlinkedFields(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) f1, err := os.Stat(filepath.Join(tempdir, "dirtest/file1")) @@ -96,7 +96,7 @@ func testRestorerProgressBar(t *testing.T, dryRun bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) progress.Finish() @@ -126,7 +126,8 @@ func TestRestorePermissions(t *testing.T) { t.Logf("snapshot saved as %v", id.Str()) res := NewRestorer(repo, sn, Options{}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) for _, overwrite := range []OverwriteBehavior{OverwriteIfChanged, OverwriteAlways} { // tamper with permissions @@ -134,7 +135,8 @@ func TestRestorePermissions(t *testing.T) { rtest.OK(t, os.Chmod(path, 0o700)) res = NewRestorer(repo, sn, Options{Overwrite: overwrite}) - rtest.OK(t, res.RestoreTo(ctx, tempdir)) + _, err := res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) fi, err := os.Stat(path) rtest.OK(t, err) rtest.Equals(t, fs.FileMode(0o600), fi.Mode().Perm(), "unexpected permissions") diff --git a/internal/restorer/restorer_windows_test.go b/internal/restorer/restorer_windows_test.go index 3f6c8472ba0..4764bed2d4c 100644 --- a/internal/restorer/restorer_windows_test.go +++ b/internal/restorer/restorer_windows_test.go @@ -181,7 +181,7 @@ func runAttributeTests(t *testing.T, fileInfo NodeInfo, existingFileAttr FileAtt ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, testDir) + _, err := res.RestoreTo(ctx, testDir) rtest.OK(t, err) mainFilePath := path.Join(testDir, fileInfo.parentDir, fileInfo.name) @@ -562,11 +562,11 @@ func TestRestoreDeleteCaseInsensitive(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := res.RestoreTo(ctx, tempdir) + _, err := res.RestoreTo(ctx, tempdir) rtest.OK(t, err) res = NewRestorer(repo, deleteSn, Options{Delete: true}) - err = res.RestoreTo(ctx, tempdir) + _, err = res.RestoreTo(ctx, tempdir) rtest.OK(t, err) // anotherfile must still exist From c83b529c47d7a32c6150be16b0be55c6be516edb Mon Sep 17 00:00:00 2001 From: Andreas Deininger Date: Sun, 11 Aug 2024 21:38:15 +0200 Subject: [PATCH 586/893] Fix typos --- doc/040_backup.rst | 2 +- internal/restorer/fileswriter_test.go | 2 +- internal/restorer/restorer_windows.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 25c5a8ad110..f1f355c531e 100644 --- a/doc/040_backup.rst +++ 
b/doc/040_backup.rst @@ -585,7 +585,7 @@ Sometimes, it can be useful to directly save the output of a program, for exampl ``mysqldump`` so that the SQL can later be restored. Restic supports this mode of operation; just supply the option ``--stdin-from-command`` when using the ``backup`` action, and write the command in place of the files/directories. To prevent -restic from interpreting the arguments for the commmand, make sure to add ``--`` before +restic from interpreting the arguments for the command, make sure to add ``--`` before the command starts: .. code-block:: console diff --git a/internal/restorer/fileswriter_test.go b/internal/restorer/fileswriter_test.go index c6984792703..9ea8767b871 100644 --- a/internal/restorer/fileswriter_test.go +++ b/internal/restorer/fileswriter_test.go @@ -49,7 +49,7 @@ func TestFilesWriterRecursiveOverwrite(t *testing.T) { // must error if recursive delete is not allowed w := newFilesWriter(1, false) err := w.writeToFile(path, []byte{1}, 0, 2, false) - rtest.Assert(t, errors.Is(err, notEmptyDirError()), "unexepected error got %v", err) + rtest.Assert(t, errors.Is(err, notEmptyDirError()), "unexpected error got %v", err) rtest.Equals(t, 0, len(w.buckets[0].files)) // must replace directory diff --git a/internal/restorer/restorer_windows.go b/internal/restorer/restorer_windows.go index 72337d8ae80..9ddc0a932e1 100644 --- a/internal/restorer/restorer_windows.go +++ b/internal/restorer/restorer_windows.go @@ -8,6 +8,6 @@ import "strings" // toComparableFilename returns a filename suitable for equality checks. On Windows, it returns the // uppercase version of the string. On all other systems, it returns the unmodified filename. func toComparableFilename(path string) string { - // apparently NTFS internally uppercases filenames for comparision + // apparently NTFS internally uppercases filenames for comparison return strings.ToUpper(path) } From 8861421cd65d257a9ad03f09637787b8d80c9f53 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 11 Aug 2024 01:23:47 -0600 Subject: [PATCH 587/893] Fix extended attributes handling for VSS snapshots --- internal/fs/ea_windows.go | 18 +++++++ internal/restic/node_windows.go | 84 +++++++++++++++++++++++---------- 2 files changed, 77 insertions(+), 25 deletions(-) diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go index d19a1ee6ae8..bf7b02fd407 100644 --- a/internal/fs/ea_windows.go +++ b/internal/fs/ea_windows.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "errors" "fmt" + "strings" "syscall" "unsafe" @@ -298,3 +299,20 @@ func PathSupportsExtendedAttributes(path string) (supported bool, err error) { supported = (fileSystemFlags & windows.FILE_SUPPORTS_EXTENDED_ATTRIBUTES) != 0 return supported, nil } + +// GetVolumePathName returns the volume path name for the given path. 
+func GetVolumePathName(path string) (volumeName string, err error) { + utf16Path, err := windows.UTF16PtrFromString(path) + if err != nil { + return "", err + } + // Get the volume path (e.g., "D:") + var volumePath [windows.MAX_PATH + 1]uint16 + err = windows.GetVolumePathName(utf16Path, &volumePath[0], windows.MAX_PATH+1) + if err != nil { + return "", err + } + // Trim any trailing backslashes + volumeName = strings.TrimRight(windows.UTF16ToString(volumePath[:]), "\\") + return volumeName, nil +} diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index ceb304d0c72..6adb51f0d44 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -407,38 +407,72 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT // checkAndStoreEASupport checks if the volume of the path supports extended attributes and stores the result in a map // If the result is already in the map, it returns the result from the map. func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { - // Check if it's an extended length path - if strings.HasPrefix(path, uncPathPrefix) { - // Convert \\?\UNC\ extended path to standard path to get the volume name correctly - path = `\\` + path[len(uncPathPrefix):] - } else if strings.HasPrefix(path, extendedPathPrefix) { - //Extended length path prefix needs to be trimmed to get the volume name correctly - path = path[len(extendedPathPrefix):] - } else if strings.HasPrefix(path, globalRootPrefix) { - // EAs are not supported for \\?\GLOBALROOT i.e. VSS snapshots - return false, nil - } else { - // Use the absolute path - path, err = filepath.Abs(path) + var volumeName string + volumeName, err = prepareVolumeName(path) + if err != nil { + return false, err + } + + if volumeName != "" { + // First check if the manually prepared volume name is already in the map + eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeName) + if exists { + return eaSupportedValue.(bool), nil + } + // If not found, check if EA is supported with manually prepared volume name + isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeName + `\`) if err != nil { - return false, fmt.Errorf("failed to get absolute path: %w", err) + return false, err } } - volumeName := filepath.VolumeName(path) - if volumeName == "" { - return false, nil + // If an entry is not found, get the actual volume name using the GetVolumePathName function + volumeNameActual, err := fs.GetVolumePathName(path) + if err != nil { + return false, err } - eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeName) - if exists { - return eaSupportedValue.(bool), nil + if volumeNameActual != volumeName { + // If the actual volume name is different, check cache for the actual volume name + eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeNameActual) + if exists { + return eaSupportedValue.(bool), nil + } + // If the actual volume name is different and is not in the map, again check if the new volume supports extended attributes with the actual volume name + isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeNameActual + `\`) + if err != nil { + return false, err + } } + eaSupportedVolumesMap.Store(volumeNameActual, isEASupportedVolume) + return isEASupportedVolume, err +} - // Add backslash to the volume name to ensure it is a valid path - isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeName + `\`) - if err == nil { - eaSupportedVolumesMap.Store(volumeName, 
isEASupportedVolume) +// prepareVolumeName prepares the volume name for different cases in Windows +func prepareVolumeName(path string) (volumeName string, err error) { + // Check if it's an extended length path + if strings.HasPrefix(path, globalRootPrefix) { + // Extract the VSS snapshot volume name eg. `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX` + if parts := strings.SplitN(path, `\`, 7); len(parts) >= 6 { + volumeName = strings.Join(parts[:6], `\`) + } else { + volumeName = filepath.VolumeName(path) + } + } else { + if strings.HasPrefix(path, uncPathPrefix) { + // Convert \\?\UNC\ extended path to standard path to get the volume name correctly + path = `\\` + path[len(uncPathPrefix):] + } else if strings.HasPrefix(path, extendedPathPrefix) { + //Extended length path prefix needs to be trimmed to get the volume name correctly + path = path[len(extendedPathPrefix):] + } else { + // Use the absolute path + path, err = filepath.Abs(path) + if err != nil { + return "", fmt.Errorf("failed to get absolute path: %w", err) + } + } + volumeName = filepath.VolumeName(path) } - return isEASupportedVolume, err + return volumeName, nil } // windowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection From 111490b8bebac875d1e51926997dd95521f97e38 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 11 Aug 2024 01:32:55 -0600 Subject: [PATCH 588/893] Add changelog --- changelog/unreleased/pull-4998 | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 changelog/unreleased/pull-4998 diff --git a/changelog/unreleased/pull-4998 b/changelog/unreleased/pull-4998 new file mode 100644 index 00000000000..23ff3dbd2fd --- /dev/null +++ b/changelog/unreleased/pull-4998 @@ -0,0 +1,8 @@ +Bugfix: Fix extended attributes handling for VSS snapshots + +Restic was failing to backup extended attributes for VSS snapshots +after the fix for https://github.com/restic/restic/pull/4980. +Restic now correctly handles extended attributes for VSS snapshots. + +https://github.com/restic/restic/pull/4998 +https://github.com/restic/restic/pull/4980 From 51fad2eecb62209a39742c50087dc73635ad3f85 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 11 Aug 2024 01:48:25 -0600 Subject: [PATCH 589/893] Gracefully handle invalid prepared volume names --- internal/restic/node_windows.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 6adb51f0d44..2ca7e42e6d3 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -417,11 +417,13 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { // First check if the manually prepared volume name is already in the map eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeName) if exists { + // Cache hit, immediately return the cached value return eaSupportedValue.(bool), nil } // If not found, check if EA is supported with manually prepared volume name isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeName + `\`) - if err != nil { + // If the prepared volume name is not valid, we will next fetch the actual volume name. 
+ if err != nil && !errors.Is(err, windows.DNS_ERROR_INVALID_NAME) { return false, err } } @@ -434,6 +436,7 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { // If the actual volume name is different, check cache for the actual volume name eaSupportedValue, exists := eaSupportedVolumesMap.Load(volumeNameActual) if exists { + // Cache hit, immediately return the cached value return eaSupportedValue.(bool), nil } // If the actual volume name is different and is not in the map, again check if the new volume supports extended attributes with the actual volume name From 7642e05eed907d4d5508a7f0bf6d08d1492aea83 Mon Sep 17 00:00:00 2001 From: aneesh-n <99904+aneesh-n@users.noreply.github.com> Date: Sun, 11 Aug 2024 19:25:58 -0600 Subject: [PATCH 590/893] Add test cases and handle volume GUID paths Gracefully handle errors while checking for EA and add debug logs. --- changelog/unreleased/pull-4980 | 1 + changelog/unreleased/pull-4998 | 8 -- internal/fs/ea_windows_test.go | 76 +++++++++++ internal/restic/node_windows.go | 49 ++++--- internal/restic/node_windows_test.go | 196 +++++++++++++++++++++++++++ 5 files changed, 306 insertions(+), 24 deletions(-) delete mode 100644 changelog/unreleased/pull-4998 diff --git a/changelog/unreleased/pull-4980 b/changelog/unreleased/pull-4980 index 264f347faec..5713db7a2b1 100644 --- a/changelog/unreleased/pull-4980 +++ b/changelog/unreleased/pull-4980 @@ -8,5 +8,6 @@ Restic now completely skips the attempt to fetch extended attributes for such volumes where it is not supported. https://github.com/restic/restic/pull/4980 +https://github.com/restic/restic/pull/4998 https://github.com/restic/restic/issues/4955 https://github.com/restic/restic/issues/4950 diff --git a/changelog/unreleased/pull-4998 b/changelog/unreleased/pull-4998 deleted file mode 100644 index 23ff3dbd2fd..00000000000 --- a/changelog/unreleased/pull-4998 +++ /dev/null @@ -1,8 +0,0 @@ -Bugfix: Fix extended attributes handling for VSS snapshots - -Restic was failing to backup extended attributes for VSS snapshots -after the fix for https://github.com/restic/restic/pull/4980. -Restic now correctly handles extended attributes for VSS snapshots. 
- -https://github.com/restic/restic/pull/4998 -https://github.com/restic/restic/pull/4980 diff --git a/internal/fs/ea_windows_test.go b/internal/fs/ea_windows_test.go index b249f43c405..74afd7aa56a 100644 --- a/internal/fs/ea_windows_test.go +++ b/internal/fs/ea_windows_test.go @@ -10,6 +10,7 @@ import ( "os" "path/filepath" "reflect" + "strings" "syscall" "testing" "unsafe" @@ -245,3 +246,78 @@ func testSetGetEA(t *testing.T, path string, handle windows.Handle, testEAs []Ex t.Fatalf("EAs read from path %s don't match", path) } } + +func TestPathSupportsExtendedAttributes(t *testing.T) { + testCases := []struct { + name string + path string + expected bool + }{ + { + name: "System drive", + path: os.Getenv("SystemDrive") + `\`, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + supported, err := PathSupportsExtendedAttributes(tc.path) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if supported != tc.expected { + t.Errorf("Expected %v, got %v for path %s", tc.expected, supported, tc.path) + } + }) + } + + // Test with an invalid path + _, err := PathSupportsExtendedAttributes("Z:\\NonExistentPath-UAS664da5s4dyu56das45f5as") + if err == nil { + t.Error("Expected an error for non-existent path, but got nil") + } +} + +func TestGetVolumePathName(t *testing.T) { + tempDirVolume := filepath.VolumeName(os.TempDir()) + testCases := []struct { + name string + path string + expectedPrefix string + }{ + { + name: "Root directory", + path: os.Getenv("SystemDrive") + `\`, + expectedPrefix: os.Getenv("SystemDrive"), + }, + { + name: "Nested directory", + path: os.Getenv("SystemDrive") + `\Windows\System32`, + expectedPrefix: os.Getenv("SystemDrive"), + }, + { + name: "Temp directory", + path: os.TempDir() + `\`, + expectedPrefix: tempDirVolume, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + volumeName, err := GetVolumePathName(tc.path) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if !strings.HasPrefix(volumeName, tc.expectedPrefix) { + t.Errorf("Expected volume name to start with %s, but got %s", tc.expectedPrefix, volumeName) + } + }) + } + + // Test with an invalid path + _, err := GetVolumePathName("Z:\\NonExistentPath") + if err == nil { + t.Error("Expected an error for non-existent path, but got nil") + } +} diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 2ca7e42e6d3..bce01ccad04 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -42,6 +42,7 @@ const ( extendedPathPrefix = `\\?\` uncPathPrefix = `\\?\UNC\` globalRootPrefix = `\\?\GLOBALROOT\` + volumeGUIDPrefix = `\\?\Volume{` ) // mknod is not supported on Windows. @@ -422,15 +423,21 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { } // If not found, check if EA is supported with manually prepared volume name isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeName + `\`) - // If the prepared volume name is not valid, we will next fetch the actual volume name. + // If the prepared volume name is not valid, we will fetch the actual volume name next. if err != nil && !errors.Is(err, windows.DNS_ERROR_INVALID_NAME) { - return false, err + debug.Log("Error checking if extended attributes are supported for prepared volume name %s: %v", volumeName, err) + // There can be multiple errors like path does not exist, bad network path, etc. + // We just gracefully disallow extended attributes for cases. 
+ return false, nil } } // If an entry is not found, get the actual volume name using the GetVolumePathName function volumeNameActual, err := fs.GetVolumePathName(path) if err != nil { - return false, err + debug.Log("Error getting actual volume name %s for path %s: %v", volumeName, path, err) + // There can be multiple errors like path does not exist, bad network path, etc. + // We just gracefully disallow extended attributes for cases. + return false, nil } if volumeNameActual != volumeName { // If the actual volume name is different, check cache for the actual volume name @@ -441,11 +448,19 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { } // If the actual volume name is different and is not in the map, again check if the new volume supports extended attributes with the actual volume name isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeNameActual + `\`) + // Debug log for cases where the prepared volume name is not valid if err != nil { - return false, err + debug.Log("Error checking if extended attributes are supported for actual volume name %s: %v", volumeNameActual, err) + // There can be multiple errors like path does not exist, bad network path, etc. + // We just gracefully disallow extended attributes for cases. + return false, nil + } else { + debug.Log("Checking extended attributes. Prepared volume name: %s, actual volume name: %s, isEASupportedVolume: %v, err: %v", volumeName, volumeNameActual, isEASupportedVolume, err) } } - eaSupportedVolumesMap.Store(volumeNameActual, isEASupportedVolume) + if volumeNameActual != "" { + eaSupportedVolumesMap.Store(volumeNameActual, isEASupportedVolume) + } return isEASupportedVolume, err } @@ -460,17 +475,19 @@ func prepareVolumeName(path string) (volumeName string, err error) { volumeName = filepath.VolumeName(path) } } else { - if strings.HasPrefix(path, uncPathPrefix) { - // Convert \\?\UNC\ extended path to standard path to get the volume name correctly - path = `\\` + path[len(uncPathPrefix):] - } else if strings.HasPrefix(path, extendedPathPrefix) { - //Extended length path prefix needs to be trimmed to get the volume name correctly - path = path[len(extendedPathPrefix):] - } else { - // Use the absolute path - path, err = filepath.Abs(path) - if err != nil { - return "", fmt.Errorf("failed to get absolute path: %w", err) + if !strings.HasPrefix(path, volumeGUIDPrefix) { // Handle volume GUID path + if strings.HasPrefix(path, uncPathPrefix) { + // Convert \\?\UNC\ extended path to standard path to get the volume name correctly + path = `\\` + path[len(uncPathPrefix):] + } else if strings.HasPrefix(path, extendedPathPrefix) { + //Extended length path prefix needs to be trimmed to get the volume name correctly + path = path[len(extendedPathPrefix):] + } else { + // Use the absolute path + path, err = filepath.Abs(path) + if err != nil { + return "", fmt.Errorf("failed to get absolute path: %w", err) + } } } volumeName = filepath.VolumeName(path) diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go index 4fd57bbb757..6ba25559b7a 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/restic/node_windows_test.go @@ -12,6 +12,7 @@ import ( "strings" "syscall" "testing" + "time" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" @@ -329,3 +330,198 @@ func TestRestoreExtendedAttributes(t *testing.T) { } } } + +func TestPrepareVolumeName(t *testing.T) { + currentVolume := filepath.VolumeName(func() string { + // 
Get the current working directory + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get current working directory: %v", err) + } + return pwd + }()) + // Create a temporary directory for the test + tempDir, err := os.MkdirTemp("", "restic_test_"+time.Now().Format("20060102150405")) + if err != nil { + t.Fatalf("Failed to create temp directory: %v", err) + } + defer os.RemoveAll(tempDir) + + // Create a long file name + longFileName := `\Very\Long\Path\That\Exceeds\260\Characters\` + strings.Repeat(`\VeryLongFolderName`, 20) + `\\LongFile.txt` + longFilePath := filepath.Join(tempDir, longFileName) + + tempDirVolume := filepath.VolumeName(tempDir) + // Create the file + content := []byte("This is a test file with a very long name.") + err = os.MkdirAll(filepath.Dir(longFilePath), 0755) + test.OK(t, err) + if err != nil { + t.Fatalf("Failed to create long folder: %v", err) + } + err = os.WriteFile(longFilePath, content, 0644) + test.OK(t, err) + if err != nil { + t.Fatalf("Failed to create long file: %v", err) + } + osVolumeGUIDPath := getOSVolumeGUIDPath(t) + osVolumeGUIDVolume := filepath.VolumeName(osVolumeGUIDPath) + + testCases := []struct { + name string + path string + expectedVolume string + expectError bool + expectedEASupported bool + isRealPath bool + }{ + { + name: "Network drive path", + path: `Z:\Shared\Documents`, + expectedVolume: `Z:`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Subst drive path", + path: `X:\Virtual\Folder`, + expectedVolume: `X:`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Windows reserved path", + path: `\\.\` + os.Getenv("SystemDrive") + `\System32\drivers\etc\hosts`, + expectedVolume: `\\.\` + os.Getenv("SystemDrive"), + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Long UNC path", + path: `\\?\UNC\LongServerName\VeryLongShareName\DeepPath\File.txt`, + expectedVolume: `\\LongServerName\VeryLongShareName`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Volume GUID path", + path: osVolumeGUIDPath, + expectedVolume: osVolumeGUIDVolume, + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Volume GUID path with subfolder", + path: osVolumeGUIDPath + `\Windows`, + expectedVolume: osVolumeGUIDVolume, + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Standard path", + path: os.Getenv("SystemDrive") + `\Users\`, + expectedVolume: os.Getenv("SystemDrive"), + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "Extended length path", + path: longFilePath, + expectedVolume: tempDirVolume, + expectError: false, + expectedEASupported: true, + isRealPath: true, + }, + { + name: "UNC path", + path: `\\server\share\folder`, + expectedVolume: `\\server\share`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Extended UNC path", + path: `\\?\UNC\server\share\folder`, + expectedVolume: `\\server\share`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Volume Shadow Copy path", + path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1\Users\test`, + expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`, + expectError: false, + expectedEASupported: false, + }, + { + name: "Relative path", + path: `folder\subfolder`, + + expectedVolume: currentVolume, // Get current volume + expectError: false, + expectedEASupported: true, + }, + { + name: "Empty path", + path: ``, + expectedVolume: 
currentVolume, + expectError: false, + expectedEASupported: true, + isRealPath: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + isEASupported, err := checkAndStoreEASupport(tc.path) + test.OK(t, err) + test.Equals(t, tc.expectedEASupported, isEASupported) + + volume, err := prepareVolumeName(tc.path) + + if tc.expectError { + test.Assert(t, err != nil, "Expected an error, but got none") + } else { + test.OK(t, err) + } + test.Equals(t, tc.expectedVolume, volume) + + if tc.isRealPath { + isEASupportedVolume, err := fs.PathSupportsExtendedAttributes(volume + `\`) + // If the prepared volume name is not valid, we will next fetch the actual volume name. + test.OK(t, err) + + test.Equals(t, tc.expectedEASupported, isEASupportedVolume) + + actualVolume, err := fs.GetVolumePathName(tc.path) + test.OK(t, err) + test.Equals(t, tc.expectedVolume, actualVolume) + } + }) + } +} + +func getOSVolumeGUIDPath(t *testing.T) string { + // Get the path of the OS drive (usually C:\) + osDrive := os.Getenv("SystemDrive") + "\\" + + // Convert to a volume GUID path + volumeName, err := windows.UTF16PtrFromString(osDrive) + test.OK(t, err) + if err != nil { + return "" + } + + var volumeGUID [windows.MAX_PATH]uint16 + err = windows.GetVolumeNameForVolumeMountPoint(volumeName, &volumeGUID[0], windows.MAX_PATH) + test.OK(t, err) + if err != nil { + return "" + } + + return windows.UTF16ToString(volumeGUID[:]) +} From 61e1f4a916e58c21d0b9bf4c250af81b63fc2480 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 10 Aug 2024 18:06:24 +0200 Subject: [PATCH 591/893] backend: return correct error on upload/request timeout --- internal/backend/watchdog_roundtriper.go | 3 +++ internal/backend/watchdog_roundtriper_test.go | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/internal/backend/watchdog_roundtriper.go b/internal/backend/watchdog_roundtriper.go index e3e10d7fedf..dc270b974f3 100644 --- a/internal/backend/watchdog_roundtriper.go +++ b/internal/backend/watchdog_roundtriper.go @@ -65,6 +65,9 @@ func (w *watchdogRoundtripper) RoundTrip(req *http.Request) (*http.Response, err resp, err := w.rt.RoundTrip(req) if err != nil { + if isTimeout(err) { + err = errRequestTimeout + } return nil, err } diff --git a/internal/backend/watchdog_roundtriper_test.go b/internal/backend/watchdog_roundtriper_test.go index 723a311cb95..f7f90259cea 100644 --- a/internal/backend/watchdog_roundtriper_test.go +++ b/internal/backend/watchdog_roundtriper_test.go @@ -135,7 +135,7 @@ func TestUploadTimeout(t *testing.T) { rtest.OK(t, err) resp, err := rt.RoundTrip(req) - rtest.Equals(t, context.Canceled, err) + rtest.Equals(t, errRequestTimeout, err) // make linter happy if resp != nil { rtest.OK(t, resp.Body.Close()) @@ -162,7 +162,7 @@ func TestProcessingTimeout(t *testing.T) { rtest.OK(t, err) resp, err := rt.RoundTrip(req) - rtest.Equals(t, context.Canceled, err) + rtest.Equals(t, errRequestTimeout, err) // make linter happy if resp != nil { rtest.OK(t, resp.Body.Close()) From 48e3832322e8d03bd60a379d0c5ae1c48977a370 Mon Sep 17 00:00:00 2001 From: Michael Terry Date: Tue, 30 Jul 2024 19:06:18 -0400 Subject: [PATCH 592/893] main: return an exit code (12) for "bad password" errors --- changelog/unreleased/pull-4959 | 6 ++++++ cmd/restic/cmd_backup.go | 1 + cmd/restic/cmd_cat.go | 1 + cmd/restic/cmd_check.go | 1 + cmd/restic/cmd_copy.go | 1 + cmd/restic/cmd_debug.go | 1 + cmd/restic/cmd_diff.go | 1 + cmd/restic/cmd_dump.go | 1 + cmd/restic/cmd_find.go | 1 + 
cmd/restic/cmd_forget.go | 1 + cmd/restic/cmd_key_add.go | 1 + cmd/restic/cmd_key_list.go | 1 + cmd/restic/cmd_key_passwd.go | 1 + cmd/restic/cmd_key_remove.go | 1 + cmd/restic/cmd_list.go | 1 + cmd/restic/cmd_ls.go | 1 + cmd/restic/cmd_migrate.go | 1 + cmd/restic/cmd_mount.go | 1 + cmd/restic/cmd_prune.go | 1 + cmd/restic/cmd_recover.go | 1 + cmd/restic/cmd_repair_index.go | 1 + cmd/restic/cmd_repair_packs.go | 1 + cmd/restic/cmd_repair_snapshots.go | 1 + cmd/restic/cmd_restore.go | 1 + cmd/restic/cmd_rewrite.go | 1 + cmd/restic/cmd_self_update.go | 1 + cmd/restic/cmd_snapshots.go | 1 + cmd/restic/cmd_stats.go | 1 + cmd/restic/cmd_tag.go | 1 + cmd/restic/global.go | 2 +- cmd/restic/main.go | 5 +++++ doc/075_scripting.rst | 2 ++ 32 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/pull-4959 diff --git a/changelog/unreleased/pull-4959 b/changelog/unreleased/pull-4959 new file mode 100644 index 00000000000..120527e22b4 --- /dev/null +++ b/changelog/unreleased/pull-4959 @@ -0,0 +1,6 @@ +Enhancement: Return exit code 12 for "bad password" + +Restic now returns exit code 12 when it can't open the repository +because of a bad password. + +https://github.com/restic/restic/pull/4959 diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 9957b578493..28b6c7feb4a 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -43,6 +43,7 @@ Exit status is 1 if there was a fatal error (no snapshot created). Exit status is 3 if some source data could not be read (incomplete snapshot created). Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, PreRun: func(_ *cobra.Command, _ []string) { if backupOptions.Host == "" { diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index d3e98b2ffd1..ac03798d259 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -27,6 +27,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 9cccc0609fc..b0749e0221b 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -39,6 +39,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index d7761174a00..40015b13c57 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -38,6 +38,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
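The changelog entry for pull-4959 above only names the new exit code, so a short sketch of how a calling program might branch on it may help. Everything here is an assumption for illustration: the `restic` binary is expected on PATH, and the repository location and password are expected to come from the environment; no such wrapper exists in the patch.

```
package main

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// run any repository-opening restic command; credentials come from the environment
	cmd := exec.Command("restic", "snapshots")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	err := cmd.Run()
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		switch exitErr.ExitCode() {
		case 10:
			fmt.Println("repository does not exist")
		case 11:
			fmt.Println("repository is already locked")
		case 12:
			fmt.Println("wrong repository password") // documented as "since restic 0.17.1" in the scripting docs below
		default:
			fmt.Printf("restic failed with exit code %d\n", exitErr.ExitCode())
		}
	}
}
```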
`, RunE: func(cmd *cobra.Command, args []string) error { return runCopy(cmd.Context(), copyOptions, globalOptions, args) diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 74c21df24d4..18b4b763175 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -47,6 +47,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 6793184b136..24f445b6492 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -43,6 +43,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 9c0fe535e68..a5794ad3087 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -38,6 +38,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index aebca594e56..f84ad43c335 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -37,6 +37,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 27b8f4f7490..01fe0e606d0 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -39,6 +39,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index c9f0ef23319..2737410a05b 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, } diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index ae751a48751..1c70cce8a74 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -27,6 +27,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index 723acaaab65..9bb1417494d 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, } diff --git a/cmd/restic/cmd_key_remove.go b/cmd/restic/cmd_key_remove.go index c4c24fdb706..3cb2e0bd789 100644 --- a/cmd/restic/cmd_key_remove.go +++ b/cmd/restic/cmd_key_remove.go @@ -24,6 +24,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 060bca8718d..4aa9f43bb17 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 76e192b6c9e..7c712e48168 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -43,6 +43,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index 7e472ff121c..2cc44bff0a5 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -26,6 +26,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 3e0b159bec0..0b79afe45e1 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -68,6 +68,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 7e706ccf899..e19c2e04b16 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -32,6 +32,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 4e8b8c07763..0ff6e2d66ee 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -26,6 +26,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index e6b6e9fa500..83c1bfa7f9b 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index b0afefb2de1..290c3734e74 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -27,6 +27,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index fc221ebea29..385854312ad 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -41,6 +41,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index d71cb768387..eb437a11de7 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -36,6 +36,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 463720ee146..d1088d00bd0 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -42,6 +42,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_self_update.go b/cmd/restic/cmd_self_update.go index 0fce41241dc..09c86bf2ca9 100644 --- a/cmd/restic/cmd_self_update.go +++ b/cmd/restic/cmd_self_update.go @@ -28,6 +28,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. 
Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 826ab55ec36..442c5737519 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -27,6 +27,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 60ab1e5bcb4..d2641178325 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -53,6 +53,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index ea73955f012..47e3c02ad58 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -29,6 +29,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 846f3339fe1..22aa8a2904e 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -493,7 +493,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } } if err != nil { - if errors.IsFatal(err) { + if errors.IsFatal(err) || errors.Is(err, repository.ErrNoKeyFound) { return nil, err } return nil, errors.Fatalf("%s", err) diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 5818221a587..6661b4f5dc9 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -17,6 +17,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/options" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) @@ -138,6 +139,8 @@ func main() { fmt.Fprintf(os.Stderr, "Warning: %v\n", err) case errors.IsFatal(err): fmt.Fprintf(os.Stderr, "%v\n", err) + case errors.Is(err, repository.ErrNoKeyFound): + fmt.Fprintf(os.Stderr, "Fatal: %v\n", err) case err != nil: fmt.Fprintf(os.Stderr, "%+v\n", err) @@ -160,6 +163,8 @@ func main() { exitCode = 10 case restic.IsAlreadyLocked(err): exitCode = 11 + case errors.Is(err, repository.ErrNoKeyFound): + exitCode = 12 case errors.Is(err, context.Canceled): exitCode = 130 default: diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index fa7fa1b6ec1..438eaa84fbd 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -63,6 +63,8 @@ a more specific description. 
+-----+----------------------------------------------------+ | 11 | Failed to lock repository (since restic 0.17.0) | +-----+----------------------------------------------------+ +| 12 | Wrong password (since restic 0.17.1) | ++-----+----------------------------------------------------+ | 130 | Restic was interrupted using SIGINT or SIGSTOP | +-----+----------------------------------------------------+ From fac1d9fea17d8ad23073d5d4523ebaca16e42a0e Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 11 Aug 2024 01:42:13 +0530 Subject: [PATCH 593/893] cache: backend add List method and a cache clear functionality * removes files which are no longer in the repository, including index files, snapshot files and pack files from the cache. cache: fix ids set initialisation with NewIDSet() --- internal/backend/cache/backend.go | 40 +++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/internal/backend/cache/backend.go b/internal/backend/cache/backend.go index 94f648cf4d5..92cca4d0ed3 100644 --- a/internal/backend/cache/backend.go +++ b/internal/backend/cache/backend.go @@ -2,11 +2,14 @@ package cache import ( "context" + "fmt" "io" + "os" "sync" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" ) // Backend wraps a restic.Backend and adds a cache. @@ -215,3 +218,40 @@ func (b *Backend) IsNotExist(err error) bool { func (b *Backend) Unwrap() backend.Backend { return b.Backend } + +func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backend.FileInfo) error) error { + if !b.Cache.canBeCached(t) { + return b.Backend.List(ctx, t, fn) + } + + // will contain the IDs of the files that are in the repository + ids := restic.NewIDSet() + + // wrap the original function to also add the file to the ids set + wrapFn := func(f backend.FileInfo) error { + id, err := restic.ParseID(f.Name) + if err != nil { + // returning error here since, if we cannot parse the ID, the file + // is invalid and the list must exit. 
+ return err + } + + ids.Insert(id) + + // execute the original function + return fn(f) + } + + err := b.Backend.List(ctx, t, wrapFn) + if err != nil { + return err + } + + // clear the cache for files that are not in the repo anymore, ignore errors + err = b.Cache.Clear(t, ids) + if err != nil { + fmt.Fprintf(os.Stderr, "error clearing %s files in cache: %v\n", t.String(), err) + } + + return nil +} From 0cf17372894d15c9f3a3e04ffe8523971d0f4bd8 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 11 Aug 2024 15:43:03 +0530 Subject: [PATCH 594/893] cache: check for context cancellation before clearing cache --- internal/backend/cache/backend.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/backend/cache/backend.go b/internal/backend/cache/backend.go index 92cca4d0ed3..58b03dd3862 100644 --- a/internal/backend/cache/backend.go +++ b/internal/backend/cache/backend.go @@ -247,6 +247,10 @@ func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backen return err } + if ctx.Err() != nil { + return ctx.Err() + } + // clear the cache for files that are not in the repo anymore, ignore errors err = b.Cache.Clear(t, ids) if err != nil { From 1e68fbca90dff5ac02f7665eb9193d3914d0d284 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 11 Aug 2024 15:58:27 +0530 Subject: [PATCH 595/893] repository: removed redundant prepareCache method from Repository * remove the prepareCache method from the Repository * changed the signature of the SetIndex function to no longer return an error --- internal/checker/checker.go | 6 +---- internal/repository/repair_index.go | 6 ++--- internal/repository/repository.go | 37 ++--------------------------- internal/restic/repository.go | 2 +- 4 files changed, 6 insertions(+), 45 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 031e13807f0..d5e7fd1f818 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -146,11 +146,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e return hints, append(errs, err) } - err = c.repo.SetIndex(c.masterIndex) - if err != nil { - debug.Log("SetIndex returned error: %v", err) - errs = append(errs, err) - } + c.repo.SetIndex(c.masterIndex) // compute pack size using index entries c.packs, err = pack.Size(ctx, c.repo, false) diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index 77080925461..c72dcfd00f4 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -52,10 +52,8 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, return err } - err = repo.SetIndex(mi) - if err != nil { - return err - } + repo.SetIndex(mi) + packSizeFromIndex, err = pack.Size(ctx, repo, false) if err != nil { return err diff --git a/internal/repository/repository.go b/internal/repository/repository.go index f7fd65c71d4..3dc248c5eab 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "math" - "os" "runtime" "sort" "sync" @@ -586,9 +585,8 @@ func (r *Repository) ListPacksFromIndex(ctx context.Context, packs restic.IDSet) } // SetIndex instructs the repository to use the given index. 
-func (r *Repository) SetIndex(i restic.MasterIndex) error { +func (r *Repository) SetIndex(i restic.MasterIndex) { r.idx = i.(*index.MasterIndex) - return r.prepareCache() } func (r *Repository) clearIndex() { @@ -628,12 +626,8 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { return errors.New("index uses feature not supported by repository version 1") } } - if ctx.Err() != nil { - return ctx.Err() - } - // remove index files from the cache which have been removed in the repo - return r.prepareCache() + return ctx.Err() } // createIndexFromPacks creates a new index by reading all given pack files (with sizes). @@ -699,33 +693,6 @@ func (r *Repository) createIndexFromPacks(ctx context.Context, packsize map[rest return invalid, nil } -// prepareCache initializes the local cache. indexIDs is the list of IDs of -// index files still present in the repo. -func (r *Repository) prepareCache() error { - if r.Cache == nil { - return nil - } - - indexIDs := r.idx.IDs() - debug.Log("prepare cache with %d index files", len(indexIDs)) - - // clear old index files - err := r.Cache.Clear(restic.IndexFile, indexIDs) - if err != nil { - fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err) - } - - packs := r.idx.Packs(restic.NewIDSet()) - - // clear old packs - err = r.Cache.Clear(restic.PackFile, packs) - if err != nil { - fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err) - } - - return nil -} - // SearchKey finds a key with the supplied password, afterwards the config is // read and parsed. It tries at most maxKeys key files in the repo. func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int, keyHint string) error { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index b18b036a733..ce8401b37d7 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -22,7 +22,7 @@ type Repository interface { Key() *crypto.Key LoadIndex(ctx context.Context, p *progress.Counter) error - SetIndex(mi MasterIndex) error + SetIndex(mi MasterIndex) LookupBlob(t BlobType, id ID) []PackedBlob LookupBlobSize(t BlobType, id ID) (size uint, exists bool) From b10d7ccddad646ddd35e29faa26a865d65cfbf20 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 11 Aug 2024 16:07:38 +0530 Subject: [PATCH 596/893] changelog: add unrelease changelog --- changelog/unreleased/issue-4934 | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 changelog/unreleased/issue-4934 diff --git a/changelog/unreleased/issue-4934 b/changelog/unreleased/issue-4934 new file mode 100644 index 00000000000..03194168e2d --- /dev/null +++ b/changelog/unreleased/issue-4934 @@ -0,0 +1,9 @@ +Enhancement: Clear removed snapshots, index and pack files from the local cache + +Restic did not clear removed snapshots from the cache after the `forget` +operation; only indexes and pack files were removed automatically. +Restic now automatically clears removed indexes, packs and snapshots from the +local cache. 
+ +https://github.com/restic/restic/issues/4934 +https://github.com/restic/restic/pull/4981 \ No newline at end of file From 0ca9355bc034055fc1d126ce73c0e7ac507c0186 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 11 Aug 2024 16:44:43 +0530 Subject: [PATCH 597/893] cache: add test for the automated cache clear to cache backend --- internal/backend/cache/backend_test.go | 58 ++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/internal/backend/cache/backend_test.go b/internal/backend/cache/backend_test.go index 7addc275d5a..dca51c2bf2b 100644 --- a/internal/backend/cache/backend_test.go +++ b/internal/backend/cache/backend_test.go @@ -57,6 +57,13 @@ func randomData(n int) (backend.Handle, []byte) { return h, data } +func list(t testing.TB, be backend.Backend, fn func(backend.FileInfo) error) { + err := be.List(context.TODO(), backend.IndexFile, fn) + if err != nil { + t.Fatal(err) + } +} + func TestBackend(t *testing.T) { be := mem.New() c := TestNewCache(t) @@ -238,3 +245,54 @@ func TestErrorBackend(t *testing.T) { wg.Wait() } + +func TestAutomaticCacheClear(t *testing.T) { + be := mem.New() + c := TestNewCache(t) + wbe := c.Wrap(be) + + // add two handles h1 and h2 + h1, data := randomData(2000) + // save h1 directly to the backend + save(t, be, h1, data) + if c.Has(h1) { + t.Errorf("cache has file1 too early") + } + + h2, data2 := randomData(3000) + + // save h2 directly to the backend + save(t, be, h2, data2) + if c.Has(h2) { + t.Errorf("cache has file2 too early") + } + + loadAndCompare(t, wbe, h1, data) + if !c.Has(h1) { + t.Errorf("cache doesn't have file1 after load") + } + + loadAndCompare(t, wbe, h2, data2) + if !c.Has(h2) { + t.Errorf("cache doesn't have file2 after load") + } + + // remove h1 directly from the backend + remove(t, be, h1) + if !c.Has(h1) { + t.Errorf("file1 not in cache any more, should be removed from cache only after list") + } + + // list all files in the backend + list(t, wbe, func(_ backend.FileInfo) error { return nil }) + + // h1 should be removed from the cache + if c.Has(h1) { + t.Errorf("cache has file1 after remove") + } + + // h2 should still be in the cache + if !c.Has(h2) { + t.Errorf("cache doesn't have file2 after list") + } +} From 8a7ae17d4dcd11f8b38e9f0be205baac216bb95f Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 17 Aug 2024 00:18:13 +0530 Subject: [PATCH 598/893] Revert "repository: removed redundant prepareCache method from Repository" This reverts commit 720609f8ba6dcf44b7fe51cd9b543ee44bbbaf38. 
--- internal/checker/checker.go | 6 ++++- internal/repository/repair_index.go | 6 +++-- internal/repository/repository.go | 37 +++++++++++++++++++++++++++-- internal/restic/repository.go | 2 +- 4 files changed, 45 insertions(+), 6 deletions(-) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index d5e7fd1f818..031e13807f0 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -146,7 +146,11 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e return hints, append(errs, err) } - c.repo.SetIndex(c.masterIndex) + err = c.repo.SetIndex(c.masterIndex) + if err != nil { + debug.Log("SetIndex returned error: %v", err) + errs = append(errs, err) + } // compute pack size using index entries c.packs, err = pack.Size(ctx, c.repo, false) diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index c72dcfd00f4..77080925461 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -52,8 +52,10 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, return err } - repo.SetIndex(mi) - + err = repo.SetIndex(mi) + if err != nil { + return err + } packSizeFromIndex, err = pack.Size(ctx, repo, false) if err != nil { return err diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 3dc248c5eab..f7fd65c71d4 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "math" + "os" "runtime" "sort" "sync" @@ -585,8 +586,9 @@ func (r *Repository) ListPacksFromIndex(ctx context.Context, packs restic.IDSet) } // SetIndex instructs the repository to use the given index. -func (r *Repository) SetIndex(i restic.MasterIndex) { +func (r *Repository) SetIndex(i restic.MasterIndex) error { r.idx = i.(*index.MasterIndex) + return r.prepareCache() } func (r *Repository) clearIndex() { @@ -626,8 +628,12 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { return errors.New("index uses feature not supported by repository version 1") } } + if ctx.Err() != nil { + return ctx.Err() + } - return ctx.Err() + // remove index files from the cache which have been removed in the repo + return r.prepareCache() } // createIndexFromPacks creates a new index by reading all given pack files (with sizes). @@ -693,6 +699,33 @@ func (r *Repository) createIndexFromPacks(ctx context.Context, packsize map[rest return invalid, nil } +// prepareCache initializes the local cache. indexIDs is the list of IDs of +// index files still present in the repo. +func (r *Repository) prepareCache() error { + if r.Cache == nil { + return nil + } + + indexIDs := r.idx.IDs() + debug.Log("prepare cache with %d index files", len(indexIDs)) + + // clear old index files + err := r.Cache.Clear(restic.IndexFile, indexIDs) + if err != nil { + fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err) + } + + packs := r.idx.Packs(restic.NewIDSet()) + + // clear old packs + err = r.Cache.Clear(restic.PackFile, packs) + if err != nil { + fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err) + } + + return nil +} + // SearchKey finds a key with the supplied password, afterwards the config is // read and parsed. It tries at most maxKeys key files in the repo. 
func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int, keyHint string) error { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index ce8401b37d7..b18b036a733 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -22,7 +22,7 @@ type Repository interface { Key() *crypto.Key LoadIndex(ctx context.Context, p *progress.Counter) error - SetIndex(mi MasterIndex) + SetIndex(mi MasterIndex) error LookupBlob(t BlobType, id ID) []PackedBlob LookupBlobSize(t BlobType, id ID) (size uint, exists bool) From a8032c932cf414ba14b51d3e9ad5d947855a35b5 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 17 Aug 2024 00:21:49 +0530 Subject: [PATCH 599/893] cache: remove redundant index file cleanup addressing code review comments --- internal/repository/repository.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index f7fd65c71d4..d408e310570 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -706,19 +706,10 @@ func (r *Repository) prepareCache() error { return nil } - indexIDs := r.idx.IDs() - debug.Log("prepare cache with %d index files", len(indexIDs)) - - // clear old index files - err := r.Cache.Clear(restic.IndexFile, indexIDs) - if err != nil { - fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err) - } - packs := r.idx.Packs(restic.NewIDSet()) // clear old packs - err = r.Cache.Clear(restic.PackFile, packs) + err := r.Cache.Clear(restic.PackFile, packs) if err != nil { fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err) } From f6e8d925902d1c968456b5629a99d50361d66113 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 17 Aug 2024 00:24:19 +0530 Subject: [PATCH 600/893] changelog: update changelog --- changelog/unreleased/issue-4934 | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/changelog/unreleased/issue-4934 b/changelog/unreleased/issue-4934 index 03194168e2d..6891ca20419 100644 --- a/changelog/unreleased/issue-4934 +++ b/changelog/unreleased/issue-4934 @@ -1,9 +1,8 @@ -Enhancement: Clear removed snapshots, index and pack files from the local cache +Enhancement: Clear removed snapshots from local cache of the current host -Restic did not clear removed snapshots from the cache after the `forget` -operation; only indexes and pack files were removed automatically. -Restic now automatically clears removed indexes, packs and snapshots from the -local cache. +Restic only removed snapshots from the cache on the host that runs the `forget` command. +On other hosts that use the same repository, the old snapshots remained in the cache. +Restic now, automatically clears old snapshots from the local cache of the current host. 
https://github.com/restic/restic/issues/4934 https://github.com/restic/restic/pull/4981 \ No newline at end of file From 12089054d8cb17005261773227215320493e2ca0 Mon Sep 17 00:00:00 2001 From: Andreas Deininger Date: Sat, 17 Aug 2024 12:39:41 +0200 Subject: [PATCH 601/893] GitHub test actions: fix warnings 'Restore cache failed' --- .github/workflows/tests.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3ca7a9edbce..2ffeb5ff212 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -66,6 +66,9 @@ jobs: GOPROXY: https://proxy.golang.org steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go ${{ matrix.go }} uses: actions/setup-go@v5 with: @@ -139,9 +142,6 @@ jobs: echo $Env:USERPROFILE\tar\bin >> $Env:GITHUB_PATH if: matrix.os == 'windows-latest' - - name: Check out code - uses: actions/checkout@v4 - - name: Build with build.go run: | go run build.go @@ -230,14 +230,14 @@ jobs: name: Cross Compile for subset ${{ matrix.subset }} steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go ${{ env.latest_go }} uses: actions/setup-go@v5 with: go-version: ${{ env.latest_go }} - - name: Check out code - uses: actions/checkout@v4 - - name: Cross-compile for subset ${{ matrix.subset }} run: | mkdir build-output build-output-debug @@ -252,14 +252,14 @@ jobs: # allow annotating code in the PR checks: write steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go ${{ env.latest_go }} uses: actions/setup-go@v5 with: go-version: ${{ env.latest_go }} - - name: Check out code - uses: actions/checkout@v4 - - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: From 00f63d72fa3431d4c2a5471f5b2457e7e10c9ca8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 18 Aug 2024 19:41:58 +0200 Subject: [PATCH 602/893] Mention RESTIC_HOST environment variable in docs --- doc/040_backup.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index f1f355c531e..b53ae8d095d 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -704,6 +704,7 @@ environment variables. 
The following lists these environment variables: RESTIC_TLS_CLIENT_CERT Location of TLS client certificate and private key (replaces --tls-client-cert) RESTIC_CACHE_DIR Location of the cache directory RESTIC_COMPRESSION Compression mode (only available for repository format version 2) + RESTIC_HOST Only consider snapshots for this host / Set the hostname for the snapshot manually (replaces --host) RESTIC_PROGRESS_FPS Frames per second by which the progress bar is updated RESTIC_PACK_SIZE Target size for pack files RESTIC_READ_CONCURRENCY Concurrency for file reads From a5f2d0cf565ba48bb3ae1ea8f7050abc19d387bf Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 18 Aug 2024 19:45:54 +0200 Subject: [PATCH 603/893] Improve description for no password on secondary repo --- cmd/restic/secondary_repo.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/secondary_repo.go b/cmd/restic/secondary_repo.go index 9a3eb5fe2c6..44621afa18f 100644 --- a/cmd/restic/secondary_repo.go +++ b/cmd/restic/secondary_repo.go @@ -50,7 +50,7 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo f.StringVarP(&opts.PasswordFile, "from-password-file", "", "", "`file` to read the source repository password from (default: $RESTIC_FROM_PASSWORD_FILE)") f.StringVarP(&opts.KeyHint, "from-key-hint", "", "", "key ID of key to try decrypting the source repository first (default: $RESTIC_FROM_KEY_HINT)") f.StringVarP(&opts.PasswordCommand, "from-password-command", "", "", "shell `command` to obtain the source repository password from (default: $RESTIC_FROM_PASSWORD_COMMAND)") - f.BoolVar(&opts.InsecureNoPassword, "from-insecure-no-password", false, "use an empty password for the source repository, must be passed to every restic command (insecure)") + f.BoolVar(&opts.InsecureNoPassword, "from-insecure-no-password", false, "use an empty password for the source repository (insecure)") opts.Repo = os.Getenv("RESTIC_FROM_REPOSITORY") opts.RepositoryFile = os.Getenv("RESTIC_FROM_REPOSITORY_FILE") From 61aaddac280087ab73d01d7ad2060bd603ab0ae8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 15 Aug 2024 21:17:49 +0200 Subject: [PATCH 604/893] restic: restore timestamps after extended attributes restoring the xattr containing resource forks on macOS apparently modifies the file modification timestamps. Thus, restore the timestamp after xattrs. --- changelog/unreleased/issue-4969 | 7 +++++++ internal/restic/node.go | 12 ++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) create mode 100644 changelog/unreleased/issue-4969 diff --git a/changelog/unreleased/issue-4969 b/changelog/unreleased/issue-4969 new file mode 100644 index 00000000000..ce76a738987 --- /dev/null +++ b/changelog/unreleased/issue-4969 @@ -0,0 +1,7 @@ +Bugfix: Correctly restore timestamp for files with resource forks on macOS + +On macOS, timestamps were incorrectly restored for files with resource forks. +This has been fixed. 
+ +https://github.com/restic/restic/issues/4969 +https://github.com/restic/restic/pull/5006 diff --git a/internal/restic/node.go b/internal/restic/node.go index 7c19882276a..6afdff64a09 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -249,22 +249,22 @@ func (node Node) restoreMetadata(path string, warn func(msg string)) error { firsterr = errors.WithStack(err) } - if err := node.RestoreTimestamps(path); err != nil { - debug.Log("error restoring timestamps for dir %v: %v", path, err) + if err := node.restoreExtendedAttributes(path); err != nil { + debug.Log("error restoring extended attributes for %v: %v", path, err) if firsterr == nil { firsterr = err } } - if err := node.restoreExtendedAttributes(path); err != nil { - debug.Log("error restoring extended attributes for %v: %v", path, err) + if err := node.restoreGenericAttributes(path, warn); err != nil { + debug.Log("error restoring generic attributes for %v: %v", path, err) if firsterr == nil { firsterr = err } } - if err := node.restoreGenericAttributes(path, warn); err != nil { - debug.Log("error restoring generic attributes for %v: %v", path, err) + if err := node.RestoreTimestamps(path); err != nil { + debug.Log("error restoring timestamps for %v: %v", path, err) if firsterr == nil { firsterr = err } From bc1aecfb15da21d335409efa558de2224225fc65 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Aug 2024 23:13:54 +0200 Subject: [PATCH 605/893] restore: test timestamps for macOS resource forks are restored correctly --- internal/restic/node_test.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index 7991d33e0cb..642beadc58c 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -197,6 +197,20 @@ var nodeTests = []Node{ {"user.foo", []byte("bar")}, }, }, + { + Name: "testXattrFileMacOSResourceFork", + Type: "file", + Content: IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []ExtendedAttribute{ + {"com.apple.ResourceFork", []byte("bar")}, + }, + }, } func TestNodeRestoreAt(t *testing.T) { @@ -216,6 +230,11 @@ func TestNodeRestoreAt(t *testing.T) { extAttrArr[i].Name = strings.ToUpper(extAttrArr[i].Name) } } + for _, attr := range test.ExtendedAttributes { + if strings.HasPrefix(attr.Name, "com.apple.") && runtime.GOOS != "darwin" { + t.Skipf("attr %v only relevant on macOS", attr.Name) + } + } // tempdir might be backed by a filesystem that does not support // extended attributes From cb16add8c82ab4f5087bd044893ab382c250e89e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Aug 2024 23:14:39 +0200 Subject: [PATCH 606/893] restic: cleanup redundant code in test case --- internal/restic/node_test.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index 642beadc58c..ab7f66e5b50 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -248,10 +248,6 @@ func TestNodeRestoreAt(t *testing.T) { rtest.OK(t, test.CreateAt(context.TODO(), nodePath, nil)) rtest.OK(t, test.RestoreMetadata(nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) - if test.Type == "dir" { - rtest.OK(t, test.RestoreTimestamps(nodePath)) - } - fi, err := os.Lstat(nodePath) 
rtest.OK(t, err) From e5a08e6808666c4929142722c35f9c9da1fe10bc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Aug 2024 23:48:45 +0200 Subject: [PATCH 607/893] group commands and make features/options visible --- cmd/restic/cmd_backup.go | 1 + cmd/restic/cmd_cache.go | 1 + cmd/restic/cmd_cat.go | 1 + cmd/restic/cmd_check.go | 1 + cmd/restic/cmd_copy.go | 1 + cmd/restic/cmd_debug.go | 5 +++-- cmd/restic/cmd_diff.go | 1 + cmd/restic/cmd_dump.go | 1 + cmd/restic/cmd_features.go | 2 +- cmd/restic/cmd_find.go | 1 + cmd/restic/cmd_forget.go | 1 + cmd/restic/cmd_init.go | 1 + cmd/restic/cmd_key.go | 1 + cmd/restic/cmd_list.go | 1 + cmd/restic/cmd_ls.go | 1 + cmd/restic/cmd_migrate.go | 1 + cmd/restic/cmd_mount.go | 1 + cmd/restic/cmd_options.go | 2 +- cmd/restic/cmd_prune.go | 1 + cmd/restic/cmd_recover.go | 1 + cmd/restic/cmd_repair.go | 5 +++-- cmd/restic/cmd_restore.go | 1 + cmd/restic/cmd_rewrite.go | 1 + cmd/restic/cmd_snapshots.go | 1 + cmd/restic/cmd_stats.go | 1 + cmd/restic/cmd_tag.go | 1 + cmd/restic/cmd_unlock.go | 1 + cmd/restic/main.go | 16 ++++++++++++++++ 28 files changed, 47 insertions(+), 6 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 28b6c7feb4a..562108a3370 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -55,6 +55,7 @@ Exit status is 12 if the password is incorrect. backupOptions.Host = hostname } }, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go index e71d383656f..e54c734518e 100644 --- a/cmd/restic/cmd_cache.go +++ b/cmd/restic/cmd_cache.go @@ -28,6 +28,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { return runCache(cacheOptions, globalOptions, args) diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index ac03798d259..6160c54dfc2 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -29,6 +29,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runCat(cmd.Context(), globalOptions, args) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index b0749e0221b..dcf7f27df5a 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -41,6 +41,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 40015b13c57..cfe574d3527 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -40,6 +40,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. 
`, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runCopy(cmd.Context(), copyOptions, globalOptions, args) }, diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 18b4b763175..2a48762d106 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -29,8 +29,9 @@ import ( ) var cmdDebug = &cobra.Command{ - Use: "debug", - Short: "Debug commands", + Use: "debug", + Short: "Debug commands", + GroupID: cmdGroupDefault, } var cmdDebugDump = &cobra.Command{ diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 24f445b6492..594e387e895 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -45,6 +45,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runDiff(cmd.Context(), diffOptions, globalOptions, args) diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index a5794ad3087..7d6652e1744 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -40,6 +40,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runDump(cmd.Context(), dumpOptions, globalOptions, args) diff --git a/cmd/restic/cmd_features.go b/cmd/restic/cmd_features.go index 497013696ea..a2f04be311c 100644 --- a/cmd/restic/cmd_features.go +++ b/cmd/restic/cmd_features.go @@ -31,7 +31,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. `, - Hidden: true, + GroupID: cmdGroupAdvanced, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { if len(args) != 0 { diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index f84ad43c335..cb5c0e5e080 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -39,6 +39,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runFind(cmd.Context(), findOptions, globalOptions, args) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 01fe0e606d0..58a9d25b71d 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -41,6 +41,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go index 3c0319e55d8..2a2aae1dc85 100644 --- a/cmd/restic/cmd_init.go +++ b/cmd/restic/cmd_init.go @@ -26,6 +26,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. 
`, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runInit(cmd.Context(), initOptions, globalOptions, args) diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go index c687eca53d6..80e892f20c6 100644 --- a/cmd/restic/cmd_key.go +++ b/cmd/restic/cmd_key.go @@ -11,6 +11,7 @@ var cmdKey = &cobra.Command{ The "key" command allows you to set multiple access keys or passwords per repository. `, + GroupID: cmdGroupDefault, } func init() { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 4aa9f43bb17..1a4791e311d 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -26,6 +26,7 @@ Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runList(cmd.Context(), globalOptions, args) }, diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 7c712e48168..69e2781037e 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -46,6 +46,7 @@ Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runLs(cmd.Context(), lsOptions, globalOptions, args) }, diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index 2cc44bff0a5..5c3e425edfc 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -29,6 +29,7 @@ Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() defer cancel() diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 0b79afe45e1..2f57a6d1f7d 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -71,6 +71,7 @@ Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, DisableAutoGenTag: true, + GroupID: cmdGroupDefault, RunE: func(cmd *cobra.Command, args []string) error { return runMount(cmd.Context(), mountOptions, globalOptions, args) }, diff --git a/cmd/restic/cmd_options.go b/cmd/restic/cmd_options.go index 4cd574b6811..9c07b262674 100644 --- a/cmd/restic/cmd_options.go +++ b/cmd/restic/cmd_options.go @@ -20,7 +20,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. `, - Hidden: true, + GroupID: cmdGroupAdvanced, DisableAutoGenTag: true, Run: func(_ *cobra.Command, _ []string) { fmt.Printf("All Extended Options:\n") diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index e19c2e04b16..e8473bd6f2f 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -34,6 +34,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { term, cancel := setupTermstatus() diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 0ff6e2d66ee..a6ef59cc205 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -28,6 +28,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. 
Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return runRecover(cmd.Context(), globalOptions) diff --git a/cmd/restic/cmd_repair.go b/cmd/restic/cmd_repair.go index aefe02f3c46..65a903a49c2 100644 --- a/cmd/restic/cmd_repair.go +++ b/cmd/restic/cmd_repair.go @@ -5,8 +5,9 @@ import ( ) var cmdRepair = &cobra.Command{ - Use: "repair", - Short: "Repair the repository", + Use: "repair", + Short: "Repair the repository", + GroupID: cmdGroupDefault, } func init() { diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index eb437a11de7..c58b0b80df2 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -38,6 +38,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index d1088d00bd0..7788016b78e 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -44,6 +44,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runRewrite(cmd.Context(), rewriteOptions, globalOptions, args) diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 442c5737519..42677918fcf 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -29,6 +29,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runSnapshots(cmd.Context(), snapshotOptions, globalOptions, args) diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index d2641178325..c4438c19272 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -55,6 +55,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runStats(cmd.Context(), statsOptions, globalOptions, args) diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 47e3c02ad58..c7bf725e92d 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -31,6 +31,7 @@ Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runTag(cmd.Context(), tagOptions, globalOptions, args) diff --git a/cmd/restic/cmd_unlock.go b/cmd/restic/cmd_unlock.go index 96eef7e02db..d87cde065e6 100644 --- a/cmd/restic/cmd_unlock.go +++ b/cmd/restic/cmd_unlock.go @@ -19,6 +19,7 @@ EXIT STATUS Exit status is 0 if the command was successful. Exit status is 1 if there was any error. 
`, + GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return runUnlock(cmd.Context(), unlockOptions, globalOptions) diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 6661b4f5dc9..26e45bb3833 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -83,6 +83,22 @@ The full documentation can be found at https://restic.readthedocs.io/ . }, } +var cmdGroupDefault = "default" +var cmdGroupAdvanced = "advanced" + +func init() { + cmdRoot.AddGroup( + &cobra.Group{ + ID: cmdGroupDefault, + Title: "Available Commands:", + }, + &cobra.Group{ + ID: cmdGroupAdvanced, + Title: "Advanced Options:", + }, + ) +} + // Distinguish commands that need the password from those that work without, // so we don't run $RESTIC_PASSWORD_COMMAND for no reason (it might prompt the // user for authentication). From 424740f62c61ce0d20379aa8cf34e2cd14624541 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Aug 2024 23:49:20 +0200 Subject: [PATCH 608/893] Add missing DisableAutoGenTag flag for commands --- cmd/restic/cmd_copy.go | 1 + cmd/restic/cmd_debug.go | 1 + cmd/restic/cmd_key.go | 1 + cmd/restic/cmd_repair.go | 1 + 4 files changed, 4 insertions(+) diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index cfe574d3527..cd92193ac38 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -41,6 +41,7 @@ Exit status is 11 if the repository is already locked. Exit status is 12 if the password is incorrect. `, GroupID: cmdGroupDefault, + DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return runCopy(cmd.Context(), copyOptions, globalOptions, args) }, diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index 2a48762d106..b9219249220 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -32,6 +32,7 @@ var cmdDebug = &cobra.Command{ Use: "debug", Short: "Debug commands", GroupID: cmdGroupDefault, + DisableAutoGenTag: true, } var cmdDebugDump = &cobra.Command{ diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go index 80e892f20c6..a94caa0d843 100644 --- a/cmd/restic/cmd_key.go +++ b/cmd/restic/cmd_key.go @@ -11,6 +11,7 @@ var cmdKey = &cobra.Command{ The "key" command allows you to set multiple access keys or passwords per repository. 
`, + DisableAutoGenTag: true, GroupID: cmdGroupDefault, } diff --git a/cmd/restic/cmd_repair.go b/cmd/restic/cmd_repair.go index 65a903a49c2..6a1a1f9dc34 100644 --- a/cmd/restic/cmd_repair.go +++ b/cmd/restic/cmd_repair.go @@ -8,6 +8,7 @@ var cmdRepair = &cobra.Command{ Use: "repair", Short: "Repair the repository", GroupID: cmdGroupDefault, + DisableAutoGenTag: true, } func init() { From a99b82450811046944038fef0b10b27d0f68aede Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Aug 2024 23:52:21 +0200 Subject: [PATCH 609/893] update docs --- doc/manual_rest.rst | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index a7a0f96e0c3..031f4fc528c 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -28,8 +28,6 @@ Usage help is available: dump Print a backed-up file to stdout find Find a file, a directory or restic IDs forget Remove snapshots from the repository - generate Generate manual pages and auto-completion files (bash, fish, zsh, powershell) - help Help about any command init Initialize a new repository key Manage keys (passwords) list List objects in the repository @@ -41,11 +39,19 @@ Usage help is available: repair Repair the repository restore Extract the data from a snapshot rewrite Rewrite snapshots to exclude unwanted files - self-update Update the restic binary snapshots List all snapshots stats Scan the repository and show basic statistics tag Modify tags on snapshots unlock Remove locks other processes created + + Advanced Options: + features Print list of feature flags + options Print list of extended options + + Additional Commands: + generate Generate manual pages and auto-completion files (bash, fish, zsh, powershell) + help Help about any command + self-update Update the restic binary version Print version information Flags: From 8206cd19c8b7cb97e5b4cce68c05e6890c069921 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 22 Aug 2024 23:16:12 +0200 Subject: [PATCH 610/893] backend/retry: don't trip circuit breaker if context is canceled When the context used for a load operation is canceled, then the result is always an error independent of whether the file could be retrieved from the backend. Do not false positively trip the circuit breaker in this case. The old behavior was problematic when trying to lock a repository. When `Lock.checkForOtherLocks` listed multiple lock files in parallel and one of them fails to load, then all other loads were canceled. This cancelation was remembered by the circuit breaker, such that locking retries would fail. --- changelog/unreleased/pull-5011 | 10 ++++++++ internal/backend/retry/backend_retry.go | 5 ++-- internal/backend/retry/backend_retry_test.go | 24 ++++++++++++++++++++ 3 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 changelog/unreleased/pull-5011 diff --git a/changelog/unreleased/pull-5011 b/changelog/unreleased/pull-5011 new file mode 100644 index 00000000000..8bd5ef5323a --- /dev/null +++ b/changelog/unreleased/pull-5011 @@ -0,0 +1,10 @@ +Bugfix: Fix rare failures to retry locking a repository + +Restic 0.17.0 could in rare cases fail to retry locking a repository if +one of the lock files failed to load. The lock operation failed with error +`unable to create lock in backend: circuit breaker open for file ` + +The error handling has been fixed to correctly retry locking the repository. 
+ +https://github.com/restic/restic/issues/5005 +https://github.com/restic/restic/pull/5011 diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index 8d0f42bfdc3..92c285c4b36 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -209,9 +209,10 @@ func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offse return be.Backend.Load(ctx, h, length, offset, consumer) }) - if feature.Flag.Enabled(feature.BackendErrorRedesign) && err != nil && !be.IsPermanentError(err) { + if feature.Flag.Enabled(feature.BackendErrorRedesign) && err != nil && ctx.Err() == nil && !be.IsPermanentError(err) { // We've exhausted the retries, the file is likely inaccessible. By excluding permanent - // errors, not found or truncated files are not recorded. + // errors, not found or truncated files are not recorded. Also ignore errors if the context + // was canceled. be.failedLoads.LoadOrStore(key, time.Now()) } diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go index fd76200d406..ffb8ae18699 100644 --- a/internal/backend/retry/backend_retry_test.go +++ b/internal/backend/retry/backend_retry_test.go @@ -357,6 +357,30 @@ func TestBackendLoadCircuitBreaker(t *testing.T) { test.Equals(t, notFound, err, "expected circuit breaker to reset, got %v") } +func TestBackendLoadCircuitBreakerCancel(t *testing.T) { + cctx, cancel := context.WithCancel(context.Background()) + be := mock.NewBackend() + be.OpenReaderFn = func(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { + cancel() + return nil, errors.New("something") + } + nilRd := func(rd io.Reader) (err error) { + return nil + } + + TestFastRetries(t) + retryBackend := New(be, 2, nil, nil) + // canceling the context should not trip the circuit breaker + err := retryBackend.Load(cctx, backend.Handle{Name: "other"}, 0, 0, nilRd) + test.Equals(t, context.Canceled, err, "unexpected error") + + // reset context and check that the cirucit breaker does not return an error + cctx, cancel = context.WithCancel(context.Background()) + defer cancel() + err = retryBackend.Load(cctx, backend.Handle{Name: "other"}, 0, 0, nilRd) + test.Equals(t, context.Canceled, err, "unexpected error") +} + func TestBackendStatNotExists(t *testing.T) { // stat should not retry if the error matches IsNotExist notFound := errors.New("not found") From 6eece31dc3ba736f061b162de1498503b65265ff Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Aug 2024 23:24:43 +0200 Subject: [PATCH 611/893] lock: introduce short delay between failed locking retries Failed locking attempts were immediately retried up to three times without any delay between the retries. If a lock file is not found while checking for other locks, with the reworked backend retries there is no delay between those retries. This is a problem if a backend requires a few seconds to reflect file deletions in the file listings. To work around this problem, introduce a short exponentially increasing delay between the retries. The number of retries is now increased to 4. This results in delays of 5, 10 and 20 seconds between the retries. 
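For illustration only, the pacing described above behaves roughly like the following sketch. The names and the helper function are made up for this example and are not part of the actual change; the real implementation is the `internal/restic/lock.go` diff below.

    package main

    import (
        "context"
        "errors"
        "time"
    )

    // retryWithBackoff sketches the new pacing: up to 4 attempts, with a delay
    // that starts at 5 seconds and doubles between attempts (5s, 10s, 20s),
    // aborting early if the context is canceled.
    func retryWithBackoff(ctx context.Context, attempt func() bool) error {
        delay := 5 * time.Second
        for i := 0; i < 4; i++ {
            if i != 0 {
                // sleep between retries to give the backend time to settle
                select {
                case <-ctx.Done():
                    return ctx.Err()
                case <-time.After(delay):
                }
                delay *= 2
            }
            if attempt() {
                return nil
            }
        }
        return errors.New("lock check still failing after retries")
    }

    func main() {
        _ = retryWithBackoff(context.Background(), func() bool { return true })
    }
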
--- .../unreleased/{pull-5011 => issue-5005} | 3 +++ internal/restic/lock.go | 27 ++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) rename changelog/unreleased/{pull-5011 => issue-5005} (76%) diff --git a/changelog/unreleased/pull-5011 b/changelog/unreleased/issue-5005 similarity index 76% rename from changelog/unreleased/pull-5011 rename to changelog/unreleased/issue-5005 index 8bd5ef5323a..90c164b07ea 100644 --- a/changelog/unreleased/pull-5011 +++ b/changelog/unreleased/issue-5005 @@ -5,6 +5,9 @@ one of the lock files failed to load. The lock operation failed with error `unable to create lock in backend: circuit breaker open for file ` The error handling has been fixed to correctly retry locking the repository. +In addition, restic now waits a few seconds between locking retries to +increase chances of success. https://github.com/restic/restic/issues/5005 https://github.com/restic/restic/pull/5011 +https://github.com/restic/restic/pull/5012 diff --git a/internal/restic/lock.go b/internal/restic/lock.go index 49c7cedf22d..969d0593db1 100644 --- a/internal/restic/lock.go +++ b/internal/restic/lock.go @@ -103,10 +103,14 @@ func NewExclusiveLock(ctx context.Context, repo Unpacked) (*Lock, error) { var waitBeforeLockCheck = 200 * time.Millisecond +// delay increases by factor 2 on each retry +var initialWaitBetweenLockRetries = 5 * time.Second + // TestSetLockTimeout can be used to reduce the lock wait timeout for tests. func TestSetLockTimeout(t testing.TB, d time.Duration) { t.Logf("setting lock timeout to %v", d) waitBeforeLockCheck = d + initialWaitBetweenLockRetries = d } func newLock(ctx context.Context, repo Unpacked, excl bool) (*Lock, error) { @@ -170,8 +174,17 @@ func (l *Lock) checkForOtherLocks(ctx context.Context) error { if l.lockID != nil { checkedIDs.Insert(*l.lockID) } + delay := initialWaitBetweenLockRetries // retry locking a few times - for i := 0; i < 3; i++ { + for i := 0; i < 4; i++ { + if i != 0 { + // sleep between retries to give backend some time to settle + if err := cancelableDelay(ctx, delay); err != nil { + return err + } + delay *= 2 + } + // Store updates in new IDSet to prevent data races var m sync.Mutex newCheckedIDs := NewIDSet(checkedIDs.List()...) @@ -213,6 +226,18 @@ func (l *Lock) checkForOtherLocks(ctx context.Context) error { return err } +func cancelableDelay(ctx context.Context, delay time.Duration) error { + // delay next try a bit + timer := time.NewTimer(delay) + select { + case <-ctx.Done(): + timer.Stop() + return ctx.Err() + case <-timer.C: + } + return nil +} + // createLock acquires the lock by creating a file in the repository. func (l *Lock) createLock(ctx context.Context) (ID, error) { id, err := SaveJSONUnpacked(ctx, l.repo, LockFile, l) From 64d628bd75062da7ccdef8d58e53152cda1763c7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Aug 2024 21:52:34 +0200 Subject: [PATCH 612/893] make timeout for slow requests configurable --- changelog/unreleased/issue-4970 | 13 +++++++++++++ cmd/restic/global.go | 1 + doc/faq.rst | 14 ++++++++++++++ internal/backend/http_transport.go | 9 ++++++++- 4 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-4970 diff --git a/changelog/unreleased/issue-4970 b/changelog/unreleased/issue-4970 new file mode 100644 index 00000000000..524e91b7566 --- /dev/null +++ b/changelog/unreleased/issue-4970 @@ -0,0 +1,13 @@ +Enhancement: Make timeout for stuck requests customizable + +Restic monitors connections to the backend to detect stuck requests. 
If a request +does not return any data within five minutes, restic assumes the request is stuck and +retries it. However, for large repositories it sometimes takes longer than that to +collect a list of all files, causing the following error: + +`List(data) returned error, retrying after 1s: [...]: request timeout` + +It is now possible to increase the timeout using the `--stuck-request-timeout` option. + +https://github.com/restic/restic/issues/4970 +https://github.com/restic/restic/pull/5014 diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 22aa8a2904e..375b57f98bd 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -140,6 +140,7 @@ func init() { f.UintVar(&globalOptions.PackSize, "pack-size", 0, "set target pack `size` in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)") f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)") f.StringVar(&globalOptions.HTTPUserAgent, "http-user-agent", "", "set a http user agent for outgoing http requests") + f.DurationVar(&globalOptions.StuckRequestTimeout, "stuck-request-timeout", 5*time.Minute, "`duration` after which to retry stuck requests") // Use our "generate" command instead of the cobra provided "completion" command cmdRoot.CompletionOptions.DisableDefaultCmd = true diff --git a/doc/faq.rst b/doc/faq.rst index b26398f8c8b..74dd77d7143 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -228,3 +228,17 @@ Restic backup command fails to find a valid file in Windows If the name of a file in Windows contains an invalid character, Restic will not be able to read the file. To solve this issue, consider renaming the particular file. + +What can I do in case of "request timeout" errors? +-------------------------------------------------- + +Restic monitors connections to the backend to detect stuck requests. If a request +does not return any data within five minutes, restic assumes the request is stuck and +retries it. However, for large repositories it sometimes takes longer than that to +collect a list of all files, causing the following error: + +:: + + List(data) returned error, retrying after 1s: [...]: request timeout + +In this case you can increase the timeout using the ``--stuck-request-timeout`` option. 
diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go index 5162d35715e..5a3856e412e 100644 --- a/internal/backend/http_transport.go +++ b/internal/backend/http_transport.go @@ -31,6 +31,9 @@ type TransportOptions struct { // Specify Custom User-Agent for the http Client HTTPUserAgent string + + // Timeout after which to retry stuck requests + StuckRequestTimeout time.Duration } // readPEMCertKey reads a file and returns the PEM encoded certificate and key @@ -143,7 +146,11 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { } if feature.Flag.Enabled(feature.BackendErrorRedesign) { - rt = newWatchdogRoundtripper(rt, 5*time.Minute, 128*1024) + if opts.StuckRequestTimeout == 0 { + opts.StuckRequestTimeout = 5 * time.Minute + } + + rt = newWatchdogRoundtripper(rt, opts.StuckRequestTimeout, 128*1024) } // wrap in the debug round tripper (if active) From 6b4f16f77bf70684c17912426f8a3fa2ce5f8ecb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Aug 2024 23:52:33 +0200 Subject: [PATCH 613/893] doc/backup: link to exit code for scripting section --- doc/040_backup.rst | 7 ++++--- doc/075_scripting.rst | 2 ++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index b53ae8d095d..4c9a44c748f 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -778,11 +778,12 @@ environment variables and configuration files; see their respective manuals. Exit status codes ***************** -Restic returns one of the following exit status codes after the backup command is run: +Restic returns an exit status code after the backup command is run: * 0 when the backup was successful (snapshot with all source files created) * 1 when there was a fatal error (no snapshot created) * 3 when some source files could not be read (incomplete snapshot with remaining files created) +* further exit codes are documented in :ref:`exit-codes`. Fatal errors occur for example when restic is unable to write to the backup destination, when there are network connectivity issues preventing successful communication, or when an invalid @@ -795,5 +796,5 @@ file read errors that occurred while running the backup. If there are errors of restic will still try to complete the backup run with all the other files, and create a snapshot that then contains all but the unreadable files. -One can use these exit status codes in scripts and other automation tools, to make them aware of -the outcome of the backup run. To manually inspect the exit code in e.g. Linux, run ``echo $?``. +For use of these exit status codes in scripts and other automation tools, see :ref:`exit-codes`. +To manually inspect the exit code in e.g. Linux, run ``echo $?``. diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 438eaa84fbd..9fa0da6d049 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -39,6 +39,8 @@ Note that restic will also return exit code ``1`` if a different error is encoun If there are no errors, restic will return a zero exit code and print the repository metadata. +.. 
_exit-codes: + Exit codes ********** From 5980daea64e3c1111957a819ed833390add58148 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Aug 2024 23:53:12 +0200 Subject: [PATCH 614/893] doc/backup: move exit status codes section up --- doc/040_backup.rst | 48 +++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 4c9a44c748f..696b235cce6 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -686,6 +686,30 @@ created as it would only be written at the very (successful) end of the backup operation. Previous snapshots will still be there and will still work. +Exit status codes +***************** + +Restic returns an exit status code after the backup command is run: + +* 0 when the backup was successful (snapshot with all source files created) +* 1 when there was a fatal error (no snapshot created) +* 3 when some source files could not be read (incomplete snapshot with remaining files created) +* further exit codes are documented in :ref:`exit-codes`. + +Fatal errors occur for example when restic is unable to write to the backup destination, when +there are network connectivity issues preventing successful communication, or when an invalid +password or command line argument is provided. When restic returns this exit status code, one +should not expect a snapshot to have been created. + +Source file read errors occur when restic fails to read one or more files or directories that +it was asked to back up, e.g. due to permission problems. Restic displays the number of source +file read errors that occurred while running the backup. If there are errors of this type, +restic will still try to complete the backup run with all the other files, and create a +snapshot that then contains all but the unreadable files. + +For use of these exit status codes in scripts and other automation tools, see :ref:`exit-codes`. +To manually inspect the exit code in e.g. Linux, run ``echo $?``. + Environment Variables ********************* @@ -774,27 +798,3 @@ See :ref:`caching` for the rules concerning cache locations when The external programs that restic may execute include ``rclone`` (for rclone backends) and ``ssh`` (for the SFTP backend). These may respond to further environment variables and configuration files; see their respective manuals. - -Exit status codes -***************** - -Restic returns an exit status code after the backup command is run: - -* 0 when the backup was successful (snapshot with all source files created) -* 1 when there was a fatal error (no snapshot created) -* 3 when some source files could not be read (incomplete snapshot with remaining files created) -* further exit codes are documented in :ref:`exit-codes`. - -Fatal errors occur for example when restic is unable to write to the backup destination, when -there are network connectivity issues preventing successful communication, or when an invalid -password or command line argument is provided. When restic returns this exit status code, one -should not expect a snapshot to have been created. - -Source file read errors occur when restic fails to read one or more files or directories that -it was asked to back up, e.g. due to permission problems. Restic displays the number of source -file read errors that occurred while running the backup. If there are errors of this type, -restic will still try to complete the backup run with all the other files, and create a -snapshot that then contains all but the unreadable files. 
- -For use of these exit status codes in scripts and other automation tools, see :ref:`exit-codes`. -To manually inspect the exit code in e.g. Linux, run ``echo $?``. From f451001f75bb3f7d946cc14962026df1d25f9d16 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 12:17:43 +0200 Subject: [PATCH 615/893] doc: use regional urls for Amazon S3 and add generic s3 provider section Split description for non-Amazon S3 providers into separate section. The section now also includes the `s3.bucket-lookup` extended option. Switch to using regional URLs for Amazon S3 to replace the need for setting the region. --- doc/030_preparing_a_new_repo.rst | 64 ++++++++++++++++++-------------- 1 file changed, 36 insertions(+), 28 deletions(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 87975f9fad2..462a66d75a9 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -249,28 +249,22 @@ while creating the bucket. $ export AWS_ACCESS_KEY_ID= $ export AWS_SECRET_ACCESS_KEY= +When using temporary credentials make sure to include the session token via +the environment variable ``AWS_SESSION_TOKEN``. + You can then easily initialize a repository that uses your Amazon S3 as -a backend. If the bucket does not exist it will be created in the -default location: +a backend. Make sure to use the endpoint for the correct region. The example +uses ``us-east-1``. If the bucket does not exist it will be created in that region: .. code-block:: console - $ restic -r s3:s3.amazonaws.com/bucket_name init + $ restic -r s3:s3.us-east-1.amazonaws.com/bucket_name init enter password for new repository: enter password again: - created restic repository eefee03bbd at s3:s3.amazonaws.com/bucket_name + created restic repository eefee03bbd at s3:s3.us-east-1.amazonaws.com/bucket_name Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. -If needed, you can manually specify the region to use by either setting the -environment variable ``AWS_DEFAULT_REGION`` or calling restic with an option -parameter like ``-o s3.region="us-east-1"``. If the region is not specified, -the default region is used. Afterwards, the S3 server (at least for AWS, -``s3.amazonaws.com``) will redirect restic to the correct endpoint. - -When using temporary credentials make sure to include the session token via -then environment variable ``AWS_SESSION_TOKEN``. - Until version 0.8.0, restic used a default prefix of ``restic``, so the files in the bucket were placed in a directory named ``restic``. If you want to access a repository created with an older version of restic, specify the path @@ -278,25 +272,14 @@ after the bucket name like this: .. code-block:: console - $ restic -r s3:s3.amazonaws.com/bucket_name/restic [...] + $ restic -r s3:s3.us-east-1.amazonaws.com/bucket_name/restic [...] -For an S3-compatible server that is not Amazon (like Minio, see below), -or is only available via HTTP, you can specify the URL to the server -like this: ``s3:http://server:port/bucket_name``. - .. note:: restic expects `path-style URLs `__ - like for example ``s3.us-west-2.amazonaws.com/bucket_name``. + like for example ``s3.us-west-2.amazonaws.com/bucket_name`` for Amazon S3. Virtual-hosted–style URLs like ``bucket_name.s3.us-west-2.amazonaws.com``, where the bucket name is part of the hostname are not supported. 
These must be converted to path-style URLs instead, for example ``s3.us-west-2.amazonaws.com/bucket_name``. - -.. note:: Certain S3-compatible servers do not properly implement the - ``ListObjectsV2`` API, most notably Ceph versions before v14.2.5. On these - backends, as a temporary workaround, you can provide the - ``-o s3.list-objects-v1=true`` option to use the older - ``ListObjects`` API instead. This option may be removed in future - versions of restic. - + See below for configuration options for S3-compatible storage from other providers. Minio Server ************ @@ -321,13 +304,38 @@ this command. .. code-block:: console - $ ./restic -r s3:http://localhost:9000/restic init + $ restic -r s3:http://localhost:9000/restic init enter password for new repository: enter password again: created restic repository 6ad29560f5 at s3:http://localhost:9000/restic1 Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. +S3-compatible Storage +********************* + +For an S3-compatible server that is not Amazon, you can specify the URL to the server +like this: ``s3:https://server:port/bucket_name``. + +If needed, you can manually specify the region to use by either setting the +environment variable ``AWS_DEFAULT_REGION`` or calling restic with an option +parameter like ``-o s3.region="us-east-1"``. If the region is not specified, +the default region ``us-east-1`` is used. + +To select between path-style and virtual-hosted access, the extended option +``-o s3.bucket-lookup=auto`` can be used. It supports the following values: + +- ``auto``: Default behavior. Uses ``dns`` for Amazon and Google endpoints. Uses + ``path`` for all other endpoints +- ``dns``: Use virtual-hosted-style bucket access +- ``path``: Use path-style bucket access + +Certain S3-compatible servers do not properly implement the ``ListObjectsV2`` API, +most notably Ceph versions before v14.2.5. On these backends, as a temporary +workaround, you can provide the ``-o s3.list-objects-v1=true`` option to use the +older ``ListObjects`` API instead. This option may be removed in future versions +of restic. + Wasabi ************ From 262e85c37fa68d3af021d973a0b1569c304cf9ec Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 12:21:13 +0200 Subject: [PATCH 616/893] doc: shrink wasabi / alibaba cloud example Remove descriptions for both providers and shorten the example to the minimum. --- doc/030_preparing_a_new_repo.rst | 60 ++++++-------------------------- 1 file changed, 10 insertions(+), 50 deletions(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 462a66d75a9..a169f34ccf2 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -337,73 +337,33 @@ older ``ListObjects`` API instead. This option may be removed in future versions of restic. Wasabi -************ - -`Wasabi `__ is a low cost Amazon S3 conformant object storage provider. -Due to its S3 conformance, Wasabi can be used as a storage provider for a restic repository. +****** -- Create a Wasabi bucket using the `Wasabi Console `__. -- Determine the correct Wasabi service URL for your bucket `here `__. +S3 storage from `Wasabi `__ can be used as follows. -You must first setup the following environment variables with the -credentials of your Wasabi account. +- Determine the correct Wasabi service URL for your bucket `here `__. +- Set environment variables with the necessary account credentials .. 
code-block:: console $ export AWS_ACCESS_KEY_ID= $ export AWS_SECRET_ACCESS_KEY= - -Now you can easily initialize restic to use Wasabi as a backend with -this command. - -.. code-block:: console - - $ ./restic -r s3:https:/// init - enter password for new repository: - enter password again: - created restic repository xxxxxxxxxx at s3:https:/// - Please note that knowledge of your password is required to access - the repository. Losing your password means that your data is irrecoverably lost. + $ restic -r s3:https:/// init Alibaba Cloud (Aliyun) Object Storage System (OSS) ************************************************** -`Alibaba OSS `__ is an -encrypted, secure, cost-effective, and easy-to-use object storage -service that enables you to store, back up, and archive large amounts -of data in the cloud. - -Alibaba OSS is S3 compatible so it can be used as a storage provider -for a restic repository with a couple of extra parameters. +S3 storage from `Alibaba OSS `__ can be used as follows. -- Determine the correct `Alibaba OSS region endpoint `__ - this will be something like ``oss-eu-west-1.aliyuncs.com`` -- You'll need the region name too - this will be something like ``oss-eu-west-1`` - -You must first setup the following environment variables with the -credentials of your Alibaba OSS account. +- Determine the correct `Alibaba OSS region endpoint `__ - this will be something like ``oss-eu-west-1.aliyuncs.com`` +- You will need the region name too - this will be something like ``oss-eu-west-1`` +- Set environment variables with the necessary account credentials .. code-block:: console $ export AWS_ACCESS_KEY_ID= $ export AWS_SECRET_ACCESS_KEY= - -Now you can easily initialize restic to use Alibaba OSS as a backend with -this command. - -.. code-block:: console - - $ ./restic -o s3.bucket-lookup=dns -o s3.region= -r s3:https:/// init - enter password for new backend: - enter password again: - created restic backend xxxxxxxxxx at s3:https:/// - Please note that knowledge of your password is required to access - the repository. Losing your password means that your data is irrecoverably lost. - -For example with an actual endpoint: - -.. code-block:: console - - $ restic -o s3.bucket-lookup=dns -o s3.region=oss-eu-west-1 -r s3:https://oss-eu-west-1.aliyuncs.com/bucketname init + $ restic -o s3.bucket-lookup=dns -o s3.region= -r s3:https:/// init OpenStack Swift *************** From 97eb81564a68aeaca746c36929a9ec82dcb95cf6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 12:24:02 +0200 Subject: [PATCH 617/893] doc: fix typos --- doc/030_preparing_a_new_repo.rst | 2 +- doc/manual_rest.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index a169f34ccf2..fd5b3112748 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -307,7 +307,7 @@ this command. $ restic -r s3:http://localhost:9000/restic init enter password for new repository: enter password again: - created restic repository 6ad29560f5 at s3:http://localhost:9000/restic1 + created restic repository 6ad29560f5 at s3:http://localhost:9000/restic Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index 031f4fc528c..d1e5817f324 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -8,7 +8,7 @@ Usage help is available: .. 
code-block:: console - $ ./restic --help + $ restic --help restic is a backup program which allows saving multiple revisions of files and directories in an encrypted repository stored on different backends. @@ -91,7 +91,7 @@ command: .. code-block:: console - $ ./restic backup --help + $ restic backup --help The "backup" command creates a new snapshot and saves the files and directories given as the arguments. From a45d21e2b9c72a35af96b046e46183f548945ef8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 14:38:32 +0200 Subject: [PATCH 618/893] doc: describe how to handle rewrite encoding error --- doc/045_working_with_repos.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 8dba8439f1d..f31e75c8427 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -305,6 +305,13 @@ In order to preview the changes which ``rewrite`` would make, you can use the modifying the repository. Instead restic will only print the actions it would perform. +.. note:: The ``rewrite`` command verifies that it does not modify snapshots in + unexpected ways and fails with an ``cannot encode tree at "[...]" without loosing information`` + error otherwise. This can occur when rewriting a snapshot created by a newer + version of restic or some third-party implementation. + + To convert a snapshot into the format expected by the ``rewrite`` command + use ``restic repair snapshots ``. Modifying metadata of snapshots =============================== From 71e8068d863b299a96c5af02eb0403c011260d32 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 14:38:45 +0200 Subject: [PATCH 619/893] doc: mark S3 layout as deprecated --- doc/design.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/design.rst b/doc/design.rst index 7fb8b71b2ea..26f1f333fcc 100644 --- a/doc/design.rst +++ b/doc/design.rst @@ -126,8 +126,8 @@ the option ``-o local.layout=default``, valid values are ``default`` and ``s3legacy``. The option for the sftp backend is named ``sftp.layout``, for the s3 backend ``s3.layout``. -S3 Legacy Layout ----------------- +S3 Legacy Layout (deprecated) +----------------------------- Unfortunately during development the Amazon S3 backend uses slightly different paths (directory names use singular instead of plural for ``key``, @@ -152,8 +152,7 @@ the ``data`` directory. The S3 Legacy repository layout looks like this: /snapshot └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec -The S3 backend understands and accepts both forms, new backends are -always created with the default layout for compatibility reasons. +Restic 0.17 is the last version that supports the legacy layout. Pack Format =========== From 7ea558db999d401fd27b12f1285c64e4e788c940 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 14:40:04 +0200 Subject: [PATCH 620/893] doc: JSON encoder must be deterministic --- doc/design.rst | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/doc/design.rst b/doc/design.rst index 26f1f333fcc..d83ac8b91b2 100644 --- a/doc/design.rst +++ b/doc/design.rst @@ -233,7 +233,9 @@ Individual files for the index, locks or snapshots are encrypted and authenticated like Data and Tree Blobs, so the outer structure is ``IV || Ciphertext || MAC`` again. In repository format version 1 the plaintext always consists of a JSON document which must either be an -object or an array. +object or an array. 
The JSON encoder must deterministically encode the +document and should match the behavior of the Go standard library implementation +in ``encoding/json``. Repository format version 2 adds support for compression. The plaintext now starts with a header to indicate the encoding version to distinguish @@ -472,6 +474,10 @@ A snapshot references a tree by the SHA-256 hash of the JSON string representation of its contents. Trees and data are saved in pack files in a subdirectory of the directory ``data``. +The JSON encoder must deterministically encode the document and should +match the behavior of the Go standard library implementation in ``encoding/json``. +This ensures that trees can be properly deduplicated. + The command ``restic cat blob`` can be used to inspect the tree referenced above (piping the output of the command to ``jq .`` so that the JSON is indented): From 55ff4e046e029d7150295668d8d3ee139f13bf57 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 14:40:38 +0200 Subject: [PATCH 621/893] doc: full tree blob data structure is in the code --- doc/design.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/design.rst b/doc/design.rst index d83ac8b91b2..c974e997a44 100644 --- a/doc/design.rst +++ b/doc/design.rst @@ -512,12 +512,11 @@ this metadata is generated: - The name is quoted using `strconv.Quote `__ before being saved. This handles non-unicode names, but also changes the representation of names containing ``"`` or ``\``. - - The filemode saved is the mode defined by `fs.FileMode `__ masked by ``os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky`` - -When the entry references a directory, the field ``subtree`` contains the plain text -ID of another tree object. +- When the entry references a directory, the field ``subtree`` contains the plain text + ID of another tree object. +- Check the implementation for a full struct definition. When the command ``restic cat blob`` is used, the plaintext ID is needed to print a tree. The tree referenced above can be dumped as follows: From 8828c76f92e29af09043015a41e547a74bef3b8d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 15:32:43 +0200 Subject: [PATCH 622/893] rest: improve handling of HTTP2 goaway The HTTP client can only retry HTTP2 requests after receiving a GOAWAY response if it can rewind the body. As we use a custom data type, explicitly provide an implementation of `GetBody`. --- changelog/unreleased/pull-5018 | 13 +++++++++++++ internal/backend/rest/rest.go | 6 ++++++ 2 files changed, 19 insertions(+) create mode 100644 changelog/unreleased/pull-5018 diff --git a/changelog/unreleased/pull-5018 b/changelog/unreleased/pull-5018 new file mode 100644 index 00000000000..1b7b9f428f0 --- /dev/null +++ b/changelog/unreleased/pull-5018 @@ -0,0 +1,13 @@ +Bugfix: Improve HTTP2 support for rest backend + +If rest-server tried to gracefully shut down an HTTP2 connection still used by the client, +this could result in the following error. + +``` +http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error +``` + +This has been fixed. 
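The underlying mechanism can be illustrated with a small, self-contained Go sketch. The `rewindableBody` type and the URL below are hypothetical stand-ins (restic uses its own `RewindReader`, and the actual change to `internal/backend/rest/rest.go` follows further down); the point is only that a custom body type needs an explicit `Request.GetBody` so the HTTP/2 transport can replay the request after a graceful server shutdown:

```
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// rewindableBody stands in for restic's RewindReader: a request body that
// can be reset to its beginning so the request can be replayed.
type rewindableBody struct {
	r *bytes.Reader
}

func newRewindableBody(data []byte) *rewindableBody {
	return &rewindableBody{r: bytes.NewReader(data)}
}

func (b *rewindableBody) Read(p []byte) (int, error) { return b.r.Read(p) }

func (b *rewindableBody) Rewind() error {
	_, err := b.r.Seek(0, io.SeekStart)
	return err
}

func main() {
	body := newRewindableBody([]byte("pack file contents"))

	// hypothetical URL, for illustration only
	req, err := http.NewRequest(http.MethodPut, "https://backend.example/data/1234", body)
	if err != nil {
		fmt.Println(err)
		return
	}

	// net/http cannot rewind a custom body type on its own. Providing GetBody
	// lets the HTTP/2 transport replay the request after a GOAWAY instead of
	// returning the "cannot retry err" error quoted above.
	req.GetBody = func() (io.ReadCloser, error) {
		if err := body.Rewind(); err != nil {
			return nil, err
		}
		return io.NopCloser(body), nil
	}

	fmt.Println("request prepared:", req.Method, req.URL)
}
```

Built-in body types such as `*bytes.Reader` or `*strings.Reader` get `GetBody` set automatically by `http.NewRequest`; the explicit assignment is only needed for custom reader types like the one above.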
+ +https://github.com/restic/restic/pull/5018 +https://forum.restic.net/t/receiving-http2-goaway-messages-with-windows-restic-v0-17-0/8367 diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go index 1af88ec3f8f..d0a08175b57 100644 --- a/internal/backend/rest/rest.go +++ b/internal/backend/rest/rest.go @@ -143,6 +143,12 @@ func (b *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindR if err != nil { return errors.WithStack(err) } + req.GetBody = func() (io.ReadCloser, error) { + if err := rd.Rewind(); err != nil { + return nil, err + } + return io.NopCloser(rd), nil + } req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Accept", ContentTypeV2) From 3e4c1ea1966824bf5e0994beb412c56eb1bb3d5e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 19:31:21 +0200 Subject: [PATCH 623/893] fs: fix race condition in get/set security descriptor Calling `Load()` twice for an atomic variable can return different values each time. This resulted in trying to read the security descriptor with high privileges, but then not entering the code path to switch to low privileges when another thread has already done so concurrently. --- internal/fs/sd_windows.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 0a73cbe53c6..bccf74992f4 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -48,13 +48,15 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err var sd *windows.SECURITY_DESCRIPTOR - if lowerPrivileges.Load() { + // store original value to avoid unrelated changes in the error check + useLowerPrivileges := lowerPrivileges.Load() + if useLowerPrivileges { sd, err = getNamedSecurityInfoLow(filePath) } else { sd, err = getNamedSecurityInfoHigh(filePath) } if err != nil { - if !lowerPrivileges.Load() && isHandlePrivilegeNotHeldError(err) { + if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. lowerPrivileges.Store(true) sd, err = getNamedSecurityInfoLow(filePath) @@ -109,14 +111,16 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { sacl = nil } - if lowerPrivileges.Load() { + // store original value to avoid unrelated changes in the error check + useLowerPrivileges := lowerPrivileges.Load() + if useLowerPrivileges { err = setNamedSecurityInfoLow(filePath, dacl) } else { err = setNamedSecurityInfoHigh(filePath, owner, group, dacl, sacl) } if err != nil { - if !lowerPrivileges.Load() && isHandlePrivilegeNotHeldError(err) { + if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. lowerPrivileges.Store(true) err = setNamedSecurityInfoLow(filePath, dacl) From ac5bc7c2f97eae7ebfe3526f1b1b0e4ebfcb82f4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 19:36:43 +0200 Subject: [PATCH 624/893] fs: fix error handling for retried get/set of security descriptor The retry code path did not filter `ERROR_NOT_SUPPORTED`. Just call the original function a second time to correctly follow the low privilege code path. 
--- internal/fs/sd_windows.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index bccf74992f4..0004f1809f8 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -59,10 +59,7 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. lowerPrivileges.Store(true) - sd, err = getNamedSecurityInfoLow(filePath) - if err != nil { - return nil, fmt.Errorf("get low-level named security info failed with: %w", err) - } + return GetSecurityDescriptor(filePath) } else if errors.Is(err, windows.ERROR_NOT_SUPPORTED) { return nil, nil } else { @@ -123,10 +120,7 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. lowerPrivileges.Store(true) - err = setNamedSecurityInfoLow(filePath, dacl) - if err != nil { - return fmt.Errorf("set low-level named security info failed with: %w", err) - } + return SetSecurityDescriptor(filePath, securityDescriptor) } else { return fmt.Errorf("set named security info failed with: %w", err) } From a12a6edfd1b1826f8787be00a094c6399ec2130f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 19:43:18 +0200 Subject: [PATCH 625/893] add changelog for security descriptor race condition --- changelog/unreleased/issue-5004 | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 changelog/unreleased/issue-5004 diff --git a/changelog/unreleased/issue-5004 b/changelog/unreleased/issue-5004 new file mode 100644 index 00000000000..529b65464f9 --- /dev/null +++ b/changelog/unreleased/issue-5004 @@ -0,0 +1,12 @@ +Bugfix: Fix spurious "A Required Privilege Is Not Held by the Client" error + +On Windows, creating a backup could sometimes print the following error + +``` +error: nodeFromFileInfo [...]: get named security info failed with: a required privilege is not held by the client. +``` + +This has been fixed. + +https://github.com/restic/restic/issues/5004 +https://github.com/restic/restic/pull/5019 From 1f4c9d280629f2afde9c6a294590f73962b37e83 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 29 Aug 2024 16:32:15 +0200 Subject: [PATCH 626/893] cache: correctly ignore files whose filename is no ID this can for example be the case for temporary files created by the backend implementation. --- internal/backend/cache/backend.go | 5 ++--- internal/backend/cache/backend_test.go | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/internal/backend/cache/backend.go b/internal/backend/cache/backend.go index 58b03dd3862..3754266ba96 100644 --- a/internal/backend/cache/backend.go +++ b/internal/backend/cache/backend.go @@ -231,9 +231,8 @@ func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backen wrapFn := func(f backend.FileInfo) error { id, err := restic.ParseID(f.Name) if err != nil { - // returning error here since, if we cannot parse the ID, the file - // is invalid and the list must exit. 
- return err + // ignore files with invalid name + return nil } ids.Insert(id) diff --git a/internal/backend/cache/backend_test.go b/internal/backend/cache/backend_test.go index dca51c2bf2b..7f83e40cbcd 100644 --- a/internal/backend/cache/backend_test.go +++ b/internal/backend/cache/backend_test.go @@ -296,3 +296,20 @@ func TestAutomaticCacheClear(t *testing.T) { t.Errorf("cache doesn't have file2 after list") } } + +func TestAutomaticCacheClearInvalidFilename(t *testing.T) { + be := mem.New() + c := TestNewCache(t) + + data := test.Random(rand.Int(), 42) + h := backend.Handle{ + Type: backend.IndexFile, + Name: "tmp12345", + } + save(t, be, h, data) + + wbe := c.Wrap(be) + + // list all files in the backend + list(t, wbe, func(_ backend.FileInfo) error { return nil }) +} From 361fbbf58fc15f9604cf2922e425a129e50ede3b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 29 Aug 2024 16:33:18 +0200 Subject: [PATCH 627/893] Add temporary files repositories in integration tests This is intended to catch problems with temporary files stored in the backend, even if the responsible component forgets to test for those. --- cmd/restic/cmd_init_integration_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/restic/cmd_init_integration_test.go b/cmd/restic/cmd_init_integration_test.go index 9b5eed6e08e..4795d5510b9 100644 --- a/cmd/restic/cmd_init_integration_test.go +++ b/cmd/restic/cmd_init_integration_test.go @@ -2,6 +2,8 @@ package main import ( "context" + "os" + "path/filepath" "testing" "github.com/restic/restic/internal/repository" @@ -16,6 +18,11 @@ func testRunInit(t testing.TB, opts GlobalOptions) { rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil)) t.Logf("repository initialized at %v", opts.Repo) + + // create temporary junk files to verify that restic does not trip over them + for _, path := range []string{"index", "snapshots", "keys", "locks", filepath.Join("data", "00")} { + rtest.OK(t, os.WriteFile(filepath.Join(opts.Repo, path, "tmp12345"), []byte("junk file"), 0o600)) + } } func TestInitCopyChunkerParams(t *testing.T) { From 174f20dc4a80c5431901f276ddf2c4aed154f49d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 29 Aug 2024 16:35:48 +0200 Subject: [PATCH 628/893] use OrderedListOnceBackend where possible --- cmd/restic/cmd_prune_integration_test.go | 5 ++--- cmd/restic/integration_test.go | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index 746eb5cc9e7..536ec40d886 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -146,10 +146,9 @@ func TestPruneWithDamagedRepository(t *testing.T) { env.gopts.backendTestHook = oldHook }() // prune should fail - rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + rtest.Equals(t, repository.ErrPacksMissing, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { return runPrune(context.TODO(), pruneDefaultOptions, env.gopts, term) - }) == repository.ErrPacksMissing, - "prune should have reported index not complete error") + }), "prune should have reported index not complete error") } // Test repos for edge cases diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 4cecec6bc06..df95031dca5 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -80,7 +80,7 @@ func TestListOnce(t *testing.T) { defer cleanup() 
env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { - return newListOnceBackend(r), nil + return newOrderedListOnceBackend(r), nil } pruneOpts := PruneOptions{MaxUnused: "0"} checkOpts := CheckOptions{ReadData: true, CheckUnused: true} @@ -148,7 +148,7 @@ func TestFindListOnce(t *testing.T) { defer cleanup() env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { - return newListOnceBackend(r), nil + return newOrderedListOnceBackend(r), nil } testSetupBackupData(t, env) From ba71141f0a20318ef4c1a171673f146aadf0eb18 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 30 Aug 2024 11:25:51 +0200 Subject: [PATCH 629/893] backup: support specifying volume instead of path on Windows "C:" (volume name) versus "C:\" (path) --- changelog/unreleased/issue-2004 | 19 ++++++++++ internal/archiver/archiver.go | 7 +++- internal/archiver/archiver_test.go | 60 ++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-2004 diff --git a/changelog/unreleased/issue-2004 b/changelog/unreleased/issue-2004 new file mode 100644 index 00000000000..45bc07ca814 --- /dev/null +++ b/changelog/unreleased/issue-2004 @@ -0,0 +1,19 @@ +Bugfix: Correctly handle passing volume name to `backup` command + +On Windows, when the specified backup target only included the volume +name without a trailing slash, for example, `C:`, then restoring the +resulting snapshot would result in an error. Note that using `C:\` +as backup target worked correctly. + +Specifying volume names now works correctly. + +To restore snapshots created before this bugfix, use the `:` +syntax. For a snapshot with ID `12345678` and a backup of `C:`, the following +command can be used: + +``` +restic restore 12345678:/C/C:./ --target output/folder +``` + +https://github.com/restic/restic/issues/2004 +https://github.com/restic/restic/pull/5028 diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index e441512982d..e7c346d3a43 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -715,7 +715,12 @@ func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) { debug.Log("targets before resolving: %v", targets) result := make([]string, 0, len(targets)) for _, target := range targets { - target = filesys.Clean(target) + if target != "" && filesys.VolumeName(target) == target { + // special case to allow users to also specify a volume name "C:" instead of a path "C:\" + target = target + filesys.Separator() + } else { + target = filesys.Clean(target) + } pc, _ := pathComponents(filesys, target, false) if len(pc) > 0 { result = append(result, target) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index b519387db4e..c54f9ea3311 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1448,6 +1448,66 @@ func TestArchiverSnapshot(t *testing.T) { } } +func TestResolveRelativeTargetsSpecial(t *testing.T) { + var tests = []struct { + name string + targets []string + expected []string + win bool + }{ + { + name: "basic relative path", + targets: []string{filepath.FromSlash("some/path")}, + expected: []string{filepath.FromSlash("some/path")}, + }, + { + name: "partial relative path", + targets: []string{filepath.FromSlash("../some/path")}, + expected: []string{filepath.FromSlash("../some/path")}, + }, + { + name: "basic absolute path", + targets: []string{filepath.FromSlash("/some/path")}, + expected: 
[]string{filepath.FromSlash("/some/path")}, + }, + { + name: "volume name", + targets: []string{"C:"}, + expected: []string{"C:\\"}, + win: true, + }, + { + name: "volume root path", + targets: []string{"C:\\"}, + expected: []string{"C:\\"}, + win: true, + }, + { + name: "UNC path", + targets: []string{"\\\\server\\volume"}, + expected: []string{"\\\\server\\volume\\"}, + win: true, + }, + { + name: "UNC path with trailing slash", + targets: []string{"\\\\server\\volume\\"}, + expected: []string{"\\\\server\\volume\\"}, + win: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.win && runtime.GOOS != "windows" { + t.Skip("skip test on unix") + } + + targets, err := resolveRelativeTargets(&fs.Local{}, test.targets) + rtest.OK(t, err) + rtest.Equals(t, test.expected, targets) + }) + } +} + func TestArchiverSnapshotSelect(t *testing.T) { var tests = []struct { name string From 259caf942dd2b74ad629ba4d12d00cdeba58a973 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 30 Aug 2024 14:58:32 +0200 Subject: [PATCH 630/893] cleanup changelogs --- changelog/unreleased/issue-2004 | 10 +++++----- changelog/unreleased/issue-4795 | 7 ++++--- changelog/unreleased/issue-4934 | 12 +++++++----- changelog/unreleased/issue-4944 | 7 ++++--- changelog/unreleased/issue-4945 | 6 ++++-- changelog/unreleased/issue-4953 | 4 ++-- changelog/unreleased/issue-4957 | 8 ++++---- changelog/unreleased/issue-4969 | 4 ++-- changelog/unreleased/issue-4970 | 9 +++++---- changelog/unreleased/issue-4975 | 3 ++- changelog/unreleased/issue-5004 | 2 +- changelog/unreleased/issue-5005 | 15 +++++++++------ changelog/unreleased/pull-4958 | 6 +++--- changelog/unreleased/pull-4959 | 6 +++--- changelog/unreleased/pull-4977 | 17 +++++++++-------- changelog/unreleased/pull-4980 | 13 ++++++------- changelog/unreleased/pull-5018 | 8 ++++---- 17 files changed, 74 insertions(+), 63 deletions(-) diff --git a/changelog/unreleased/issue-2004 b/changelog/unreleased/issue-2004 index 45bc07ca814..a15651d75fe 100644 --- a/changelog/unreleased/issue-2004 +++ b/changelog/unreleased/issue-2004 @@ -1,15 +1,15 @@ -Bugfix: Correctly handle passing volume name to `backup` command +Bugfix: Correctly handle volume names in `backup` command on Windows On Windows, when the specified backup target only included the volume name without a trailing slash, for example, `C:`, then restoring the resulting snapshot would result in an error. Note that using `C:\` as backup target worked correctly. -Specifying volume names now works correctly. +Specifying volume names is now handled correctly. -To restore snapshots created before this bugfix, use the `:` -syntax. For a snapshot with ID `12345678` and a backup of `C:`, the following -command can be used: +To restore snapshots created before this bugfix, use the : +syntax. For example, to restore a snapshot with ID `12345678` that backed up +`C:`, use the following command: ``` restic restore 12345678:/C/C:./ --target output/folder diff --git a/changelog/unreleased/issue-4795 b/changelog/unreleased/issue-4795 index 084335f510c..ff86f09312c 100644 --- a/changelog/unreleased/issue-4795 +++ b/changelog/unreleased/issue-4795 @@ -1,7 +1,8 @@ -Enhancement: `restore --verify` shows progress with a progress bar +Enhancement: Display progress bar for `restore --verify` -If restore command was run with `--verify` restic didn't show any progress indication, now it shows a progress bar while 'verification' is running. 
-The progress bar is text only for now and doesn't respect `--json` flag. +When the `restore` command is run with `--verify`, it now displays a progress +bar while the verification step is running. The progress bar is not shown when +the `--json` flag is specified. https://github.com/restic/restic/issues/4795 https://github.com/restic/restic/pull/4989 diff --git a/changelog/unreleased/issue-4934 b/changelog/unreleased/issue-4934 index 6891ca20419..56e22ad28c0 100644 --- a/changelog/unreleased/issue-4934 +++ b/changelog/unreleased/issue-4934 @@ -1,8 +1,10 @@ -Enhancement: Clear removed snapshots from local cache of the current host +Enhancement: Automatically clear removed snapshots from cache -Restic only removed snapshots from the cache on the host that runs the `forget` command. -On other hosts that use the same repository, the old snapshots remained in the cache. -Restic now, automatically clears old snapshots from the local cache of the current host. +Previously, restic only removed snapshots from the cache on the host where the +`forget` command was executed. On other hosts that use the same repository, the +old snapshots remained in the cache. + +Restic now automatically clears old snapshots from the local cache of the current host. https://github.com/restic/restic/issues/4934 -https://github.com/restic/restic/pull/4981 \ No newline at end of file +https://github.com/restic/restic/pull/4981 diff --git a/changelog/unreleased/issue-4944 b/changelog/unreleased/issue-4944 index 02f5ae3418c..738da8e5742 100644 --- a/changelog/unreleased/issue-4944 +++ b/changelog/unreleased/issue-4944 @@ -1,8 +1,9 @@ Enhancement: Print JSON-formatted errors during `restore --json` -Restic printed any restore errors directly to the console as freeform -text messages, even with `--json`. Restic now prints them as JSON formatted -messages when `--json` is passed. +Restic printed any `restore` errors directly to the console as freeform text +messages, even when using the `--json` option. + +Now, when `--json` is specified, restic prints them as JSON formatted messages. https://github.com/restic/restic/issues/4944 https://github.com/restic/restic/pull/4946 diff --git a/changelog/unreleased/issue-4945 b/changelog/unreleased/issue-4945 index 7bbf69fac2b..024b30b211f 100644 --- a/changelog/unreleased/issue-4945 +++ b/changelog/unreleased/issue-4945 @@ -1,7 +1,9 @@ Bugfix: Include missing backup error text with `--json` -Restic was not actually providing the text of an error message during -backup if `--json` was passed, instead only printing `"error": {}`. +Previously, when running a backup with the `--json` option, restic failed to +include the actual error message in the output, resulting in `"error": {}` +being displayed. + Restic now includes the error text in JSON output. https://github.com/restic/restic/issues/4945 diff --git a/changelog/unreleased/issue-4953 b/changelog/unreleased/issue-4953 index 78a266aff09..c542377fcba 100644 --- a/changelog/unreleased/issue-4953 +++ b/changelog/unreleased/issue-4953 @@ -1,7 +1,7 @@ Bugfix: Correctly handle long paths on older Windows versions -When using older Windows versions, like Windows Server 2012, restic 0.17.0 -failed to back up files with long paths. This has been fixed. +On older Windows versions, like Windows Server 2012, restic 0.17.0 failed to +back up files with long paths. This problem has now been resolved. 
https://github.com/restic/restic/issues/4953 https://github.com/restic/restic/pull/4954 diff --git a/changelog/unreleased/issue-4957 b/changelog/unreleased/issue-4957 index d18e28ec9b4..5da7463ae30 100644 --- a/changelog/unreleased/issue-4957 +++ b/changelog/unreleased/issue-4957 @@ -1,8 +1,8 @@ -Bugfix: Fix delayed cancelation of some commands +Bugfix: Fix delayed cancellation of certain commands -Since restic 0.17.0, some commands no longer promptly reacted to being canceled -via Ctrl-C (SIGINT) and continued to run for a limited amount of time. The most -affected commands were `diff`,`find`, `ls`, `stats` and `rewrite`. +Since restic 0.17.0, some commands did not immediately respond to cancellation +via Ctrl-C (SIGINT) and continued running for a short period. The most affected +commands were `diff`,`find`, `ls`, `stats` and `rewrite`. This has been fixed. diff --git a/changelog/unreleased/issue-4969 b/changelog/unreleased/issue-4969 index ce76a738987..9015c2eabba 100644 --- a/changelog/unreleased/issue-4969 +++ b/changelog/unreleased/issue-4969 @@ -1,7 +1,7 @@ Bugfix: Correctly restore timestamp for files with resource forks on macOS -On macOS, timestamps were incorrectly restored for files with resource forks. -This has been fixed. +On macOS, timestamps were not restored for files with resource forks. This has +been fixed. https://github.com/restic/restic/issues/4969 https://github.com/restic/restic/pull/5006 diff --git a/changelog/unreleased/issue-4970 b/changelog/unreleased/issue-4970 index 524e91b7566..2fc9300c995 100644 --- a/changelog/unreleased/issue-4970 +++ b/changelog/unreleased/issue-4970 @@ -1,9 +1,10 @@ Enhancement: Make timeout for stuck requests customizable -Restic monitors connections to the backend to detect stuck requests. If a request -does not return any data within five minutes, restic assumes the request is stuck and -retries it. However, for large repositories it sometimes takes longer than that to -collect a list of all files, causing the following error: +Restic monitors connections to the backend to detect stuck requests. If a +request does not return any data within five minutes, restic assumes the +request is stuck and retries it. However, for large repositories this timeout +might be insufficient to collect a list of all files, causing the following +error: `List(data) returned error, retrying after 1s: [...]: request timeout` diff --git a/changelog/unreleased/issue-4975 b/changelog/unreleased/issue-4975 index 0e29935f576..2503f46da3e 100644 --- a/changelog/unreleased/issue-4975 +++ b/changelog/unreleased/issue-4975 @@ -1,6 +1,7 @@ Bugfix: Prevent `backup --stdin-from-command` from panicking -If --stdin-from-command is used, restic now checks whether there is a command behind it. +Restic would previously crash if `--stdin-from-command` was specified without +providing a command. This issue has been fixed. https://github.com/restic/restic/issues/4975 https://github.com/restic/restic/pull/4976 diff --git a/changelog/unreleased/issue-5004 b/changelog/unreleased/issue-5004 index 529b65464f9..a0df9478e13 100644 --- a/changelog/unreleased/issue-5004 +++ b/changelog/unreleased/issue-5004 @@ -1,6 +1,6 @@ Bugfix: Fix spurious "A Required Privilege Is Not Held by the Client" error -On Windows, creating a backup could sometimes print the following error +On Windows, creating a backup could sometimes trigger the following error: ``` error: nodeFromFileInfo [...]: get named security info failed with: a required privilege is not held by the client. 
diff --git a/changelog/unreleased/issue-5005 b/changelog/unreleased/issue-5005 index 90c164b07ea..eb712b96775 100644 --- a/changelog/unreleased/issue-5005 +++ b/changelog/unreleased/issue-5005 @@ -1,12 +1,15 @@ Bugfix: Fix rare failures to retry locking a repository -Restic 0.17.0 could in rare cases fail to retry locking a repository if -one of the lock files failed to load. The lock operation failed with error -`unable to create lock in backend: circuit breaker open for file ` +Restic 0.17.0 could in rare cases fail to retry locking a repository if one of +the lock files failed to load, resulting in the error: -The error handling has been fixed to correctly retry locking the repository. -In addition, restic now waits a few seconds between locking retries to -increase chances of success. +``` +unable to create lock in backend: circuit breaker open for file +``` + +This issue has been addressed. The error handling now properly retries the +locking operation. In addition, restic waits a few seconds between locking +retries to increase chances of successful locking. https://github.com/restic/restic/issues/5005 https://github.com/restic/restic/pull/5011 diff --git a/changelog/unreleased/pull-4958 b/changelog/unreleased/pull-4958 index bbb28a97b8e..02574ad3340 100644 --- a/changelog/unreleased/pull-4958 +++ b/changelog/unreleased/pull-4958 @@ -1,7 +1,7 @@ Bugfix: Don't ignore metadata-setting errors during restore -Restic was accidentally ignoring errors when setting timestamps, -attributes, or file modes during restore. It will now report those -errors (unless it's just a permission error when not running as root). +Restic ignored errors when setting timestamps, attributes, or file modes during +a restore. It now reports those, except for permission errors when running +without root privileges. https://github.com/restic/restic/pull/4958 diff --git a/changelog/unreleased/pull-4959 b/changelog/unreleased/pull-4959 index 120527e22b4..80b2780b22c 100644 --- a/changelog/unreleased/pull-4959 +++ b/changelog/unreleased/pull-4959 @@ -1,6 +1,6 @@ -Enhancement: Return exit code 12 for "bad password" +Enhancement: Return exit code 12 for "bad password" errors -Restic now returns exit code 12 when it can't open the repository -because of a bad password. +Restic now returns exit code 12 when it cannot open the repository due to an +incorrect password. https://github.com/restic/restic/pull/4959 diff --git a/changelog/unreleased/pull-4977 b/changelog/unreleased/pull-4977 index 702df29a7c4..85e4091c56b 100644 --- a/changelog/unreleased/pull-4977 +++ b/changelog/unreleased/pull-4977 @@ -1,14 +1,15 @@ -Change: let `backup` store files with incomplete metadata +Change: `backup` includes files with incomplete metadata -If restic failed to read the extended metadata for a file or folder while -creating a backup, then the file or folder was not included in the resulting -snapshot. Instead, only a warning message was printed along with exiting -with exit code 3. +If restic failed to read extended metadata for a file or folder during a +backup, then the file or folder was not included in the resulting snapshot. +Instead, a warning message was printed along with returning exit code 3 once +the backup was finished. + +Now, restic also includes items for which the extended metadata could not be +read in a snapshot. The warning message has been updated to: -Now, restic also includes items for which the extended metadata could not -be read in a snapshot. 
The warning message has been changed to read ``` -incomplete metadata for /path/to/file: details on error +incomplete metadata for /path/to/file:
``` https://github.com/restic/restic/issues/4953 diff --git a/changelog/unreleased/pull-4980 b/changelog/unreleased/pull-4980 index 5713db7a2b1..4b1de54bfa9 100644 --- a/changelog/unreleased/pull-4980 +++ b/changelog/unreleased/pull-4980 @@ -1,11 +1,10 @@ -Bugfix: Skip EA processing in Windows for volumes that do not support EA +Bugfix: Skip extended attribute processing on unsupported Windows volumes -Restic was failing to backup files on some windows paths like network -drives because of errors while fetching extended attributes. -Either they return error codes like windows.E_NOT_SET or -windows.ERROR_INVALID_FUNCTION or it results in slower backups. -Restic now completely skips the attempt to fetch extended attributes -for such volumes where it is not supported. +For restic 0.17.0, backups of certain Windows paths, such as network drives, +failed due to errors while fetching extended attributes. + +Restic now skips extended attribute processing for volumes where they are not +supported. https://github.com/restic/restic/pull/4980 https://github.com/restic/restic/pull/4998 diff --git a/changelog/unreleased/pull-5018 b/changelog/unreleased/pull-5018 index 1b7b9f428f0..84f36355c2f 100644 --- a/changelog/unreleased/pull-5018 +++ b/changelog/unreleased/pull-5018 @@ -1,13 +1,13 @@ -Bugfix: Improve HTTP2 support for rest backend +Bugfix: Improve HTTP/2 support for REST backend -If rest-server tried to gracefully shut down an HTTP2 connection still used by the client, -this could result in the following error. +If `rest-server` tried to gracefully shut down an HTTP/2 connection still in +use by the client, it could result in the following error: ``` http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error ``` -This has been fixed. +This issue has been resolved. 
https://github.com/restic/restic/pull/5018 https://forum.restic.net/t/receiving-http2-goaway-messages-with-windows-restic-v0-17-0/8367 From 943b6ccfba90843b08eadd1639f1f4178d779359 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 20:07:21 +0200 Subject: [PATCH 631/893] index: remove support for legacy index format --- cmd/restic/cmd_check.go | 8 --- cmd/restic/cmd_debug.go | 2 +- cmd/restic/cmd_list.go | 2 +- internal/checker/checker.go | 18 +---- internal/feature/registry.go | 2 - internal/repository/index/index.go | 57 +-------------- internal/repository/index/index_parallel.go | 7 +- .../repository/index/index_parallel_test.go | 4 +- internal/repository/index/index_test.go | 69 +++---------------- internal/repository/index/master_index.go | 17 +++-- internal/repository/repair_index.go | 2 +- internal/repository/repository_test.go | 8 +-- 12 files changed, 28 insertions(+), 168 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index dcf7f27df5a..e8596ae332a 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -245,17 +245,12 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args errorsFound := false suggestIndexRebuild := false - suggestLegacyIndexRebuild := false mixedFound := false for _, hint := range hints { switch hint.(type) { case *checker.ErrDuplicatePacks: term.Print(hint.Error()) suggestIndexRebuild = true - case *checker.ErrOldIndexFormat: - printer.E("error: %v\n", hint) - suggestLegacyIndexRebuild = true - errorsFound = true case *checker.ErrMixedPack: term.Print(hint.Error()) mixedFound = true @@ -268,9 +263,6 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args if suggestIndexRebuild { term.Print("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n") } - if suggestLegacyIndexRebuild { - printer.E("error: Found indexes using the legacy format, you must run `restic repair index' to correct this.\n") - } if mixedFound { term.Print("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n") } diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index b9219249220..4ce17f899e2 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -143,7 +143,7 @@ func printPacks(ctx context.Context, repo *repository.Repository, wr io.Writer) } func dumpIndexes(ctx context.Context, repo restic.ListerLoaderUnpacked, wr io.Writer) error { - return index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error { + return index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, err error) error { Printf("index_id: %v\n", id) if err != nil { return err diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 1a4791e311d..f6c07d66f51 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -60,7 +60,7 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { case "locks": t = restic.LockFile case "blobs": - return index.ForAllIndexes(ctx, repo, repo, func(_ restic.ID, idx *index.Index, _ bool, err error) error { + return index.ForAllIndexes(ctx, repo, repo, func(_ restic.ID, idx *index.Index, err error) error { if err != nil { return err } diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 031e13807f0..e0c1766d7be 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -75,16 +75,6 @@ func (e *ErrMixedPack) Error() string { 
return fmt.Sprintf("pack %v contains a mix of tree and data blobs", e.PackID.Str()) } -// ErrOldIndexFormat is returned when an index with the old format is -// found. -type ErrOldIndexFormat struct { - restic.ID -} - -func (err *ErrOldIndexFormat) Error() string { - return fmt.Sprintf("index %v has old format", err.ID) -} - func (c *Checker) LoadSnapshots(ctx context.Context) error { var err error c.snapshots, err = restic.MemorizeList(ctx, c.repo, restic.SnapshotFile) @@ -112,14 +102,8 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e debug.Log("Start") packToIndex := make(map[restic.ID]restic.IDSet) - err := c.masterIndex.Load(ctx, c.repo, p, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error { + err := c.masterIndex.Load(ctx, c.repo, p, func(id restic.ID, idx *index.Index, err error) error { debug.Log("process index %v, err %v", id, err) - - if oldFormat { - debug.Log("index %v has old format", id) - hints = append(hints, &ErrOldIndexFormat{id}) - } - err = errors.Wrapf(err, "error loading index %v", id) if err != nil { diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 6b8f6b3977d..8bdb5480e29 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -6,7 +6,6 @@ var Flag = New() // flag names are written in kebab-case const ( BackendErrorRedesign FlagName = "backend-error-redesign" - DeprecateLegacyIndex FlagName = "deprecate-legacy-index" DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout" DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" ExplicitS3AnonymousAuth FlagName = "explicit-s3-anonymous-auth" @@ -16,7 +15,6 @@ const ( func init() { Flag.SetFlags(map[FlagName]FlagDesc{ BackendErrorRedesign: {Type: Beta, Description: "enforce timeouts for stuck HTTP requests and use new backend error handling design."}, - DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."}, DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, ExplicitS3AnonymousAuth: {Type: Beta, Description: "forbid anonymous S3 authentication unless `-o s3.unsafe-anonymous-auth=true` is set"}, diff --git a/internal/repository/index/index.go b/internal/repository/index/index.go index 36ac2560fce..14e4543bd41 100644 --- a/internal/repository/index/index.go +++ b/internal/repository/index/index.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/json" - "fmt" "io" "math" "sync" @@ -12,7 +11,6 @@ import ( "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/debug" @@ -489,34 +487,15 @@ func (idx *Index) merge(idx2 *Index) error { return nil } -// isErrOldIndex returns true if the error may be caused by an old index -// format. -func isErrOldIndex(err error) bool { - e, ok := err.(*json.UnmarshalTypeError) - return ok && e.Value == "array" -} - // DecodeIndex unserializes an index from buf. 
-func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err error) { +func DecodeIndex(buf []byte, id restic.ID) (idx *Index, err error) { debug.Log("Start decoding index") idxJSON := &jsonIndex{} err = json.Unmarshal(buf, idxJSON) if err != nil { debug.Log("Error %v", err) - - if isErrOldIndex(err) { - if feature.Flag.Enabled(feature.DeprecateLegacyIndex) { - return nil, false, fmt.Errorf("index seems to use the legacy format. update it using `restic repair index`") - } - - debug.Log("index is probably old format, trying that") - idx, err = decodeOldIndex(buf) - idx.ids = append(idx.ids, id) - return idx, err == nil, err - } - - return nil, false, errors.Wrap(err, "DecodeIndex") + return nil, errors.Wrap(err, "DecodeIndex") } idx = NewIndex() @@ -537,38 +516,6 @@ func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err erro idx.ids = append(idx.ids, id) idx.final = true - debug.Log("done") - return idx, false, nil -} - -// DecodeOldIndex loads and unserializes an index in the old format from rd. -func decodeOldIndex(buf []byte) (idx *Index, err error) { - debug.Log("Start decoding old index") - list := []*packJSON{} - - err = json.Unmarshal(buf, &list) - if err != nil { - debug.Log("Error %#v", err) - return nil, errors.Wrap(err, "Decode") - } - - idx = NewIndex() - for _, pack := range list { - packID := idx.addToPacks(pack.ID) - - for _, blob := range pack.Blobs { - idx.store(packID, restic.Blob{ - BlobHandle: restic.BlobHandle{ - Type: blob.Type, - ID: blob.ID}, - Offset: blob.Offset, - Length: blob.Length, - // no compressed length in the old index format - }) - } - } - idx.final = true - debug.Log("done") return idx, nil } diff --git a/internal/repository/index/index_parallel.go b/internal/repository/index/index_parallel.go index 3d5621a2ddc..fda5123d396 100644 --- a/internal/repository/index/index_parallel.go +++ b/internal/repository/index/index_parallel.go @@ -12,7 +12,7 @@ import ( // It is guaranteed that the function is not run concurrently. If the callback // returns an error, this function is cancelled and also returns that error. 
func ForAllIndexes(ctx context.Context, lister restic.Lister, repo restic.LoaderUnpacked, - fn func(id restic.ID, index *Index, oldFormat bool, err error) error) error { + fn func(id restic.ID, index *Index, err error) error) error { // decoding an index can take quite some time such that this can be both CPU- or IO-bound // as the whole index is kept in memory anyways, a few workers too much don't matter @@ -22,15 +22,14 @@ func ForAllIndexes(ctx context.Context, lister restic.Lister, repo restic.Loader return restic.ParallelList(ctx, lister, restic.IndexFile, workerCount, func(ctx context.Context, id restic.ID, _ int64) error { var err error var idx *Index - oldFormat := false buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id) if err == nil { - idx, oldFormat, err = DecodeIndex(buf, id) + idx, err = DecodeIndex(buf, id) } m.Lock() defer m.Unlock() - return fn(id, idx, oldFormat, err) + return fn(id, idx, err) }) } diff --git a/internal/repository/index/index_parallel_test.go b/internal/repository/index/index_parallel_test.go index 38dafb50750..96f1c2a6a35 100644 --- a/internal/repository/index/index_parallel_test.go +++ b/internal/repository/index/index_parallel_test.go @@ -27,7 +27,7 @@ func TestRepositoryForAllIndexes(t *testing.T) { // check that all expected indexes are loaded without errors indexIDs := restic.NewIDSet() var indexErr error - rtest.OK(t, index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, oldFormat bool, err error) error { + rtest.OK(t, index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, err error) error { if err != nil { indexErr = err } @@ -40,7 +40,7 @@ func TestRepositoryForAllIndexes(t *testing.T) { // must failed with the returned error iterErr := errors.New("error to pass upwards") - err := index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, oldFormat bool, err error) error { + err := index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, err error) error { return iterErr }) diff --git a/internal/repository/index/index_test.go b/internal/repository/index/index_test.go index bf752d3d31f..93803603ded 100644 --- a/internal/repository/index/index_test.go +++ b/internal/repository/index/index_test.go @@ -8,7 +8,6 @@ import ( "sync" "testing" - "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -53,11 +52,9 @@ func TestIndexSerialize(t *testing.T) { rtest.OK(t, err) idx2ID := restic.NewRandomID() - idx2, oldFormat, err := index.DecodeIndex(wr.Bytes(), idx2ID) + idx2, err := index.DecodeIndex(wr.Bytes(), idx2ID) rtest.OK(t, err) - rtest.Assert(t, idx2 != nil, - "nil returned for decoded index") - rtest.Assert(t, !oldFormat, "new index format recognized as old format") + rtest.Assert(t, idx2 != nil, "nil returned for decoded index") indexID, err := idx2.IDs() rtest.OK(t, err) rtest.Equals(t, indexID, restic.IDs{idx2ID}) @@ -123,13 +120,10 @@ func TestIndexSerialize(t *testing.T) { rtest.OK(t, err) rtest.Equals(t, restic.IDs{id}, ids) - idx3, oldFormat, err := index.DecodeIndex(wr3.Bytes(), id) + idx3, err := index.DecodeIndex(wr3.Bytes(), id) rtest.OK(t, err) - rtest.Assert(t, idx3 != nil, - "nil returned for decoded index") - rtest.Assert(t, idx3.Final(), - "decoded index is not final") - rtest.Assert(t, !oldFormat, "new index format recognized as old format") + rtest.Assert(t, idx3 != nil, 
"nil returned for decoded index") + rtest.Assert(t, idx3.Final(), "decoded index is not final") // all new blobs must be in the index for _, testBlob := range newtests { @@ -246,31 +240,6 @@ var docExampleV2 = []byte(` } `) -var docOldExample = []byte(` -[ { - "id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c", - "blobs": [ - { - "id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce", - "type": "data", - "offset": 0, - "length": 38 - },{ - "id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae", - "type": "tree", - "offset": 38, - "length": 112 - }, - { - "id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66", - "type": "data", - "offset": 150, - "length": 123 - } - ] -} ] -`) - var exampleTests = []struct { id, packID restic.ID tpe restic.BlobType @@ -312,9 +281,8 @@ func TestIndexUnserialize(t *testing.T) { {docExampleV1, 1}, {docExampleV2, 2}, } { - idx, oldFormat, err := index.DecodeIndex(task.idxBytes, restic.NewRandomID()) + idx, err := index.DecodeIndex(task.idxBytes, restic.NewRandomID()) rtest.OK(t, err) - rtest.Assert(t, !oldFormat, "new index format recognized as old format") for _, test := range exampleTests { list := idx.Lookup(restic.BlobHandle{ID: test.id, Type: test.tpe}, nil) @@ -387,7 +355,7 @@ func BenchmarkDecodeIndex(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, _, err := index.DecodeIndex(benchmarkIndexJSON, id) + _, err := index.DecodeIndex(benchmarkIndexJSON, id) rtest.OK(b, err) } } @@ -400,7 +368,7 @@ func BenchmarkDecodeIndexParallel(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { - _, _, err := index.DecodeIndex(benchmarkIndexJSON, id) + _, err := index.DecodeIndex(benchmarkIndexJSON, id) rtest.OK(b, err) } }) @@ -426,27 +394,6 @@ func BenchmarkEncodeIndex(b *testing.B) { } } -func TestIndexUnserializeOld(t *testing.T) { - defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateLegacyIndex, false)() - - idx, oldFormat, err := index.DecodeIndex(docOldExample, restic.NewRandomID()) - rtest.OK(t, err) - rtest.Assert(t, oldFormat, "old index format recognized as new format") - - for _, test := range exampleTests { - list := idx.Lookup(restic.BlobHandle{ID: test.id, Type: test.tpe}, nil) - if len(list) != 1 { - t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list) - } - blob := list[0] - - rtest.Equals(t, test.packID, blob.PackID) - rtest.Equals(t, test.tpe, blob.Type) - rtest.Equals(t, test.offset, blob.Offset) - rtest.Equals(t, test.length, blob.Length) - } -} - func TestIndexPacks(t *testing.T) { idx := index.NewIndex() packs := restic.NewIDSet() diff --git a/internal/repository/index/master_index.go b/internal/repository/index/master_index.go index 9b5c4f9f8b5..2600fe350dd 100644 --- a/internal/repository/index/master_index.go +++ b/internal/repository/index/master_index.go @@ -265,7 +265,7 @@ func (mi *MasterIndex) MergeFinalIndexes() error { return nil } -func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, p *progress.Counter, cb func(id restic.ID, idx *Index, oldFormat bool, err error) error) error { +func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, p *progress.Counter, cb func(id restic.ID, idx *Index, err error) error) error { indexList, err := restic.MemorizeList(ctx, r, restic.IndexFile) if err != nil { return err @@ -284,12 +284,12 @@ func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, defer p.Done() } - err = 
ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *Index, oldFormat bool, err error) error { + err = ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *Index, err error) error { if p != nil { p.Add(1) } if cb != nil { - err = cb(id, idx, oldFormat, err) + err = cb(id, idx, err) } if err != nil { return err @@ -365,8 +365,7 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud var rewriteWg sync.WaitGroup type rewriteTask struct { - idx *Index - oldFormat bool + idx *Index } rewriteCh := make(chan rewriteTask) loader := func() error { @@ -376,13 +375,13 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud if err != nil { return fmt.Errorf("LoadUnpacked(%v): %w", id.Str(), err) } - idx, oldFormat, err := DecodeIndex(buf, id) + idx, err := DecodeIndex(buf, id) if err != nil { return err } select { - case rewriteCh <- rewriteTask{idx, oldFormat}: + case rewriteCh <- rewriteTask{idx}: case <-wgCtx.Done(): return wgCtx.Err() } @@ -411,8 +410,8 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud defer close(saveCh) newIndex := NewIndex() for task := range rewriteCh { - // always rewrite indexes using the old format, that include a pack that must be removed or that are not full - if !task.oldFormat && len(task.idx.Packs().Intersect(excludePacks)) == 0 && IndexFull(task.idx) { + // always rewrite indexes that include a pack that must be removed or that are not full + if len(task.idx.Packs().Intersect(excludePacks)) == 0 && IndexFull(task.idx) { // make sure that each pack is only stored exactly once in the index excludePacks.Merge(task.idx.Packs()) // index is already up to date diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index 77080925461..bff7ec5da4f 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -33,7 +33,7 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, } else { printer.P("loading indexes...\n") mi := index.NewMasterIndex() - err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error { + err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, err error) error { if err != nil { printer.E("removing invalid index %v: %v\n", id, err) obsoleteIndexes = append(obsoleteIndexes, id) diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index ea21ea3f3d6..3467a9cfade 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -4,10 +4,8 @@ import ( "bytes" "context" "crypto/sha256" - "fmt" "io" "math/rand" - "os" "path/filepath" "strings" "sync" @@ -261,11 +259,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (* return nil, err } - idx, oldFormat, err := index.DecodeIndex(buf, id) - if oldFormat { - fmt.Fprintf(os.Stderr, "index %v has old format\n", id.Str()) - } - return idx, err + return index.DecodeIndex(buf, id) } func TestRepositoryLoadUnpackedBroken(t *testing.T) { From 60245970280c6e6442cc57777bd570cb49f66448 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 20:28:39 +0200 Subject: [PATCH 632/893] drop support for s3legacy layout --- cmd/restic/cmd_check.go | 3 - cmd/restic/cmd_restore_integration_test.go | 36 +--- doc/design.rst | 11 +- internal/backend/layout/layout.go | 167 --------------- internal/backend/layout/layout_default.go | 7 + 
internal/backend/layout/layout_s3legacy.go | 79 ------- internal/backend/layout/layout_test.go | 200 ------------------ internal/backend/local/config.go | 3 +- internal/backend/local/layout_test.go | 11 +- internal/backend/local/local.go | 11 +- internal/backend/s3/s3.go | 10 +- internal/backend/sftp/config.go | 1 - internal/backend/sftp/layout_test.go | 11 +- internal/backend/sftp/sftp.go | 28 +-- .../testdata/repo-layout-s3legacy.tar.gz | Bin 38096 -> 0 bytes internal/checker/checker.go | 17 -- internal/feature/registry.go | 2 - internal/migrations/s3_layout.go | 123 ----------- internal/repository/s3_backend.go | 12 -- 19 files changed, 34 insertions(+), 698 deletions(-) delete mode 100644 internal/backend/layout/layout_s3legacy.go delete mode 100644 internal/backend/testdata/repo-layout-s3legacy.tar.gz delete mode 100644 internal/migrations/s3_layout.go delete mode 100644 internal/repository/s3_backend.go diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index e8596ae332a..fc460e39e38 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -296,9 +296,6 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args errorsFound = true printer.E("%v\n", err) } - } else if err == checker.ErrLegacyLayout { - errorsFound = true - printer.E("error: repository still uses the S3 legacy layout\nYou must run `restic migrate s3legacy` to correct this.\n") } else { errorsFound = true printer.E("%v\n", err) diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index b0543850bbb..42cd1f87dca 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -12,7 +12,6 @@ import ( "testing" "time" - "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/termstatus" @@ -403,36 +402,21 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) { "meta data of intermediate directory hasn't been restore") } -func TestRestoreLocalLayout(t *testing.T) { - defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() +func TestRestoreDefaultLayout(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() - var tests = []struct { - filename string - layout string - }{ - {"repo-layout-default.tar.gz", ""}, - {"repo-layout-s3legacy.tar.gz", ""}, - {"repo-layout-default.tar.gz", "default"}, - {"repo-layout-s3legacy.tar.gz", "s3legacy"}, - } - - for _, test := range tests { - datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename) - - rtest.SetupTarTestFixture(t, env.base, datafile) + datafile := filepath.Join("..", "..", "internal", "backend", "testdata", "repo-layout-default.tar.gz") - env.gopts.extended["local.layout"] = test.layout + rtest.SetupTarTestFixture(t, env.base, datafile) - // check the repo - testRunCheck(t, env.gopts) + // check the repo + testRunCheck(t, env.gopts) - // restore latest snapshot - target := filepath.Join(env.base, "restore") - testRunRestoreLatest(t, env.gopts, target, nil, nil) + // restore latest snapshot + target := filepath.Join(env.base, "restore") + testRunRestoreLatest(t, env.gopts, target, nil, nil) - rtest.RemoveAll(t, filepath.Join(env.base, "repo")) - rtest.RemoveAll(t, target) - } + rtest.RemoveAll(t, filepath.Join(env.base, "repo")) + rtest.RemoveAll(t, target) } diff --git a/doc/design.rst b/doc/design.rst index c974e997a44..62b7e9bf967 
100644 --- a/doc/design.rst +++ b/doc/design.rst @@ -119,16 +119,11 @@ A local repository can be initialized with the ``restic init`` command, e.g.: $ restic -r /tmp/restic-repo init -The local and sftp backends will auto-detect and accept all layouts described -in the following sections, so that remote repositories mounted locally e.g. via -fuse can be accessed. The layout auto-detection can be overridden by specifying -the option ``-o local.layout=default``, valid values are ``default`` and -``s3legacy``. The option for the sftp backend is named ``sftp.layout``, for the -s3 backend ``s3.layout``. - S3 Legacy Layout (deprecated) ----------------------------- +Restic 0.17 is the last version that supports the legacy layout. + Unfortunately during development the Amazon S3 backend uses slightly different paths (directory names use singular instead of plural for ``key``, ``lock``, and ``snapshot`` files), and the pack files are stored directly below @@ -152,8 +147,6 @@ the ``data`` directory. The S3 Legacy repository layout looks like this: /snapshot └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec -Restic 0.17 is the last version that supports the legacy layout. - Pack Format =========== diff --git a/internal/backend/layout/layout.go b/internal/backend/layout/layout.go index 052fd66ca24..cd69efc34de 100644 --- a/internal/backend/layout/layout.go +++ b/internal/backend/layout/layout.go @@ -1,18 +1,7 @@ package layout import ( - "context" - "fmt" - "os" - "path/filepath" - "regexp" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/feature" - "github.com/restic/restic/internal/fs" - "github.com/restic/restic/internal/restic" ) // Layout computes paths for file name storage. @@ -23,159 +12,3 @@ type Layout interface { Paths() []string Name() string } - -// Filesystem is the abstraction of a file system used for a backend. -type Filesystem interface { - Join(...string) string - ReadDir(context.Context, string) ([]os.FileInfo, error) - IsNotExist(error) bool -} - -// ensure statically that *LocalFilesystem implements Filesystem. -var _ Filesystem = &LocalFilesystem{} - -// LocalFilesystem implements Filesystem in a local path. -type LocalFilesystem struct { -} - -// ReadDir returns all entries of a directory. -func (l *LocalFilesystem) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) { - f, err := fs.Open(dir) - if err != nil { - return nil, err - } - - entries, err := f.Readdir(-1) - if err != nil { - return nil, errors.Wrap(err, "Readdir") - } - - err = f.Close() - if err != nil { - return nil, errors.Wrap(err, "Close") - } - - return entries, nil -} - -// Join combines several path components to one. -func (l *LocalFilesystem) Join(paths ...string) string { - return filepath.Join(paths...) -} - -// IsNotExist returns true for errors that are caused by not existing files. 
-func (l *LocalFilesystem) IsNotExist(err error) bool { - return os.IsNotExist(err) -} - -var backendFilenameLength = len(restic.ID{}) * 2 -var backendFilename = regexp.MustCompile(fmt.Sprintf("^[a-fA-F0-9]{%d}$", backendFilenameLength)) - -func hasBackendFile(ctx context.Context, fs Filesystem, dir string) (bool, error) { - entries, err := fs.ReadDir(ctx, dir) - if err != nil && fs.IsNotExist(err) { - return false, nil - } - - if err != nil { - return false, errors.Wrap(err, "ReadDir") - } - - for _, e := range entries { - if backendFilename.MatchString(e.Name()) { - return true, nil - } - } - - return false, nil -} - -// ErrLayoutDetectionFailed is returned by DetectLayout() when the layout -// cannot be detected automatically. -var ErrLayoutDetectionFailed = errors.New("auto-detecting the filesystem layout failed") - -var ErrLegacyLayoutFound = errors.New("detected legacy S3 layout. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your repository") - -// DetectLayout tries to find out which layout is used in a local (or sftp) -// filesystem at the given path. If repo is nil, an instance of LocalFilesystem -// is used. -func DetectLayout(ctx context.Context, repo Filesystem, dir string) (Layout, error) { - debug.Log("detect layout at %v", dir) - if repo == nil { - repo = &LocalFilesystem{} - } - - // key file in the "keys" dir (DefaultLayout) - foundKeysFile, err := hasBackendFile(ctx, repo, repo.Join(dir, defaultLayoutPaths[backend.KeyFile])) - if err != nil { - return nil, err - } - - // key file in the "key" dir (S3LegacyLayout) - foundKeyFile, err := hasBackendFile(ctx, repo, repo.Join(dir, s3LayoutPaths[backend.KeyFile])) - if err != nil { - return nil, err - } - - if foundKeysFile && !foundKeyFile { - debug.Log("found default layout at %v", dir) - return &DefaultLayout{ - Path: dir, - Join: repo.Join, - }, nil - } - - if foundKeyFile && !foundKeysFile { - if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) { - return nil, ErrLegacyLayoutFound - } - - debug.Log("found s3 layout at %v", dir) - return &S3LegacyLayout{ - Path: dir, - Join: repo.Join, - }, nil - } - - debug.Log("layout detection failed") - return nil, ErrLayoutDetectionFailed -} - -// ParseLayout parses the config string and returns a Layout. When layout is -// the empty string, DetectLayout is used. If that fails, defaultLayout is used. 
-func ParseLayout(ctx context.Context, repo Filesystem, layout, defaultLayout, path string) (l Layout, err error) { - debug.Log("parse layout string %q for backend at %v", layout, path) - switch layout { - case "default": - l = &DefaultLayout{ - Path: path, - Join: repo.Join, - } - case "s3legacy": - if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) { - return nil, ErrLegacyLayoutFound - } - - l = &S3LegacyLayout{ - Path: path, - Join: repo.Join, - } - case "": - l, err = DetectLayout(ctx, repo, path) - - // use the default layout if auto detection failed - if errors.Is(err, ErrLayoutDetectionFailed) && defaultLayout != "" { - debug.Log("error: %v, use default layout %v", err, defaultLayout) - return ParseLayout(ctx, repo, defaultLayout, "", path) - } - - if err != nil { - return nil, err - } - debug.Log("layout detected: %v", l) - default: - return nil, errors.Errorf("unknown backend layout string %q, may be one of: default, s3legacy", layout) - } - - return l, nil -} diff --git a/internal/backend/layout/layout_default.go b/internal/backend/layout/layout_default.go index 9a8419f10a8..3f73a941d92 100644 --- a/internal/backend/layout/layout_default.go +++ b/internal/backend/layout/layout_default.go @@ -23,6 +23,13 @@ var defaultLayoutPaths = map[backend.FileType]string{ backend.KeyFile: "keys", } +func NewDefaultLayout(path string, join func(...string) string) *DefaultLayout { + return &DefaultLayout{ + Path: path, + Join: join, + } +} + func (l *DefaultLayout) String() string { return "" } diff --git a/internal/backend/layout/layout_s3legacy.go b/internal/backend/layout/layout_s3legacy.go deleted file mode 100644 index 8b90789d84f..00000000000 --- a/internal/backend/layout/layout_s3legacy.go +++ /dev/null @@ -1,79 +0,0 @@ -package layout - -import ( - "github.com/restic/restic/internal/backend" -) - -// S3LegacyLayout implements the old layout used for s3 cloud storage backends, as -// described in the Design document. -type S3LegacyLayout struct { - URL string - Path string - Join func(...string) string -} - -var s3LayoutPaths = map[backend.FileType]string{ - backend.PackFile: "data", - backend.SnapshotFile: "snapshot", - backend.IndexFile: "index", - backend.LockFile: "lock", - backend.KeyFile: "key", -} - -func (l *S3LegacyLayout) String() string { - return "" -} - -// Name returns the name for this layout. -func (l *S3LegacyLayout) Name() string { - return "s3legacy" -} - -// join calls Join with the first empty elements removed. -func (l *S3LegacyLayout) join(url string, items ...string) string { - for len(items) > 0 && items[0] == "" { - items = items[1:] - } - - path := l.Join(items...) - if path == "" || path[0] != '/' { - if url != "" && url[len(url)-1] != '/' { - url += "/" - } - } - - return url + path -} - -// Dirname returns the directory path for a given file type and name. -func (l *S3LegacyLayout) Dirname(h backend.Handle) string { - if h.Type == backend.ConfigFile { - return l.URL + l.Join(l.Path, "/") - } - - return l.join(l.URL, l.Path, s3LayoutPaths[h.Type]) + "/" -} - -// Filename returns a path to a file, including its name. 
-func (l *S3LegacyLayout) Filename(h backend.Handle) string { - name := h.Name - - if h.Type == backend.ConfigFile { - name = "config" - } - - return l.join(l.URL, l.Path, s3LayoutPaths[h.Type], name) -} - -// Paths returns all directory names -func (l *S3LegacyLayout) Paths() (dirs []string) { - for _, p := range s3LayoutPaths { - dirs = append(dirs, l.Join(l.Path, p)) - } - return dirs -} - -// Basedir returns the base dir name for type t. -func (l *S3LegacyLayout) Basedir(t backend.FileType) (dirname string, subdirs bool) { - return l.Join(l.Path, s3LayoutPaths[t]), false -} diff --git a/internal/backend/layout/layout_test.go b/internal/backend/layout/layout_test.go index 55a0749c9e0..de5ae7d699c 100644 --- a/internal/backend/layout/layout_test.go +++ b/internal/backend/layout/layout_test.go @@ -1,7 +1,6 @@ package layout import ( - "context" "fmt" "path" "path/filepath" @@ -10,7 +9,6 @@ import ( "testing" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) @@ -232,42 +230,6 @@ func TestRESTLayoutURLs(t *testing.T) { "https://hostname.foo:1234/prefix/repo/config", "https://hostname.foo:1234/prefix/repo/", }, - { - &S3LegacyLayout{URL: "https://hostname.foo", Path: "/", Join: path.Join}, - backend.Handle{Type: backend.PackFile, Name: "foobar"}, - "https://hostname.foo/data/foobar", - "https://hostname.foo/data/", - }, - { - &S3LegacyLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "", Join: path.Join}, - backend.Handle{Type: backend.LockFile, Name: "foobar"}, - "https://hostname.foo:1234/prefix/repo/lock/foobar", - "https://hostname.foo:1234/prefix/repo/lock/", - }, - { - &S3LegacyLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join}, - backend.Handle{Type: backend.ConfigFile, Name: "foobar"}, - "https://hostname.foo:1234/prefix/repo/config", - "https://hostname.foo:1234/prefix/repo/", - }, - { - &S3LegacyLayout{URL: "", Path: "", Join: path.Join}, - backend.Handle{Type: backend.PackFile, Name: "foobar"}, - "data/foobar", - "data/", - }, - { - &S3LegacyLayout{URL: "", Path: "", Join: path.Join}, - backend.Handle{Type: backend.LockFile, Name: "foobar"}, - "lock/foobar", - "lock/", - }, - { - &S3LegacyLayout{URL: "", Path: "/", Join: path.Join}, - backend.Handle{Type: backend.ConfigFile, Name: "foobar"}, - "/config", - "/", - }, } for _, test := range tests { @@ -284,165 +246,3 @@ func TestRESTLayoutURLs(t *testing.T) { }) } } - -func TestS3LegacyLayout(t *testing.T) { - path := rtest.TempDir(t) - - var tests = []struct { - backend.Handle - filename string - }{ - { - backend.Handle{Type: backend.PackFile, Name: "0123456"}, - filepath.Join(path, "data", "0123456"), - }, - { - backend.Handle{Type: backend.ConfigFile, Name: "CFG"}, - filepath.Join(path, "config"), - }, - { - backend.Handle{Type: backend.SnapshotFile, Name: "123456"}, - filepath.Join(path, "snapshot", "123456"), - }, - { - backend.Handle{Type: backend.IndexFile, Name: "123456"}, - filepath.Join(path, "index", "123456"), - }, - { - backend.Handle{Type: backend.LockFile, Name: "123456"}, - filepath.Join(path, "lock", "123456"), - }, - { - backend.Handle{Type: backend.KeyFile, Name: "123456"}, - filepath.Join(path, "key", "123456"), - }, - } - - l := &S3LegacyLayout{ - Path: path, - Join: filepath.Join, - } - - t.Run("Paths", func(t *testing.T) { - dirs := l.Paths() - - want := []string{ - filepath.Join(path, "data"), - filepath.Join(path, "snapshot"), - filepath.Join(path, "index"), - filepath.Join(path, 
"lock"), - filepath.Join(path, "key"), - } - - sort.Strings(want) - sort.Strings(dirs) - - if !reflect.DeepEqual(dirs, want) { - t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs) - } - }) - - for _, test := range tests { - t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) { - filename := l.Filename(test.Handle) - if filename != test.filename { - t.Fatalf("wrong filename, want %v, got %v", test.filename, filename) - } - }) - } -} - -func TestDetectLayout(t *testing.T) { - defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() - path := rtest.TempDir(t) - - var tests = []struct { - filename string - want string - }{ - {"repo-layout-default.tar.gz", "*layout.DefaultLayout"}, - {"repo-layout-s3legacy.tar.gz", "*layout.S3LegacyLayout"}, - } - - var fs = &LocalFilesystem{} - for _, test := range tests { - for _, fs := range []Filesystem{fs, nil} { - t.Run(fmt.Sprintf("%v/fs-%T", test.filename, fs), func(t *testing.T) { - rtest.SetupTarTestFixture(t, path, filepath.Join("../testdata", test.filename)) - - layout, err := DetectLayout(context.TODO(), fs, filepath.Join(path, "repo")) - if err != nil { - t.Fatal(err) - } - - if layout == nil { - t.Fatal("wanted some layout, but detect returned nil") - } - - layoutName := fmt.Sprintf("%T", layout) - if layoutName != test.want { - t.Fatalf("want layout %v, got %v", test.want, layoutName) - } - - rtest.RemoveAll(t, filepath.Join(path, "repo")) - }) - } - } -} - -func TestParseLayout(t *testing.T) { - defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() - path := rtest.TempDir(t) - - var tests = []struct { - layoutName string - defaultLayoutName string - want string - }{ - {"default", "", "*layout.DefaultLayout"}, - {"s3legacy", "", "*layout.S3LegacyLayout"}, - {"", "", "*layout.DefaultLayout"}, - } - - rtest.SetupTarTestFixture(t, path, filepath.Join("..", "testdata", "repo-layout-default.tar.gz")) - - for _, test := range tests { - t.Run(test.layoutName, func(t *testing.T) { - layout, err := ParseLayout(context.TODO(), &LocalFilesystem{}, test.layoutName, test.defaultLayoutName, filepath.Join(path, "repo")) - if err != nil { - t.Fatal(err) - } - - if layout == nil { - t.Fatal("wanted some layout, but detect returned nil") - } - - // test that the functions work (and don't panic) - _ = layout.Dirname(backend.Handle{Type: backend.PackFile}) - _ = layout.Filename(backend.Handle{Type: backend.PackFile, Name: "1234"}) - _ = layout.Paths() - - layoutName := fmt.Sprintf("%T", layout) - if layoutName != test.want { - t.Fatalf("want layout %v, got %v", test.want, layoutName) - } - }) - } -} - -func TestParseLayoutInvalid(t *testing.T) { - path := rtest.TempDir(t) - - var invalidNames = []string{ - "foo", "bar", "local", - } - - for _, name := range invalidNames { - t.Run(name, func(t *testing.T) { - layout, err := ParseLayout(context.TODO(), nil, name, "", path) - if err == nil { - t.Fatalf("expected error not found for layout name %v, layout is %v", name, layout) - } - }) - } -} diff --git a/internal/backend/local/config.go b/internal/backend/local/config.go index e08f05550bb..782f132d06d 100644 --- a/internal/backend/local/config.go +++ b/internal/backend/local/config.go @@ -9,8 +9,7 @@ import ( // Config holds all information needed to open a local repository. 
type Config struct { - Path string - Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"` + Path string Connections uint `option:"connections" help:"set a limit for the number of concurrent operations (default: 2)"` } diff --git a/internal/backend/local/layout_test.go b/internal/backend/local/layout_test.go index 00c91376af8..cac89e55261 100644 --- a/internal/backend/local/layout_test.go +++ b/internal/backend/local/layout_test.go @@ -6,30 +6,22 @@ import ( "testing" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) func TestLayout(t *testing.T) { - defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { filename string - layout string failureExpected bool packfiles map[string]bool }{ - {"repo-layout-default.tar.gz", "", false, map[string]bool{ + {"repo-layout-default.tar.gz", false, map[string]bool{ "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, }}, - {"repo-layout-s3legacy.tar.gz", "", false, map[string]bool{ - "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, - "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, - "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, - }}, } for _, test := range tests { @@ -39,7 +31,6 @@ func TestLayout(t *testing.T) { repo := filepath.Join(path, "repo") be, err := Open(context.TODO(), Config{ Path: repo, - Layout: test.layout, Connections: 2, }) if err != nil { diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go index f041d608a18..ff7e3d35d45 100644 --- a/internal/backend/local/local.go +++ b/internal/backend/local/local.go @@ -37,13 +37,8 @@ func NewFactory() location.Factory { return location.NewLimitedBackendFactory("local", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open)) } -const defaultLayout = "default" - func open(ctx context.Context, cfg Config) (*Local, error) { - l, err := layout.ParseLayout(ctx, &layout.LocalFilesystem{}, cfg.Layout, defaultLayout, cfg.Path) - if err != nil { - return nil, err - } + l := layout.NewDefaultLayout(cfg.Path, filepath.Join) fi, err := fs.Stat(l.Filename(backend.Handle{Type: backend.ConfigFile})) m := util.DeriveModesFromFileInfo(fi, err) @@ -58,14 +53,14 @@ func open(ctx context.Context, cfg Config) (*Local, error) { // Open opens the local backend as specified by config. func Open(ctx context.Context, cfg Config) (*Local, error) { - debug.Log("open local backend at %v (layout %q)", cfg.Path, cfg.Layout) + debug.Log("open local backend at %v", cfg.Path) return open(ctx, cfg) } // Create creates all the necessary files and directories for a new local // backend at dir. Afterwards a new config blob should be created. 
func Create(ctx context.Context, cfg Config) (*Local, error) { - debug.Log("create local backend at %v (layout %q)", cfg.Path, cfg.Layout) + debug.Log("create local backend at %v", cfg.Path) be, err := open(ctx, cfg) if err != nil { diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index 019f8471b53..5ef9528919f 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -37,8 +37,6 @@ func NewFactory() location.Factory { return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open) } -const defaultLayout = "default" - func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) { debug.Log("open, config %#v", cfg) @@ -83,15 +81,9 @@ func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, erro be := &Backend{ client: client, cfg: cfg, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), } - l, err := layout.ParseLayout(ctx, be, cfg.Layout, defaultLayout, cfg.Prefix) - if err != nil { - return nil, err - } - - be.Layout = l - return be, nil } diff --git a/internal/backend/sftp/config.go b/internal/backend/sftp/config.go index aa8ac7bff5e..daefbf441d9 100644 --- a/internal/backend/sftp/config.go +++ b/internal/backend/sftp/config.go @@ -13,7 +13,6 @@ import ( type Config struct { User, Host, Port, Path string - Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"` Command string `option:"command" help:"specify command to create sftp connection"` Args string `option:"args" help:"specify arguments for ssh"` diff --git a/internal/backend/sftp/layout_test.go b/internal/backend/sftp/layout_test.go index 8bb7eac0199..9e143d4fdd2 100644 --- a/internal/backend/sftp/layout_test.go +++ b/internal/backend/sftp/layout_test.go @@ -8,7 +8,6 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/sftp" - "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) @@ -17,25 +16,18 @@ func TestLayout(t *testing.T) { t.Skip("sftp server binary not available") } - defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { filename string - layout string failureExpected bool packfiles map[string]bool }{ - {"repo-layout-default.tar.gz", "", false, map[string]bool{ + {"repo-layout-default.tar.gz", false, map[string]bool{ "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, }}, - {"repo-layout-s3legacy.tar.gz", "", false, map[string]bool{ - "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, - "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, - "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, - }}, } for _, test := range tests { @@ -46,7 +38,6 @@ func TestLayout(t *testing.T) { be, err := sftp.Open(context.TODO(), sftp.Config{ Command: fmt.Sprintf("%q -e", sftpServer), Path: repo, - Layout: test.layout, Connections: 5, }) if err != nil { diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index d766591b76f..8ac6781e9c7 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -121,7 +121,13 @@ func startClient(cfg Config) (*SFTP, error) { } _, posixRename := client.HasExtension("posix-rename@openssh.com") - return 
&SFTP{c: client, cmd: cmd, result: ch, posixRename: posixRename}, nil + return &SFTP{ + c: client, + cmd: cmd, + result: ch, + posixRename: posixRename, + Layout: layout.NewDefaultLayout(cfg.Path, path.Join), + }, nil } // clientError returns an error if the client has exited. Otherwise, nil is @@ -152,14 +158,6 @@ func Open(ctx context.Context, cfg Config) (*SFTP, error) { } func open(ctx context.Context, sftp *SFTP, cfg Config) (*SFTP, error) { - var err error - sftp.Layout, err = layout.ParseLayout(ctx, sftp, cfg.Layout, defaultLayout, cfg.Path) - if err != nil { - return nil, err - } - - debug.Log("layout: %v\n", sftp.Layout) - fi, err := sftp.c.Stat(sftp.Layout.Filename(backend.Handle{Type: backend.ConfigFile})) m := util.DeriveModesFromFileInfo(fi, err) debug.Log("using (%03O file, %03O dir) permissions", m.File, m.Dir) @@ -195,11 +193,6 @@ func (r *SFTP) mkdirAllDataSubdirs(ctx context.Context, nconn uint) error { return g.Wait() } -// Join combines path components with slashes (according to the sftp spec). -func (r *SFTP) Join(p ...string) string { - return path.Join(p...) -} - // ReadDir returns the entries for a directory. func (r *SFTP) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) { fi, err := r.c.ReadDir(dir) @@ -266,11 +259,6 @@ func Create(ctx context.Context, cfg Config) (*SFTP, error) { return nil, err } - sftp.Layout, err = layout.ParseLayout(ctx, sftp, cfg.Layout, defaultLayout, cfg.Path) - if err != nil { - return nil, err - } - sftp.Modes = util.DefaultModes // test if config file already exists @@ -582,7 +570,7 @@ func (r *SFTP) deleteRecursive(ctx context.Context, name string) error { return ctx.Err() } - itemName := r.Join(name, fi.Name()) + itemName := path.Join(name, fi.Name()) if fi.IsDir() { err := r.deleteRecursive(ctx, itemName) if err != nil { diff --git a/internal/backend/testdata/repo-layout-s3legacy.tar.gz b/internal/backend/testdata/repo-layout-s3legacy.tar.gz deleted file mode 100644 index 2b7d852cc9ab4cf8da442461bf9b8a0ded688855..0000000000000000000000000000000000000000 GIT binary patch
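
With ParseLayout, DetectLayout and the layout options removed, the local, sftp and s3 backends now build their layout directly through the NewDefaultLayout constructor added in this patch. The snippet below is a minimal sketch of that call, not part of the patch itself; the repository path is a made-up example and the code assumes it is compiled inside the restic module, since the internal packages are not importable from elsewhere.

package main

import (
	"fmt"
	"path/filepath"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/layout"
)

func main() {
	// Hypothetical repository path; each backend passes its own join function
	// (filepath.Join for local, path.Join for sftp and s3).
	l := layout.NewDefaultLayout("/srv/restic-repo", filepath.Join)

	// The layout resolves storage paths exactly as before, e.g. the repository
	// config should map to <path>/config.
	fmt.Println(l.Filename(backend.Handle{Type: backend.ConfigFile}))

	// Paths lists the directories a backend has to create on init.
	fmt.Println(l.Paths())
}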
z=ry?ziudvlx0b9jvP)9@Q&_ONPEUhbR}c)ljA=bkHLtv2k<8%#TQCXj7f6|4=I;Ed zo|1lr@->beK*1*UUBF_r!(?c#iLII1^_E8f0cVVFAvwbgCJZsi_{T@hYjCu?ZHhtX zA}s~38z5Jcv=6s6W6etPXO#Eaq9ww68Y6ECO##-ki_5LwiRfDA`@60K()p&W*Gi4L z_=ZbquBp34*>Y@Qst5@K>E?eXZupvX*^qZpl&H$9-C5r#{r478T`SGco#46Gm3 zg~Y3Bg%*yk=r3K42bj80=lF1{5>T3J^-8X2VL%@mFOsM0XU};f(IKpqsk&n^epxyz zzPqUJA0if*6iTR zm{1G!Yuk3$c78QGXE|9~``u9%i?>DMnMih~8n5VVRtEtfh`z7&;v7}TRW5yN74&J|Y%{`ye*=#jPxq4?0)(CkDJ`|Z* z+^I&wI?{*`hAwv)OPMdSUr6zfl?1&kt>zEWj8qvq7)V?pZ$0+_kjo@BO!y-*z>1+x z+dq6|%kmXn3?w060Ypr{veIRj%(<)g+G|ePEMnq|lk@CbFDQ=EOcOab<4ehJ63ar| zD%4NuAM?KREN{IEJ`UK$z+MY_;wxjq@XR;^(iwXcxbPRRj0riD4){L}RHFnNYy!LB z+Nx}DUu64lF`g+QLBpz>OR(rbi1&BqZZTMPHitE6u3kH-9x+&B2Qn4e#1#W6LA6)n z--CbK2EwY&Vvt=LLj0=5+CHgJ!ckNtYWZa`MjVw$dAF&zXfZo02=yGLbtq0*4DG92 zS?tV^)4^!vji%6QbZBV`{p$JT(P-fgIMS;TPqW~o3$}#}xfp0G$dRCmfBsMMK038G zWk7KKnEf+yzNCAmoB{{)0(a^eM{&IR!%m%+EXXReUJFu%~o<9bQG}L3Rvm4$`E)Gtm_)nb-W8>HXt&?bG(MMwIo#V$1lJ2 zj7KJ^c|e*2!E(;T65qK9XgjktvP8wt&s!rC716QFIWquK`gcN)O)nf@+ zLIt>^_fHE+eKim3wQ)h-Zk;cv09ei+%_Ii+IET(T0zkf!j09KCPJEXlE0r#%ywe$D-`f#l)B#kqFiTza|2f_lc7DI$W~^?qN%Q%P&iiPzS*8hLoZ30L|e*N zYNN{*r2qD_tU6^rIjusz0*(s1QsA$w1ZB2NDyp7E3fo&*IU>{{2hb|C5L)re1>xdc ztbtI#e<$J=%JTW8klekrkEms{)znuUwERB1!8x_-QYXQ@E)Hj_`iwtl&k4W;{}5zf zh+$+Kn11>eo;^1ph(c`w<8&jvLiVbCHXJ^Fw&Lee2)D|7%aiWpwU1)VA4I8kWmd4o zSu57)-l(AM+N7T1x1uBwB1!=(m}O%2>H$~o;*c-^bS><{97u-7 zGF`xsku1hUFp>+}?T=r+X-fmY&t?7ni&MSJdCxK&2h4r^%-TPI^u)0dALlyGzCU9x zp6Xe{y6Kb!QtGg#smlmu>#73XIhI1Pbuv}RE&~DeK)xq91RdOZP#eCZ!FdgTsfVuk z8odu6X=k4^l978z-KI{Cn{F#XjYd&07T$aP!(dkX^uNu=t88tYZd9R+F@$tV!&5o- z$9EuO&W49e4OSc&_H#Mf?O9tb>5IqQ8%h-zV1hdJWV_AkhbSF~yyc|P>DTlItA}aQ z0Eks%WSe!bbP(mw4ZmP_5MOHbb!K@(Uh7Y^8Os?7lsMgDlPUO+p>vX`8Qk@uv*#?^ zCKfoPr~arHS>FIM-!`FPR%;LgcKx-_aPwZ=v@u8lSawTYptr3QR=jtt?6bT?W5(u_ z9S%hWY&@B}LmROEjbjIoxFc6Ua{FGtU6xm%`TA?K_6#8A4O!01no)WW58Kqa!vj&J zO!DQn!c_ncKF)EH{&0EQf3;I=vjmrb+NfB}Uj6=Kb!eIL-yLuBeQjylggQVrgqm7u zJZ`ogI$)=Vab9qwSk_cqZub*##Nf}zP6JMMQNT`dn^as|aT+8NE?eT)8cj256dIZomdAhWi=&Z(kiX99jdCylB8`Ej&#+MgZ($;*-o; zHrNp@E_<6n>X0VPcg_FBdL&xQ0Pez)^euBVI$AWIztbTebvrtkJv9{+Ml`(irpj!SJG1JNRO7je`0v^GHv)I9-q!K)BWJuuc8 zfb7M{-&*Qj8j~8!@czjkW}Lm28SRpcpx1+LU@B6k<_~~<%=boUsTAZdGI^~}&$B!M-A-$?Zc(xGkXHzS@TAaF6CO?{n&p z98MI4HUjS)^s5o4IP=8b3<`L0l|p)^J8c8F8Fk4O357~OQRnyb9Wg{q?$j;K$D|l64e*3KbRS917 zT)HCgpfF3J*ic8)>$oT=6GVJXd>M-NpVheU6O!P5bo)u8!Q=hw|Ps}*M+x90fx39_m({LJ8ajZ$vlKhTvO@NZl z`RUKmC+zg~cJ8D9VsKK&fC=*IWI{3??AJZ_dN?4hA4)T_isy;Vpb$bIxS{#RTAz0z z1i~ZOufbbv$!WN~a#%!LJ+do$d|3t}d|uNA<9{_0`BSUFgd1bF)ORGJ&+v zf$;pYm)=tzLdvA|vD?C3+%M`fpI!Zs3}6h6ZpNkTc!k zNKNEbn2-}#vcJcf7BmCJQf^ixaS6_7K~r~8g*6<czM*mJ@;o8OCtexjq_@U=op~B3U zW&ijeHYJj8(abGZsQy>c`{E8PlZ|9e6emaUb^-vP)8)A}5Id?Gk+jmN@|kc%w{cI> z23y-%c5x7LY0yHXBvmN85molH7a-!G+K188J=yOpA@Sr_$^*b~9Me^ys*eRR_$@fh z<59pQn%lT)j{KKG^flCZlN44{5&O*hq8;Onfx58zX-D(TFZ~d(xGo6$aOuI_Q9n_pz+O}9cOOzB<4_@2dPSv`8Ud~h58<&Z3fzYm z(=8ze(4ThSAxu)p^c*F6#sbKaJ#oHYA!$I4^Eb0d~c+cK%%Fy|%{>&5@(mWSUJjX=c)O*$R_st*4cnX-%4?)+ zi!Qg_lV09@`@^f#@nWCW?yoq|68J8CT4qk+t4954f?T(rrHb>5*SybAEzC$(e=@Cq zFWbX=6Kv)?sF;QDiuhDHtlPZz(eIS>8yB5@35q&{8>Y7N*>Pp`sMSyddoe*PW#TYnoLg0-FKG%gN;X_{k2P~YwcL$GN!II zX7%q;$;iofc*Xdd)ge^);Kb`&CW&~w@A$LuQ}QjjZPyZXWOUx;=Q=Gs{(?__QpkSG zGRqtC9i=b#zFv9TV0OCrjMV#9_Vcb$B&C6JE=L-o1IX(V98K zzOii5)3qng8j3zVFKl^T-XbY`RdIrwOTdm(COs7)0TI`hzN+0Yf6obPtB;fSa>@Hf zt@~9|zxKS!s#}G=y+%K_TxBU%za5+ByEFR8RLeC=@8YYSZ~R~voA}_>7tY!RUp5Dp zw=8W7ca&Me~@wYIs1+2|dIq*?e0y;UA__g^V<{QbLX zt@G(S|MhaJH7;jMr}jNovuc*=c5^i{;#a9TtM4DHs4&&?Y)<8Rfw;ge@tx{>SJvLS z!^Eld`KZxh>Gh8eZ91}`zk^dH+Izt)o9DJyd-H?zLyX_@*7_R1?KF#Rl;GKZ-sYi6 
z@Jctq$@~i<55(`5d~>*e=f^Pf9F{HX6jL9p^iH^ajP=tgcfY6RFUlF@KCQo~=~tDh ztINwOXk!0r}kzsjDk@x3P!;w7zLwX6pVsVFbYP&C>U7)00|d0)&R%=0G`n` A4FCWD diff --git a/internal/checker/checker.go b/internal/checker/checker.go index e0c1766d7be..76bb15f637d 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -8,8 +8,6 @@ import ( "sync" "github.com/klauspost/compress/zstd" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/s3" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" @@ -53,9 +51,6 @@ func New(repo restic.Repository, trackUnused bool) *Checker { return c } -// ErrLegacyLayout is returned when the repository uses the S3 legacy layout. -var ErrLegacyLayout = errors.New("repository uses S3 legacy layout") - // ErrDuplicatePacks is returned when a pack is found in more than one index. type ErrDuplicatePacks struct { PackID restic.ID @@ -177,23 +172,11 @@ func (e *PackError) Error() string { return "pack " + e.ID.String() + ": " + e.Err.Error() } -func isS3Legacy(b backend.Backend) bool { - be := backend.AsBackend[*s3.Backend](b) - return be != nil && be.Layout.Name() == "s3legacy" -} - // Packs checks that all packs referenced in the index are still available and // there are no packs that aren't in an index. errChan is closed after all // packs have been checked. func (c *Checker) Packs(ctx context.Context, errChan chan<- error) { defer close(errChan) - - if r, ok := c.repo.(*repository.Repository); ok { - if isS3Legacy(repository.AsS3Backend(r)) { - errChan <- ErrLegacyLayout - } - } - debug.Log("checking for %d packs", len(c.packs)) debug.Log("listing repository packs") diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 8bdb5480e29..7fe7da96514 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -6,7 +6,6 @@ var Flag = New() // flag names are written in kebab-case const ( BackendErrorRedesign FlagName = "backend-error-redesign" - DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout" DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" ExplicitS3AnonymousAuth FlagName = "explicit-s3-anonymous-auth" SafeForgetKeepTags FlagName = "safe-forget-keep-tags" @@ -15,7 +14,6 @@ const ( func init() { Flag.SetFlags(map[FlagName]FlagDesc{ BackendErrorRedesign: {Type: Beta, Description: "enforce timeouts for stuck HTTP requests and use new backend error handling design."}, - DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."}, DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. 
Will be removed in a future restic version after repository format 3 is available"}, ExplicitS3AnonymousAuth: {Type: Beta, Description: "forbid anonymous S3 authentication unless `-o s3.unsafe-anonymous-auth=true` is set"}, SafeForgetKeepTags: {Type: Beta, Description: "prevent deleting all snapshots if the tag passed to `forget --keep-tags tagname` does not exist"}, diff --git a/internal/migrations/s3_layout.go b/internal/migrations/s3_layout.go deleted file mode 100644 index 8b994b8fc17..00000000000 --- a/internal/migrations/s3_layout.go +++ /dev/null @@ -1,123 +0,0 @@ -package migrations - -import ( - "context" - "fmt" - "os" - "path" - - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/layout" - "github.com/restic/restic/internal/backend/s3" - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" -) - -func init() { - register(&S3Layout{}) -} - -// S3Layout migrates a repository on an S3 backend from the "s3legacy" to the -// "default" layout. -type S3Layout struct{} - -// Check tests whether the migration can be applied. -func (m *S3Layout) Check(_ context.Context, repo restic.Repository) (bool, string, error) { - be := repository.AsS3Backend(repo.(*repository.Repository)) - if be == nil { - debug.Log("backend is not s3") - return false, "backend is not s3", nil - } - - if be.Layout.Name() != "s3legacy" { - debug.Log("layout is not s3legacy") - return false, "not using the legacy s3 layout", nil - } - - return true, "", nil -} - -func (m *S3Layout) RepoCheck() bool { - return false -} - -func retry(max int, fail func(err error), f func() error) error { - var err error - for i := 0; i < max; i++ { - err = f() - if err == nil { - return nil - } - if fail != nil { - fail(err) - } - } - return err -} - -// maxErrors for retrying renames on s3. -const maxErrors = 20 - -func (m *S3Layout) moveFiles(ctx context.Context, be *s3.Backend, l layout.Layout, t restic.FileType) error { - printErr := func(err error) { - fmt.Fprintf(os.Stderr, "renaming file returned error: %v\n", err) - } - - return be.List(ctx, t, func(fi backend.FileInfo) error { - h := backend.Handle{Type: t, Name: fi.Name} - debug.Log("move %v", h) - - return retry(maxErrors, printErr, func() error { - return be.Rename(ctx, h, l) - }) - }) -} - -// Apply runs the migration. -func (m *S3Layout) Apply(ctx context.Context, repo restic.Repository) error { - be := repository.AsS3Backend(repo.(*repository.Repository)) - if be == nil { - debug.Log("backend is not s3") - return errors.New("backend is not s3") - } - - oldLayout := &layout.S3LegacyLayout{ - Path: be.Path(), - Join: path.Join, - } - - newLayout := &layout.DefaultLayout{ - Path: be.Path(), - Join: path.Join, - } - - be.Layout = oldLayout - - for _, t := range []restic.FileType{ - restic.SnapshotFile, - restic.PackFile, - restic.KeyFile, - restic.LockFile, - } { - err := m.moveFiles(ctx, be, newLayout, t) - if err != nil { - return err - } - } - - be.Layout = newLayout - - return nil -} - -// Name returns the name for this migration. -func (m *S3Layout) Name() string { - return "s3_layout" -} - -// Desc returns a short description what the migration does. 
-func (m *S3Layout) Desc() string { - return "move files from 's3legacy' to the 'default' repository layout" -} diff --git a/internal/repository/s3_backend.go b/internal/repository/s3_backend.go deleted file mode 100644 index 4c77c69a2b9..00000000000 --- a/internal/repository/s3_backend.go +++ /dev/null @@ -1,12 +0,0 @@ -package repository - -import ( - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/s3" -) - -// AsS3Backend extracts the S3 backend from a repository -// TODO remove me once restic 0.17 was released -func AsS3Backend(repo *Repository) *s3.Backend { - return backend.AsBackend[*s3.Backend](repo.be) -} From af989aab4e7f0b3482aba760b8c677a1b9d44953 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 21:15:58 +0200 Subject: [PATCH 633/893] backend/layout: unexport fields and simplify rest layout --- internal/backend/azure/azure.go | 11 +++--- internal/backend/b2/b2.go | 22 +++++------- internal/backend/gs/gs.go | 19 +++++----- internal/backend/layout/layout_default.go | 22 ++++++------ internal/backend/layout/layout_rest.go | 22 +++++++----- internal/backend/layout/layout_test.go | 42 +++++++++++------------ internal/backend/rest/rest.go | 3 +- internal/backend/swift/swift.go | 5 +-- 8 files changed, 68 insertions(+), 78 deletions(-) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 737cf0e1479..1c844f97f4c 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -125,13 +125,10 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { } be := &Backend{ - container: client, - cfg: cfg, - connections: cfg.Connections, - Layout: &layout.DefaultLayout{ - Path: cfg.Prefix, - Join: path.Join, - }, + container: client, + cfg: cfg, + connections: cfg.Connections, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, } diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go index 9717cdd0eaa..3ef2bcbe3d1 100644 --- a/internal/backend/b2/b2.go +++ b/internal/backend/b2/b2.go @@ -107,13 +107,10 @@ func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backen } be := &b2Backend{ - client: client, - bucket: bucket, - cfg: cfg, - Layout: &layout.DefaultLayout{ - Join: path.Join, - Path: cfg.Prefix, - }, + client: client, + bucket: bucket, + cfg: cfg, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, canDelete: true, } @@ -143,13 +140,10 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Back } be := &b2Backend{ - client: client, - bucket: bucket, - cfg: cfg, - Layout: &layout.DefaultLayout{ - Join: path.Join, - Path: cfg.Prefix, - }, + client: client, + bucket: bucket, + cfg: cfg, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, } return be, nil diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go index 0af226f5d5f..b4d4ecfd4fc 100644 --- a/internal/backend/gs/gs.go +++ b/internal/backend/gs/gs.go @@ -105,17 +105,14 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { } be := &Backend{ - gcsClient: gcsClient, - projectID: cfg.ProjectID, - connections: cfg.Connections, - bucketName: cfg.Bucket, - region: cfg.Region, - bucket: gcsClient.Bucket(cfg.Bucket), - prefix: cfg.Prefix, - Layout: &layout.DefaultLayout{ - Path: cfg.Prefix, - Join: path.Join, - }, + gcsClient: gcsClient, + projectID: cfg.ProjectID, + connections: cfg.Connections, + bucketName: 
cfg.Bucket, + region: cfg.Region, + bucket: gcsClient.Bucket(cfg.Bucket), + prefix: cfg.Prefix, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, } diff --git a/internal/backend/layout/layout_default.go b/internal/backend/layout/layout_default.go index 3f73a941d92..d2c4634d395 100644 --- a/internal/backend/layout/layout_default.go +++ b/internal/backend/layout/layout_default.go @@ -11,8 +11,8 @@ import ( // subdirs, two characters each (taken from the first two characters of the // file name). type DefaultLayout struct { - Path string - Join func(...string) string + path string + join func(...string) string } var defaultLayoutPaths = map[backend.FileType]string{ @@ -25,8 +25,8 @@ var defaultLayoutPaths = map[backend.FileType]string{ func NewDefaultLayout(path string, join func(...string) string) *DefaultLayout { return &DefaultLayout{ - Path: path, - Join: join, + path: path, + join: join, } } @@ -44,32 +44,32 @@ func (l *DefaultLayout) Dirname(h backend.Handle) string { p := defaultLayoutPaths[h.Type] if h.Type == backend.PackFile && len(h.Name) > 2 { - p = l.Join(p, h.Name[:2]) + "/" + p = l.join(p, h.Name[:2]) + "/" } - return l.Join(l.Path, p) + "/" + return l.join(l.path, p) + "/" } // Filename returns a path to a file, including its name. func (l *DefaultLayout) Filename(h backend.Handle) string { name := h.Name if h.Type == backend.ConfigFile { - return l.Join(l.Path, "config") + return l.join(l.path, "config") } - return l.Join(l.Dirname(h), name) + return l.join(l.Dirname(h), name) } // Paths returns all directory names needed for a repo. func (l *DefaultLayout) Paths() (dirs []string) { for _, p := range defaultLayoutPaths { - dirs = append(dirs, l.Join(l.Path, p)) + dirs = append(dirs, l.join(l.path, p)) } // also add subdirs for i := 0; i < 256; i++ { subdir := hex.EncodeToString([]byte{byte(i)}) - dirs = append(dirs, l.Join(l.Path, defaultLayoutPaths[backend.PackFile], subdir)) + dirs = append(dirs, l.join(l.path, defaultLayoutPaths[backend.PackFile], subdir)) } return dirs @@ -81,6 +81,6 @@ func (l *DefaultLayout) Basedir(t backend.FileType) (dirname string, subdirs boo subdirs = true } - dirname = l.Join(l.Path, defaultLayoutPaths[t]) + dirname = l.join(l.path, defaultLayoutPaths[t]) return } diff --git a/internal/backend/layout/layout_rest.go b/internal/backend/layout/layout_rest.go index 822dd4a7e23..78fc6c82623 100644 --- a/internal/backend/layout/layout_rest.go +++ b/internal/backend/layout/layout_rest.go @@ -1,18 +1,24 @@ package layout import ( + "path" + "github.com/restic/restic/internal/backend" ) // RESTLayout implements the default layout for the REST protocol. type RESTLayout struct { - URL string - Path string - Join func(...string) string + url string } var restLayoutPaths = defaultLayoutPaths +func NewRESTLayout(url string) *RESTLayout { + return &RESTLayout{ + url: url, + } +} + func (l *RESTLayout) String() string { return "" } @@ -25,10 +31,10 @@ func (l *RESTLayout) Name() string { // Dirname returns the directory path for a given file type and name. func (l *RESTLayout) Dirname(h backend.Handle) string { if h.Type == backend.ConfigFile { - return l.URL + l.Join(l.Path, "/") + return l.url + "/" } - return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type]) + "/" + return l.url + path.Join("/", restLayoutPaths[h.Type]) + "/" } // Filename returns a path to a file, including its name. 
@@ -39,18 +45,18 @@ func (l *RESTLayout) Filename(h backend.Handle) string { name = "config" } - return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type], name) + return l.url + path.Join("/", restLayoutPaths[h.Type], name) } // Paths returns all directory names func (l *RESTLayout) Paths() (dirs []string) { for _, p := range restLayoutPaths { - dirs = append(dirs, l.URL+l.Join(l.Path, p)) + dirs = append(dirs, l.url+path.Join("/", p)) } return dirs } // Basedir returns the base dir name for files of type t. func (l *RESTLayout) Basedir(t backend.FileType) (dirname string, subdirs bool) { - return l.URL + l.Join(l.Path, restLayoutPaths[t]), false + return l.url + path.Join("/", restLayoutPaths[t]), false } diff --git a/internal/backend/layout/layout_test.go b/internal/backend/layout/layout_test.go index de5ae7d699c..af5105c207a 100644 --- a/internal/backend/layout/layout_test.go +++ b/internal/backend/layout/layout_test.go @@ -6,6 +6,7 @@ import ( "path/filepath" "reflect" "sort" + "strings" "testing" "github.com/restic/restic/internal/backend" @@ -97,8 +98,8 @@ func TestDefaultLayout(t *testing.T) { t.Run("Paths", func(t *testing.T) { l := &DefaultLayout{ - Path: tempdir, - Join: filepath.Join, + path: tempdir, + join: filepath.Join, } dirs := l.Paths() @@ -126,8 +127,8 @@ func TestDefaultLayout(t *testing.T) { for _, test := range tests { t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) { l := &DefaultLayout{ - Path: test.path, - Join: test.join, + path: test.path, + join: test.join, } filename := l.Filename(test.Handle) @@ -139,7 +140,7 @@ func TestDefaultLayout(t *testing.T) { } func TestRESTLayout(t *testing.T) { - path := rtest.TempDir(t) + url := `https://hostname.foo` var tests = []struct { backend.Handle @@ -147,44 +148,43 @@ func TestRESTLayout(t *testing.T) { }{ { backend.Handle{Type: backend.PackFile, Name: "0123456"}, - filepath.Join(path, "data", "0123456"), + strings.Join([]string{url, "data", "0123456"}, "/"), }, { backend.Handle{Type: backend.ConfigFile, Name: "CFG"}, - filepath.Join(path, "config"), + strings.Join([]string{url, "config"}, "/"), }, { backend.Handle{Type: backend.SnapshotFile, Name: "123456"}, - filepath.Join(path, "snapshots", "123456"), + strings.Join([]string{url, "snapshots", "123456"}, "/"), }, { backend.Handle{Type: backend.IndexFile, Name: "123456"}, - filepath.Join(path, "index", "123456"), + strings.Join([]string{url, "index", "123456"}, "/"), }, { backend.Handle{Type: backend.LockFile, Name: "123456"}, - filepath.Join(path, "locks", "123456"), + strings.Join([]string{url, "locks", "123456"}, "/"), }, { backend.Handle{Type: backend.KeyFile, Name: "123456"}, - filepath.Join(path, "keys", "123456"), + strings.Join([]string{url, "keys", "123456"}, "/"), }, } l := &RESTLayout{ - Path: path, - Join: filepath.Join, + url: url, } t.Run("Paths", func(t *testing.T) { dirs := l.Paths() want := []string{ - filepath.Join(path, "data"), - filepath.Join(path, "snapshots"), - filepath.Join(path, "index"), - filepath.Join(path, "locks"), - filepath.Join(path, "keys"), + strings.Join([]string{url, "data"}, "/"), + strings.Join([]string{url, "snapshots"}, "/"), + strings.Join([]string{url, "index"}, "/"), + strings.Join([]string{url, "locks"}, "/"), + strings.Join([]string{url, "keys"}, "/"), } sort.Strings(want) @@ -213,19 +213,19 @@ func TestRESTLayoutURLs(t *testing.T) { dir string }{ { - &RESTLayout{URL: "https://hostname.foo", Path: "", Join: path.Join}, + &RESTLayout{url: "https://hostname.foo"}, backend.Handle{Type: 
backend.PackFile, Name: "foobar"}, "https://hostname.foo/data/foobar", "https://hostname.foo/data/", }, { - &RESTLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join}, + &RESTLayout{url: "https://hostname.foo:1234/prefix/repo"}, backend.Handle{Type: backend.LockFile, Name: "foobar"}, "https://hostname.foo:1234/prefix/repo/locks/foobar", "https://hostname.foo:1234/prefix/repo/locks/", }, { - &RESTLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join}, + &RESTLayout{url: "https://hostname.foo:1234/prefix/repo"}, backend.Handle{Type: backend.ConfigFile, Name: "foobar"}, "https://hostname.foo:1234/prefix/repo/config", "https://hostname.foo:1234/prefix/repo/", diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go index d0a08175b57..7bdedff397f 100644 --- a/internal/backend/rest/rest.go +++ b/internal/backend/rest/rest.go @@ -8,7 +8,6 @@ import ( "io" "net/http" "net/url" - "path" "strings" "github.com/restic/restic/internal/backend" @@ -66,7 +65,7 @@ func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) be := &Backend{ url: cfg.URL, client: http.Client{Transport: rt}, - Layout: &layout.RESTLayout{URL: url, Join: path.Join}, + Layout: layout.NewRESTLayout(url), connections: cfg.Connections, } diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go index e6412d0bff0..dfa2055cdf1 100644 --- a/internal/backend/swift/swift.go +++ b/internal/backend/swift/swift.go @@ -72,10 +72,7 @@ func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backen connections: cfg.Connections, container: cfg.Container, prefix: cfg.Prefix, - Layout: &layout.DefaultLayout{ - Path: cfg.Prefix, - Join: path.Join, - }, + Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), } // Authenticate if needed From 97f696b93705516551ec77cbbde5d8444990b35e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 21:16:22 +0200 Subject: [PATCH 634/893] backend: remove dead code --- internal/backend/azure/azure.go | 5 -- internal/backend/gs/gs.go | 5 -- internal/backend/local/local.go | 10 +-- internal/backend/s3/s3.go | 118 ++------------------------------ internal/backend/sftp/sftp.go | 30 ++------ 5 files changed, 15 insertions(+), 153 deletions(-) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 1c844f97f4c..e09593fd636 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -188,11 +188,6 @@ func (be *Backend) IsPermanentError(err error) bool { return false } -// Join combines path components with slashes. -func (be *Backend) Join(p ...string) string { - return path.Join(p...) -} - func (be *Backend) Connections() uint { return be.connections } diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go index b4d4ecfd4fc..ad50f194b52 100644 --- a/internal/backend/gs/gs.go +++ b/internal/backend/gs/gs.go @@ -186,11 +186,6 @@ func (be *Backend) IsPermanentError(err error) bool { return false } -// Join combines path components with slashes. -func (be *Backend) Join(p ...string) string { - return path.Join(p...) 
-} - func (be *Backend) Connections() uint { return be.connections } diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go index ff7e3d35d45..8985ef4c488 100644 --- a/internal/backend/local/local.go +++ b/internal/backend/local/local.go @@ -37,7 +37,7 @@ func NewFactory() location.Factory { return location.NewLimitedBackendFactory("local", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open)) } -func open(ctx context.Context, cfg Config) (*Local, error) { +func open(cfg Config) (*Local, error) { l := layout.NewDefaultLayout(cfg.Path, filepath.Join) fi, err := fs.Stat(l.Filename(backend.Handle{Type: backend.ConfigFile})) @@ -52,17 +52,17 @@ func open(ctx context.Context, cfg Config) (*Local, error) { } // Open opens the local backend as specified by config. -func Open(ctx context.Context, cfg Config) (*Local, error) { +func Open(_ context.Context, cfg Config) (*Local, error) { debug.Log("open local backend at %v", cfg.Path) - return open(ctx, cfg) + return open(cfg) } // Create creates all the necessary files and directories for a new local // backend at dir. Afterwards a new config blob should be created. -func Create(ctx context.Context, cfg Config) (*Local, error) { +func Create(_ context.Context, cfg Config) (*Local, error) { debug.Log("create local backend at %v", cfg.Path) - be, err := open(ctx, cfg) + be, err := open(cfg) if err != nil { return nil, err } diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index 5ef9528919f..2176d289d87 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -9,7 +9,6 @@ import ( "os" "path" "strings" - "time" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/layout" @@ -37,7 +36,7 @@ func NewFactory() location.Factory { return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open) } -func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) { +func open(cfg Config, rt http.RoundTripper) (*Backend, error) { debug.Log("open, config %#v", cfg) if cfg.KeyID == "" && cfg.Secret.String() != "" { @@ -186,14 +185,14 @@ func getCredentials(cfg Config, tr http.RoundTripper) (*credentials.Credentials, // Open opens the S3 backend at bucket and region. The bucket is created if it // does not exist yet. -func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) { - return open(ctx, cfg, rt) +func Open(_ context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) { + return open(cfg, rt) } // Create opens the S3 backend at bucket and region and creates the bucket if // it does not exist yet. func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) { - be, err := open(ctx, cfg, rt) + be, err := open(cfg, rt) if err != nil { return nil, errors.Wrap(err, "open") } @@ -249,78 +248,6 @@ func (be *Backend) IsPermanentError(err error) bool { return false } -// Join combines path components with slashes. -func (be *Backend) Join(p ...string) string { - return path.Join(p...) 
-} - -type fileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -func (fi *fileInfo) Name() string { return fi.name } // base name of the file -func (fi *fileInfo) Size() int64 { return fi.size } // length in bytes for regular files; system-dependent for others -func (fi *fileInfo) Mode() os.FileMode { return fi.mode } // file mode bits -func (fi *fileInfo) ModTime() time.Time { return fi.modTime } // modification time -func (fi *fileInfo) IsDir() bool { return fi.isDir } // abbreviation for Mode().IsDir() -func (fi *fileInfo) Sys() interface{} { return nil } // underlying data source (can return nil) - -// ReadDir returns the entries for a directory. -func (be *Backend) ReadDir(ctx context.Context, dir string) (list []os.FileInfo, err error) { - debug.Log("ReadDir(%v)", dir) - - // make sure dir ends with a slash - if dir[len(dir)-1] != '/' { - dir += "/" - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - debug.Log("using ListObjectsV1(%v)", be.cfg.ListObjectsV1) - - for obj := range be.client.ListObjects(ctx, be.cfg.Bucket, minio.ListObjectsOptions{ - Prefix: dir, - Recursive: false, - UseV1: be.cfg.ListObjectsV1, - }) { - if obj.Err != nil { - return nil, err - } - - if obj.Key == "" { - continue - } - - name := strings.TrimPrefix(obj.Key, dir) - // Sometimes s3 returns an entry for the dir itself. Ignore it. - if name == "" { - continue - } - entry := &fileInfo{ - name: name, - size: obj.Size, - modTime: obj.LastModified, - } - - if name[len(name)-1] == '/' { - entry.isDir = true - entry.mode = os.ModeDir | 0755 - entry.name = name[:len(name)-1] - } else { - entry.mode = 0644 - } - - list = append(list, entry) - } - - return list, nil -} - func (be *Backend) Connections() uint { return be.cfg.Connections } @@ -518,40 +445,3 @@ func (be *Backend) Delete(ctx context.Context) error { // Close does nothing func (be *Backend) Close() error { return nil } - -// Rename moves a file based on the new layout l. 
-func (be *Backend) Rename(ctx context.Context, h backend.Handle, l layout.Layout) error { - debug.Log("Rename %v to %v", h, l) - oldname := be.Filename(h) - newname := l.Filename(h) - - if oldname == newname { - debug.Log(" %v is already renamed", newname) - return nil - } - - debug.Log(" %v -> %v", oldname, newname) - - src := minio.CopySrcOptions{ - Bucket: be.cfg.Bucket, - Object: oldname, - } - - dst := minio.CopyDestOptions{ - Bucket: be.cfg.Bucket, - Object: newname, - } - - _, err := be.client.CopyObject(ctx, dst, src) - if err != nil && be.IsNotExist(err) { - debug.Log("copy failed: %v, seems to already have been renamed", err) - return nil - } - - if err != nil { - debug.Log("copy failed: %v", err) - return err - } - - return be.client.RemoveObject(ctx, be.cfg.Bucket, oldname, minio.RemoveObjectOptions{}) -} diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index 8ac6781e9c7..0ecf7ae6283 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -50,8 +50,6 @@ func NewFactory() location.Factory { return location.NewLimitedBackendFactory("sftp", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open)) } -const defaultLayout = "default" - func startClient(cfg Config) (*SFTP, error) { program, args, err := buildSSHCommand(cfg) if err != nil { @@ -145,7 +143,7 @@ func (r *SFTP) clientError() error { // Open opens an sftp backend as described by the config by running // "ssh" with the appropriate arguments (or cfg.Command, if set). -func Open(ctx context.Context, cfg Config) (*SFTP, error) { +func Open(_ context.Context, cfg Config) (*SFTP, error) { debug.Log("open backend with config %#v", cfg) sftp, err := startClient(cfg) @@ -154,10 +152,10 @@ func Open(ctx context.Context, cfg Config) (*SFTP, error) { return nil, err } - return open(ctx, sftp, cfg) + return open(sftp, cfg) } -func open(ctx context.Context, sftp *SFTP, cfg Config) (*SFTP, error) { +func open(sftp *SFTP, cfg Config) (*SFTP, error) { fi, err := sftp.c.Stat(sftp.Layout.Filename(backend.Handle{Type: backend.ConfigFile})) m := util.DeriveModesFromFileInfo(fi, err) debug.Log("using (%03O file, %03O dir) permissions", m.File, m.Dir) @@ -193,16 +191,6 @@ func (r *SFTP) mkdirAllDataSubdirs(ctx context.Context, nconn uint) error { return g.Wait() } -// ReadDir returns the entries for a directory. -func (r *SFTP) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) { - fi, err := r.c.ReadDir(dir) - - // sftp client does not specify dir name on error, so add it here - err = errors.Wrapf(err, "(%v)", dir) - - return fi, err -} - // IsNotExist returns true if the error is caused by a not existing file. func (r *SFTP) IsNotExist(err error) bool { return errors.Is(err, os.ErrNotExist) @@ -273,7 +261,7 @@ func Create(ctx context.Context, cfg Config) (*SFTP, error) { } // repurpose existing connection - return open(ctx, sftp, cfg) + return open(sftp, cfg) } func (r *SFTP) Connections() uint { @@ -290,12 +278,6 @@ func (r *SFTP) HasAtomicReplace() bool { return r.posixRename } -// Join joins the given paths and cleans them afterwards. This always uses -// forward slashes, which is required by sftp. 
-func Join(parts ...string) string { - return path.Clean(path.Join(parts...)) -} - // tempSuffix generates a random string suffix that should be sufficiently long // to avoid accidental conflicts func tempSuffix() string { @@ -560,9 +542,9 @@ func (r *SFTP) Close() error { } func (r *SFTP) deleteRecursive(ctx context.Context, name string) error { - entries, err := r.ReadDir(ctx, name) + entries, err := r.c.ReadDir(name) if err != nil { - return errors.Wrap(err, "ReadDir") + return errors.Wrapf(err, "ReadDir(%v)", name) } for _, fi := range entries { From 56440797077c3b8e5687a2f7bd07d7ba268e9a77 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 22:35:22 +0200 Subject: [PATCH 635/893] restic: prepare extraction of fs code from Node --- internal/fs/ea_windows_test.go | 6 +- internal/restic/node.go | 84 ++++++++++++-------------- internal/restic/node_aix.go | 20 +++--- internal/restic/node_darwin.go | 2 +- internal/restic/node_freebsd.go | 2 +- internal/restic/node_linux.go | 2 +- internal/restic/node_netbsd.go | 20 +++--- internal/restic/node_openbsd.go | 18 +++--- internal/restic/node_solaris.go | 2 +- internal/restic/node_test.go | 8 +-- internal/restic/node_windows.go | 16 ++--- internal/restic/node_windows_test.go | 12 ++-- internal/restic/node_xattr.go | 14 ++--- internal/restic/node_xattr_all_test.go | 10 +-- internal/restorer/restorer.go | 4 +- 15 files changed, 108 insertions(+), 112 deletions(-) diff --git a/internal/fs/ea_windows_test.go b/internal/fs/ea_windows_test.go index 74afd7aa56a..e474a3735f6 100644 --- a/internal/fs/ea_windows_test.go +++ b/internal/fs/ea_windows_test.go @@ -142,7 +142,7 @@ func TestSetGetFileEA(t *testing.T) { testFilePath, testFile := setupTestFile(t) testEAs := generateTestEAs(t, 3, testFilePath) fileHandle := openFile(t, testFilePath, windows.FILE_ATTRIBUTE_NORMAL) - defer closeFileHandle(t, testFilePath, testFile, fileHandle) + defer testCloseFileHandle(t, testFilePath, testFile, fileHandle) testSetGetEA(t, testFilePath, fileHandle, testEAs) } @@ -154,7 +154,7 @@ func TestSetGetFolderEA(t *testing.T) { testEAs := generateTestEAs(t, 3, testFolderPath) fileHandle := openFile(t, testFolderPath, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS) - defer closeFileHandle(t, testFolderPath, nil, fileHandle) + defer testCloseFileHandle(t, testFolderPath, nil, fileHandle) testSetGetEA(t, testFolderPath, fileHandle, testEAs) } @@ -212,7 +212,7 @@ func openFile(t *testing.T, path string, attributes uint32) windows.Handle { return fileHandle } -func closeFileHandle(t *testing.T, testfilePath string, testFile *os.File, handle windows.Handle) { +func testCloseFileHandle(t *testing.T, testfilePath string, testFile *os.File, handle windows.Handle) { if testFile != nil { err := testFile.Close() if err != nil { diff --git a/internal/restic/node.go b/internal/restic/node.go index 6afdff64a09..c9c68cbee23 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -150,7 +150,7 @@ func NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (* node.Size = uint64(fi.Size()) } - err := node.fillExtra(path, fi, ignoreXattrListError) + err := nodeFillExtra(node, path, fi, ignoreXattrListError) return node, err } @@ -187,33 +187,33 @@ func (node Node) GetExtendedAttribute(a string) []byte { return nil } -// CreateAt creates the node at the given path but does NOT restore node meta data. 
-func (node *Node) CreateAt(ctx context.Context, path string, repo BlobLoader) error { +// NodeCreateAt creates the node at the given path but does NOT restore node meta data. +func NodeCreateAt(ctx context.Context, node *Node, path string, repo BlobLoader) error { debug.Log("create node %v at %v", node.Name, path) switch node.Type { case "dir": - if err := node.createDirAt(path); err != nil { + if err := nodeCreateDirAt(node, path); err != nil { return err } case "file": - if err := node.createFileAt(ctx, path, repo); err != nil { + if err := nodeCreateFileAt(ctx, node, path, repo); err != nil { return err } case "symlink": - if err := node.createSymlinkAt(path); err != nil { + if err := nodeCreateSymlinkAt(node, path); err != nil { return err } case "dev": - if err := node.createDevAt(path); err != nil { + if err := nodeCreateDevAt(node, path); err != nil { return err } case "chardev": - if err := node.createCharDevAt(path); err != nil { + if err := nodeCreateCharDevAt(node, path); err != nil { return err } case "fifo": - if err := node.createFifoAt(path); err != nil { + if err := nodeCreateFifoAt(path); err != nil { return err } case "socket": @@ -225,9 +225,9 @@ func (node *Node) CreateAt(ctx context.Context, path string, repo BlobLoader) er return nil } -// RestoreMetadata restores node metadata -func (node Node) RestoreMetadata(path string, warn func(msg string)) error { - err := node.restoreMetadata(path, warn) +// NodeRestoreMetadata restores node metadata +func NodeRestoreMetadata(node *Node, path string, warn func(msg string)) error { + err := nodeRestoreMetadata(node, path, warn) if err != nil { // It is common to have permission errors for folders like /home // unless you're running as root, so ignore those. @@ -242,28 +242,28 @@ func (node Node) RestoreMetadata(path string, warn func(msg string)) error { return err } -func (node Node) restoreMetadata(path string, warn func(msg string)) error { +func nodeRestoreMetadata(node *Node, path string, warn func(msg string)) error { var firsterr error if err := lchown(path, int(node.UID), int(node.GID)); err != nil { firsterr = errors.WithStack(err) } - if err := node.restoreExtendedAttributes(path); err != nil { + if err := nodeRestoreExtendedAttributes(node, path); err != nil { debug.Log("error restoring extended attributes for %v: %v", path, err) if firsterr == nil { firsterr = err } } - if err := node.restoreGenericAttributes(path, warn); err != nil { + if err := nodeRestoreGenericAttributes(node, path, warn); err != nil { debug.Log("error restoring generic attributes for %v: %v", path, err) if firsterr == nil { firsterr = err } } - if err := node.RestoreTimestamps(path); err != nil { + if err := NodeRestoreTimestamps(node, path); err != nil { debug.Log("error restoring timestamps for %v: %v", path, err) if firsterr == nil { firsterr = err @@ -284,14 +284,14 @@ func (node Node) restoreMetadata(path string, warn func(msg string)) error { return firsterr } -func (node Node) RestoreTimestamps(path string) error { +func NodeRestoreTimestamps(node *Node, path string) error { var utimes = [...]syscall.Timespec{ syscall.NsecToTimespec(node.AccessTime.UnixNano()), syscall.NsecToTimespec(node.ModTime.UnixNano()), } if node.Type == "symlink" { - return node.restoreSymlinkTimestamps(path, utimes) + return nodeRestoreSymlinkTimestamps(path, utimes) } if err := syscall.UtimesNano(path, utimes[:]); err != nil { @@ -301,7 +301,7 @@ func (node Node) RestoreTimestamps(path string) error { return nil } -func (node Node) createDirAt(path string) 
error { +func nodeCreateDirAt(node *Node, path string) error { err := fs.Mkdir(path, node.Mode) if err != nil && !os.IsExist(err) { return errors.WithStack(err) @@ -310,13 +310,13 @@ func (node Node) createDirAt(path string) error { return nil } -func (node Node) createFileAt(ctx context.Context, path string, repo BlobLoader) error { +func nodeCreateFileAt(ctx context.Context, node *Node, path string, repo BlobLoader) error { f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) if err != nil { return errors.WithStack(err) } - err = node.writeNodeContent(ctx, repo, f) + err = nodeWriteNodeContent(ctx, node, repo, f) closeErr := f.Close() if err != nil { @@ -330,7 +330,7 @@ func (node Node) createFileAt(ctx context.Context, path string, repo BlobLoader) return nil } -func (node Node) writeNodeContent(ctx context.Context, repo BlobLoader, f *os.File) error { +func nodeWriteNodeContent(ctx context.Context, node *Node, repo BlobLoader, f *os.File) error { var buf []byte for _, id := range node.Content { buf, err := repo.LoadBlob(ctx, DataBlob, id, buf) @@ -347,7 +347,7 @@ func (node Node) writeNodeContent(ctx context.Context, repo BlobLoader, f *os.Fi return nil } -func (node Node) createSymlinkAt(path string) error { +func nodeCreateSymlinkAt(node *Node, path string) error { if err := fs.Symlink(node.LinkTarget, path); err != nil { return errors.WithStack(err) } @@ -355,15 +355,15 @@ func (node Node) createSymlinkAt(path string) error { return nil } -func (node *Node) createDevAt(path string) error { +func nodeCreateDevAt(node *Node, path string) error { return mknod(path, syscall.S_IFBLK|0600, node.Device) } -func (node *Node) createCharDevAt(path string) error { +func nodeCreateCharDevAt(node *Node, path string) error { return mknod(path, syscall.S_IFCHR|0600, node.Device) } -func (node *Node) createFifoAt(path string) error { +func nodeCreateFifoAt(path string) error { return mkfifo(path, 0600) } @@ -601,7 +601,7 @@ func deepEqual(map1, map2 map[GenericAttributeType]json.RawMessage) bool { return true } -func (node *Node) fillUser(stat *statT) { +func nodeFillUser(node *Node, stat *statT) { uid, gid := stat.uid(), stat.gid() node.UID, node.GID = uid, gid node.User = lookupUsername(uid) @@ -662,7 +662,7 @@ func lookupGroup(gid uint32) string { return group } -func (node *Node) fillExtra(path string, fi os.FileInfo, ignoreXattrListError bool) error { +func nodeFillExtra(node *Node, path string, fi os.FileInfo, ignoreXattrListError bool) error { stat, ok := toStatT(fi.Sys()) if !ok { // fill minimal info with current values for uid, gid @@ -675,9 +675,9 @@ func (node *Node) fillExtra(path string, fi os.FileInfo, ignoreXattrListError bo node.Inode = uint64(stat.ino()) node.DeviceID = uint64(stat.dev()) - node.fillTimes(stat) + nodeFillTimes(node, stat) - node.fillUser(stat) + nodeFillUser(node, stat) switch node.Type { case "file": @@ -703,10 +703,10 @@ func (node *Node) fillExtra(path string, fi os.FileInfo, ignoreXattrListError bo return errors.Errorf("unsupported file type %q", node.Type) } - allowExtended, err := node.fillGenericAttributes(path, fi, stat) + allowExtended, err := nodeFillGenericAttributes(node, path, fi, stat) if allowExtended { // Skip processing ExtendedAttributes if allowExtended is false. 
- err = errors.CombineErrors(err, node.fillExtendedAttributes(path, ignoreXattrListError)) + err = errors.CombineErrors(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) } return err } @@ -715,7 +715,7 @@ func mkfifo(path string, mode uint32) (err error) { return mknod(path, mode|syscall.S_IFIFO, 0) } -func (node *Node) fillTimes(stat *statT) { +func nodeFillTimes(node *Node, stat *statT) { ctim := stat.ctim() atim := stat.atim() node.ChangeTime = time.Unix(ctim.Unix()) @@ -746,11 +746,11 @@ func handleUnknownGenericAttributeFound(genericAttributeType GenericAttributeTyp } } -// handleAllUnknownGenericAttributesFound performs validations for all generic attributes in the node. +// HandleAllUnknownGenericAttributesFound performs validations for all generic attributes of a node. // This is not used on windows currently because windows has handling for generic attributes. // nolint:unused -func (node Node) handleAllUnknownGenericAttributesFound(warn func(msg string)) error { - for name := range node.GenericAttributes { +func HandleAllUnknownGenericAttributesFound(attributes map[GenericAttributeType]json.RawMessage, warn func(msg string)) error { + for name := range attributes { handleUnknownGenericAttributeFound(name, warn) } return nil @@ -770,9 +770,8 @@ func checkGenericAttributeNameNotHandledAndPut(value GenericAttributeType) bool // The functions below are common helper functions which can be used for generic attributes support // across different OS. -// genericAttributesToOSAttrs gets the os specific attribute from the generic attribute using reflection -// nolint:unused -func genericAttributesToOSAttrs(attrs map[GenericAttributeType]json.RawMessage, attributeType reflect.Type, attributeValuePtr *reflect.Value, keyPrefix string) (unknownAttribs []GenericAttributeType, err error) { +// GenericAttributesToOSAttrs gets the os specific attribute from the generic attribute using reflection +func GenericAttributesToOSAttrs(attrs map[GenericAttributeType]json.RawMessage, attributeType reflect.Type, attributeValuePtr *reflect.Value, keyPrefix string) (unknownAttribs []GenericAttributeType, err error) { attributeValue := *attributeValuePtr for key, rawMsg := range attrs { @@ -796,20 +795,17 @@ func genericAttributesToOSAttrs(attrs map[GenericAttributeType]json.RawMessage, } // getFQKey gets the fully qualified key for the field -// nolint:unused func getFQKey(field reflect.StructField, keyPrefix string) GenericAttributeType { return GenericAttributeType(fmt.Sprintf("%s.%s", keyPrefix, field.Tag.Get("generic"))) } // getFQKeyByIndex gets the fully qualified key for the field index -// nolint:unused func getFQKeyByIndex(attributeType reflect.Type, index int, keyPrefix string) GenericAttributeType { return getFQKey(attributeType.Field(index), keyPrefix) } -// osAttrsToGenericAttributes gets the generic attribute from the os specific attribute using reflection -// nolint:unused -func osAttrsToGenericAttributes(attributeType reflect.Type, attributeValuePtr *reflect.Value, keyPrefix string) (attrs map[GenericAttributeType]json.RawMessage, err error) { +// OSAttrsToGenericAttributes gets the generic attribute from the os specific attribute using reflection +func OSAttrsToGenericAttributes(attributeType reflect.Type, attributeValuePtr *reflect.Value, keyPrefix string) (attrs map[GenericAttributeType]json.RawMessage, err error) { attributeValue := *attributeValuePtr attrs = make(map[GenericAttributeType]json.RawMessage) diff --git a/internal/restic/node_aix.go 
b/internal/restic/node_aix.go index 32f63af15f0..4cd27997308 100644 --- a/internal/restic/node_aix.go +++ b/internal/restic/node_aix.go @@ -8,7 +8,7 @@ import ( "syscall" ) -func (node Node) restoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { +func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { return nil } @@ -23,13 +23,13 @@ func (s statT) atim() syscall.Timespec { return toTimespec(s.Atim) } func (s statT) mtim() syscall.Timespec { return toTimespec(s.Mtim) } func (s statT) ctim() syscall.Timespec { return toTimespec(s.Ctim) } -// restoreExtendedAttributes is a no-op on AIX. -func (node Node) restoreExtendedAttributes(_ string) error { +// nodeRestoreExtendedAttributes is a no-op on AIX. +func nodeRestoreExtendedAttributes(_ *Node, _ string) error { return nil } -// fillExtendedAttributes is a no-op on AIX. -func (node *Node) fillExtendedAttributes(_ string, _ bool) error { +// nodeFillExtendedAttributes is a no-op on AIX. +func nodeFillExtendedAttributes(_ *Node, _ string, _ bool) error { return nil } @@ -38,12 +38,12 @@ func IsListxattrPermissionError(_ error) bool { return false } -// restoreGenericAttributes is no-op on AIX. -func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { - return node.handleAllUnknownGenericAttributesFound(warn) +// nodeRestoreGenericAttributes is no-op on AIX. +func nodeRestoreGenericAttributes(node *Node, _ string, warn func(msg string)) error { + return HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) } -// fillGenericAttributes is a no-op on AIX. -func (node *Node) fillGenericAttributes(_ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +// nodeFillGenericAttributes is a no-op on AIX. +func nodeFillGenericAttributes(_ *Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { return true, nil } diff --git a/internal/restic/node_darwin.go b/internal/restic/node_darwin.go index 803aa68e572..099007e0756 100644 --- a/internal/restic/node_darwin.go +++ b/internal/restic/node_darwin.go @@ -2,7 +2,7 @@ package restic import "syscall" -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { +func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { return nil } diff --git a/internal/restic/node_freebsd.go b/internal/restic/node_freebsd.go index 34d5b272c01..6d2dd1d98af 100644 --- a/internal/restic/node_freebsd.go +++ b/internal/restic/node_freebsd.go @@ -5,7 +5,7 @@ package restic import "syscall" -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { +func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { return nil } diff --git a/internal/restic/node_linux.go b/internal/restic/node_linux.go index 85a3638306d..6311a224b12 100644 --- a/internal/restic/node_linux.go +++ b/internal/restic/node_linux.go @@ -10,7 +10,7 @@ import ( "github.com/restic/restic/internal/fs" ) -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { +func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { dir, err := fs.Open(filepath.Dir(path)) if err != nil { return errors.WithStack(err) diff --git a/internal/restic/node_netbsd.go b/internal/restic/node_netbsd.go index 0fe46a3f28d..a53412afbaa 100644 --- a/internal/restic/node_netbsd.go +++ b/internal/restic/node_netbsd.go @@ -5,7 +5,7 @@ import ( "syscall" ) -func (node Node) restoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) 
error { +func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { return nil } @@ -13,13 +13,13 @@ func (s statT) atim() syscall.Timespec { return s.Atimespec } func (s statT) mtim() syscall.Timespec { return s.Mtimespec } func (s statT) ctim() syscall.Timespec { return s.Ctimespec } -// restoreExtendedAttributes is a no-op on netbsd. -func (node Node) restoreExtendedAttributes(_ string) error { +// nodeRestoreExtendedAttributes is a no-op on netbsd. +func nodeRestoreExtendedAttributes(_ *Node, _ string) error { return nil } -// fillExtendedAttributes is a no-op on netbsd. -func (node *Node) fillExtendedAttributes(_ string, _ bool) error { +// nodeFillExtendedAttributes is a no-op on netbsd. +func nodeFillExtendedAttributes(_ *Node, _ string, _ bool) error { return nil } @@ -28,12 +28,12 @@ func IsListxattrPermissionError(_ error) bool { return false } -// restoreGenericAttributes is no-op on netbsd. -func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { - return node.handleAllUnknownGenericAttributesFound(warn) +// nodeRestoreGenericAttributes is no-op on netbsd. +func nodeRestoreGenericAttributes(node *Node, _ string, warn func(msg string)) error { + return HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) } -// fillGenericAttributes is a no-op on netbsd. -func (node *Node) fillGenericAttributes(_ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +// nodeFillGenericAttributes is a no-op on netbsd. +func nodeFillGenericAttributes(_ *Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { return true, nil } diff --git a/internal/restic/node_openbsd.go b/internal/restic/node_openbsd.go index 71841f59f6f..bbba89f2ca6 100644 --- a/internal/restic/node_openbsd.go +++ b/internal/restic/node_openbsd.go @@ -5,7 +5,7 @@ import ( "syscall" ) -func (node Node) restoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { +func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { return nil } @@ -13,13 +13,13 @@ func (s statT) atim() syscall.Timespec { return s.Atim } func (s statT) mtim() syscall.Timespec { return s.Mtim } func (s statT) ctim() syscall.Timespec { return s.Ctim } -// restoreExtendedAttributes is a no-op on openbsd. -func (node Node) restoreExtendedAttributes(_ string) error { +// nodeRestoreExtendedAttributes is a no-op on openbsd. +func nodeRestoreExtendedAttributes(_ *Node, _ string) error { return nil } -// fillExtendedAttributes is a no-op on openbsd. -func (node *Node) fillExtendedAttributes(_ string, _ bool) error { +// nodeFillExtendedAttributes is a no-op on openbsd. +func nodeFillExtendedAttributes(_ *Node, _ string, _ bool) error { return nil } @@ -28,12 +28,12 @@ func IsListxattrPermissionError(_ error) bool { return false } -// restoreGenericAttributes is no-op on openbsd. -func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { - return node.handleAllUnknownGenericAttributesFound(warn) +// nodeRestoreGenericAttributes is no-op on openbsd. +func nodeRestoreGenericAttributes(node *Node, _ string, warn func(msg string)) error { + return HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) } // fillGenericAttributes is a no-op on openbsd. 
-func (node *Node) fillGenericAttributes(_ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(_ *Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { return true, nil } diff --git a/internal/restic/node_solaris.go b/internal/restic/node_solaris.go index c9d03f9c263..114d117666d 100644 --- a/internal/restic/node_solaris.go +++ b/internal/restic/node_solaris.go @@ -2,7 +2,7 @@ package restic import "syscall" -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { +func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { return nil } diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index ab7f66e5b50..7258b5429f1 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -245,8 +245,8 @@ func TestNodeRestoreAt(t *testing.T) { } else { nodePath = filepath.Join(tempdir, test.Name) } - rtest.OK(t, test.CreateAt(context.TODO(), nodePath, nil)) - rtest.OK(t, test.RestoreMetadata(nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) + rtest.OK(t, NodeCreateAt(context.TODO(), &test, nodePath, nil)) + rtest.OK(t, NodeRestoreMetadata(&test, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) fi, err := os.Lstat(nodePath) rtest.OK(t, err) @@ -402,10 +402,10 @@ func TestSymlinkSerializationFormat(t *testing.T) { func TestNodeRestoreMetadataError(t *testing.T) { tempdir := t.TempDir() - node := nodeTests[0] + node := &nodeTests[0] nodePath := filepath.Join(tempdir, node.Name) // This will fail because the target file does not exist - err := node.RestoreMetadata(nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }) + err := NodeRestoreMetadata(node, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }) test.Assert(t, errors.Is(err, os.ErrNotExist), "failed for an unexpected reason") } diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index bce01ccad04..3f836ae613a 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -56,7 +56,7 @@ func lchown(_ string, _ int, _ int) (err error) { } // restoreSymlinkTimestamps restores timestamps for symlinks -func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { +func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { // tweaked version of UtimesNano from go/src/syscall/syscall_windows.go pathp, e := syscall.UTF16PtrFromString(path) if e != nil { @@ -82,7 +82,7 @@ func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespe } // restore extended attributes for windows -func (node Node) restoreExtendedAttributes(path string) (err error) { +func nodeRestoreExtendedAttributes(node *Node, path string) (err error) { count := len(node.ExtendedAttributes) if count > 0 { eas := make([]fs.ExtendedAttribute, count) @@ -97,7 +97,7 @@ func (node Node) restoreExtendedAttributes(path string) (err error) { } // fill extended attributes in the node. This also includes the Generic attributes for windows. 
-func (node *Node) fillExtendedAttributes(path string, _ bool) (err error) { +func nodeFillExtendedAttributes(node *Node, path string, _ bool) (err error) { var fileHandle windows.Handle if fileHandle, err = fs.OpenHandleForEA(node.Type, path, false); fileHandle == 0 { return nil @@ -210,7 +210,7 @@ func (s statT) ctim() syscall.Timespec { } // restoreGenericAttributes restores generic attributes for Windows -func (node Node) restoreGenericAttributes(path string, warn func(msg string)) (err error) { +func nodeRestoreGenericAttributes(node *Node, path string, warn func(msg string)) (err error) { if len(node.GenericAttributes) == 0 { return nil } @@ -242,7 +242,7 @@ func (node Node) restoreGenericAttributes(path string, warn func(msg string)) (e // genericAttributesToWindowsAttrs converts the generic attributes map to a WindowsAttributes and also returns a string of unknown attributes that it could not convert. func genericAttributesToWindowsAttrs(attrs map[GenericAttributeType]json.RawMessage) (windowsAttributes WindowsAttributes, unknownAttribs []GenericAttributeType, err error) { waValue := reflect.ValueOf(&windowsAttributes).Elem() - unknownAttribs, err = genericAttributesToOSAttrs(attrs, reflect.TypeOf(windowsAttributes), &waValue, "windows") + unknownAttribs, err = GenericAttributesToOSAttrs(attrs, reflect.TypeOf(windowsAttributes), &waValue, "windows") return windowsAttributes, unknownAttribs, err } @@ -361,11 +361,11 @@ func decryptFile(pathPointer *uint16) error { return nil } -// fillGenericAttributes fills in the generic attributes for windows like File Attributes, +// nodeFillGenericAttributes fills in the generic attributes for windows like File Attributes, // Created time and Security Descriptors. // It also checks if the volume supports extended attributes and stores the result in a map // so that it does not have to be checked again for subsequent calls for paths in the same volume. -func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(node *Node, path string, fi os.FileInfo, stat *statT) (allowExtended bool, err error) { if strings.Contains(filepath.Base(path), ":") { // Do not process for Alternate Data Streams in Windows // Also do not allow processing of extended attributes for ADS. @@ -499,7 +499,7 @@ func prepareVolumeName(path string) (volumeName string, err error) { func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs map[GenericAttributeType]json.RawMessage, err error) { // Get the value of the WindowsAttributes windowsAttributesValue := reflect.ValueOf(windowsAttributes) - return osAttrsToGenericAttributes(reflect.TypeOf(windowsAttributes), &windowsAttributesValue, runtime.GOOS) + return OSAttrsToGenericAttributes(reflect.TypeOf(windowsAttributes), &windowsAttributesValue, runtime.GOOS) } // getCreationTime gets the value for the WindowsAttribute CreationTime in a windows specific time format. diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go index 6ba25559b7a..e78c8cb9667 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/restic/node_windows_test.go @@ -42,7 +42,7 @@ func testRestoreSecurityDescriptor(t *testing.T, sd string, tempDir, fileType, f expectedNode := getNode(fileName, fileType, genericAttributes) // Restore the file/dir and restore the meta data including the security descriptors. 
- testPath, node := restoreAndGetNode(t, tempDir, expectedNode, false) + testPath, node := restoreAndGetNode(t, tempDir, &expectedNode, false) // Get the security descriptor from the node constructed from the file info of the restored path. sdByteFromRestoredNode := getWindowsAttr(t, testPath, node).SecurityDescriptor @@ -186,7 +186,7 @@ func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName func runGenericAttributesTestForNodes(t *testing.T, expectedNodes []Node, tempDir string, genericAttr GenericAttributeType, genericAttributeExpected WindowsAttributes, warningExpected bool) { for _, testNode := range expectedNodes { - testPath, node := restoreAndGetNode(t, tempDir, testNode, warningExpected) + testPath, node := restoreAndGetNode(t, tempDir, &testNode, warningExpected) rawMessage := node.GenericAttributes[genericAttr] genericAttrsExpected, err := WindowsAttrsToGenericAttributes(genericAttributeExpected) test.OK(t, err) @@ -195,7 +195,7 @@ func runGenericAttributesTestForNodes(t *testing.T, expectedNodes []Node, tempDi } } -func restoreAndGetNode(t *testing.T, tempDir string, testNode Node, warningExpected bool) (string, *Node) { +func restoreAndGetNode(t *testing.T, tempDir string, testNode *Node, warningExpected bool) (string, *Node) { testPath := filepath.Join(tempDir, "001", testNode.Name) err := os.MkdirAll(filepath.Dir(testPath), testNode.Mode) test.OK(t, errors.Wrapf(err, "Failed to create parent directories for: %s", testPath)) @@ -211,7 +211,7 @@ func restoreAndGetNode(t *testing.T, tempDir string, testNode Node, warningExpec test.OK(t, errors.Wrapf(err, "Failed to create test directory: %s", testPath)) } - err = testNode.RestoreMetadata(testPath, func(msg string) { + err = NodeRestoreMetadata(testNode, testPath, func(msg string) { if warningExpected { test.Assert(t, warningExpected, "Warning triggered as expected: %s", msg) } else { @@ -260,7 +260,7 @@ func TestNewGenericAttributeType(t *testing.T) { }, } for _, testNode := range expectedNodes { - testPath, node := restoreAndGetNode(t, tempDir, testNode, true) + testPath, node := restoreAndGetNode(t, tempDir, &testNode, true) _, ua, err := genericAttributesToWindowsAttrs(node.GenericAttributes) test.OK(t, err) // Since this GenericAttribute is unknown to this version of the software, it will not get set on the file. @@ -296,7 +296,7 @@ func TestRestoreExtendedAttributes(t *testing.T) { }, } for _, testNode := range expectedNodes { - testPath, node := restoreAndGetNode(t, tempDir, testNode, false) + testPath, node := restoreAndGetNode(t, tempDir, &testNode, false) var handle windows.Handle var err error diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go index 5a5a253d98b..062ef4345a2 100644 --- a/internal/restic/node_xattr.go +++ b/internal/restic/node_xattr.go @@ -64,17 +64,17 @@ func handleXattrErr(err error) error { } } -// restoreGenericAttributes is no-op. -func (node *Node) restoreGenericAttributes(_ string, warn func(msg string)) error { - return node.handleAllUnknownGenericAttributesFound(warn) +// nodeRestoreGenericAttributes is no-op. +func nodeRestoreGenericAttributes(node *Node, _ string, warn func(msg string)) error { + return HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) } -// fillGenericAttributes is a no-op. -func (node *Node) fillGenericAttributes(_ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +// nodeFillGenericAttributes is a no-op. 
+func nodeFillGenericAttributes(_ *Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { return true, nil } -func (node Node) restoreExtendedAttributes(path string) error { +func nodeRestoreExtendedAttributes(node *Node, path string) error { expectedAttrs := map[string]struct{}{} for _, attr := range node.ExtendedAttributes { err := setxattr(path, attr.Name, attr.Value) @@ -101,7 +101,7 @@ func (node Node) restoreExtendedAttributes(path string) error { return nil } -func (node *Node) fillExtendedAttributes(path string, ignoreListError bool) error { +func nodeFillExtendedAttributes(node *Node, path string, ignoreListError bool) error { xattrs, err := listxattr(path) debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err) if err != nil { diff --git a/internal/restic/node_xattr_all_test.go b/internal/restic/node_xattr_all_test.go index 56ce5e28603..30d29a6ed08 100644 --- a/internal/restic/node_xattr_all_test.go +++ b/internal/restic/node_xattr_all_test.go @@ -21,18 +21,18 @@ func setAndVerifyXattr(t *testing.T, file string, attrs []ExtendedAttribute) { } } - node := Node{ + node := &Node{ Type: "file", ExtendedAttributes: attrs, } - rtest.OK(t, node.restoreExtendedAttributes(file)) + rtest.OK(t, nodeRestoreExtendedAttributes(node, file)) - nodeActual := Node{ + nodeActual := &Node{ Type: "file", } - rtest.OK(t, nodeActual.fillExtendedAttributes(file, false)) + rtest.OK(t, nodeFillExtendedAttributes(nodeActual, file, false)) - rtest.Assert(t, nodeActual.sameExtendedAttributes(node), "xattr mismatch got %v expected %v", nodeActual.ExtendedAttributes, node.ExtendedAttributes) + rtest.Assert(t, nodeActual.Equals(*node), "xattr mismatch got %v expected %v", nodeActual.ExtendedAttributes, node.ExtendedAttributes) } func TestOverwriteXattr(t *testing.T) { diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 0e30b82f8fb..31157c97933 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -272,7 +272,7 @@ func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, targe return errors.Wrap(err, "RemoveNode") } - err := node.CreateAt(ctx, target, res.repo) + err := restic.NodeCreateAt(ctx, node, target, res.repo) if err != nil { debug.Log("node.CreateAt(%s) error %v", target, err) return err @@ -288,7 +288,7 @@ func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location s return nil } debug.Log("restoreNodeMetadata %v %v %v", node.Name, target, location) - err := node.RestoreMetadata(target, res.Warn) + err := restic.NodeRestoreMetadata(node, target, res.Warn) if err != nil { debug.Log("node.RestoreMetadata(%s) error %v", target, err) } From a2e54eac6427791c81f27acbe1daded84ba70b3b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 22:41:06 +0200 Subject: [PATCH 636/893] restic: simplify nodeCreateFileAt The code to write the file content is never used. --- internal/restic/node.go | 35 +++++------------------------------ internal/restic/node_test.go | 3 +-- internal/restorer/restorer.go | 6 +++--- 3 files changed, 9 insertions(+), 35 deletions(-) diff --git a/internal/restic/node.go b/internal/restic/node.go index c9c68cbee23..e23d39f1b1b 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -1,7 +1,6 @@ package restic import ( - "context" "encoding/json" "fmt" "os" @@ -188,7 +187,7 @@ func (node Node) GetExtendedAttribute(a string) []byte { } // NodeCreateAt creates the node at the given path but does NOT restore node meta data. 
-func NodeCreateAt(ctx context.Context, node *Node, path string, repo BlobLoader) error { +func NodeCreateAt(node *Node, path string) error { debug.Log("create node %v at %v", node.Name, path) switch node.Type { @@ -197,7 +196,7 @@ func NodeCreateAt(ctx context.Context, node *Node, path string, repo BlobLoader) return err } case "file": - if err := nodeCreateFileAt(ctx, node, path, repo); err != nil { + if err := nodeCreateFileAt(path); err != nil { return err } case "symlink": @@ -310,38 +309,14 @@ func nodeCreateDirAt(node *Node, path string) error { return nil } -func nodeCreateFileAt(ctx context.Context, node *Node, path string, repo BlobLoader) error { +func nodeCreateFileAt(path string) error { f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) if err != nil { return errors.WithStack(err) } - err = nodeWriteNodeContent(ctx, node, repo, f) - closeErr := f.Close() - - if err != nil { - return err - } - - if closeErr != nil { - return errors.WithStack(closeErr) - } - - return nil -} - -func nodeWriteNodeContent(ctx context.Context, node *Node, repo BlobLoader, f *os.File) error { - var buf []byte - for _, id := range node.Content { - buf, err := repo.LoadBlob(ctx, DataBlob, id, buf) - if err != nil { - return err - } - - _, err = f.Write(buf) - if err != nil { - return errors.WithStack(err) - } + if err := f.Close(); err != nil { + return errors.WithStack(err) } return nil diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index 7258b5429f1..075dd5cc528 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -1,7 +1,6 @@ package restic import ( - "context" "encoding/json" "fmt" "os" @@ -245,7 +244,7 @@ func TestNodeRestoreAt(t *testing.T) { } else { nodePath = filepath.Join(tempdir, test.Name) } - rtest.OK(t, NodeCreateAt(context.TODO(), &test, nodePath, nil)) + rtest.OK(t, NodeCreateAt(&test, nodePath)) rtest.OK(t, NodeRestoreMetadata(&test, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) fi, err := os.Lstat(nodePath) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 31157c97933..83644c7ac68 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -265,14 +265,14 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str return filenames, hasRestored, nil } -func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, target, location string) error { +func (res *Restorer) restoreNodeTo(node *restic.Node, target, location string) error { if !res.opts.DryRun { debug.Log("restoreNode %v %v %v", node.Name, target, location) if err := fs.Remove(target); err != nil && !errors.Is(err, os.ErrNotExist) { return errors.Wrap(err, "RemoveNode") } - err := restic.NodeCreateAt(ctx, node, target, res.repo) + err := restic.NodeCreateAt(node, target) if err != nil { debug.Log("node.CreateAt(%s) error %v", target, err) return err @@ -435,7 +435,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error) debug.Log("second pass, visitNode: restore node %q", location) if node.Type != "file" { _, err := res.withOverwriteCheck(ctx, node, target, location, false, nil, func(_ bool, _ *fileState) error { - return res.restoreNodeTo(ctx, node, target, location) + return res.restoreNodeTo(node, target, location) }) return err } From b9b32e5647a26ceb3a3ebe8a927ba35be6153323 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 26 Aug 2024 23:03:25 +0200 Subject: [PATCH 
637/893] restic: extract Node filesystem code to fs package --- internal/archiver/archiver.go | 2 +- internal/archiver/archiver_test.go | 4 +- internal/archiver/archiver_unix_test.go | 2 +- internal/archiver/file_saver_test.go | 2 +- internal/{restic => fs}/mknod_unix.go | 2 +- internal/fs/node.go | 334 ++++++++++++++++++ internal/{restic => fs}/node_aix.go | 14 +- internal/{restic => fs}/node_darwin.go | 2 +- internal/{restic => fs}/node_freebsd.go | 2 +- internal/{restic => fs}/node_linux.go | 5 +- internal/{restic => fs}/node_netbsd.go | 14 +- internal/{restic => fs}/node_openbsd.go | 14 +- internal/{restic => fs}/node_solaris.go | 2 +- internal/fs/node_test.go | 324 +++++++++++++++++ internal/{restic => fs}/node_unix.go | 2 +- internal/{restic => fs}/node_unix_test.go | 7 +- internal/{restic => fs}/node_windows.go | 66 ++-- internal/{restic => fs}/node_windows_test.go | 56 +-- internal/{restic => fs}/node_xattr.go | 17 +- .../{restic => fs}/node_xattr_all_test.go | 13 +- internal/{restic => fs}/node_xattr_test.go | 2 +- internal/restic/node.go | 323 ----------------- internal/restic/node_test.go | 314 ---------------- internal/restic/tree_test.go | 5 +- internal/restorer/restorer.go | 4 +- internal/restorer/restorer_windows_test.go | 3 +- 26 files changed, 783 insertions(+), 752 deletions(-) rename internal/{restic => fs}/mknod_unix.go (93%) create mode 100644 internal/fs/node.go rename internal/{restic => fs}/node_aix.go (68%) rename internal/{restic => fs}/node_darwin.go (95%) rename internal/{restic => fs}/node_freebsd.go (96%) rename internal/{restic => fs}/node_linux.go (88%) rename internal/{restic => fs}/node_netbsd.go (58%) rename internal/{restic => fs}/node_openbsd.go (57%) rename internal/{restic => fs}/node_solaris.go (95%) create mode 100644 internal/fs/node_test.go rename internal/{restic => fs}/node_unix.go (97%) rename internal/{restic => fs}/node_unix_test.go (94%) rename internal/{restic => fs}/node_windows.go (88%) rename internal/{restic => fs}/node_windows_test.go (90%) rename internal/{restic => fs}/node_xattr.go (80%) rename internal/{restic => fs}/node_xattr_all_test.go (77%) rename internal/{restic => fs}/node_xattr_test.go (98%) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index e7c346d3a43..c576d047c81 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -248,7 +248,7 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I // nodeFromFileInfo returns the restic node from an os.FileInfo. 
func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - node, err := restic.NodeFromFileInfo(filename, fi, ignoreXattrListError) + node, err := fs.NodeFromFileInfo(filename, fi, ignoreXattrListError) if !arch.WithAtime { node.AccessTime = node.ModTime } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index c54f9ea3311..18151eb024f 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -557,7 +557,7 @@ func rename(t testing.TB, oldname, newname string) { } func nodeFromFI(t testing.TB, filename string, fi os.FileInfo) *restic.Node { - node, err := restic.NodeFromFileInfo(filename, fi, false) + node, err := fs.NodeFromFileInfo(filename, fi, false) if err != nil { t.Fatal(err) } @@ -2291,7 +2291,7 @@ func TestMetadataChanged(t *testing.T) { // get metadata fi := lstat(t, "testfile") - want, err := restic.NodeFromFileInfo("testfile", fi, false) + want, err := fs.NodeFromFileInfo("testfile", fi, false) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 4a380dff8d2..d91d993dd52 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -48,7 +48,7 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*restic.Node, *restic.Node) { fi := lstat(t, name) - want, err := restic.NodeFromFileInfo(name, fi, false) + want, err := fs.NodeFromFileInfo(name, fi, false) rtest.OK(t, err) _, node := snapshot(t, repo, fs.Local{}, nil, name) diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index 409bdedd037..4a43275725a 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -50,7 +50,7 @@ func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Cont s := NewFileSaver(ctx, wg, saveBlob, pol, workers, workers) s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - return restic.NodeFromFileInfo(filename, fi, ignoreXattrListError) + return fs.NodeFromFileInfo(filename, fi, ignoreXattrListError) } return s, ctx, wg diff --git a/internal/restic/mknod_unix.go b/internal/fs/mknod_unix.go similarity index 93% rename from internal/restic/mknod_unix.go rename to internal/fs/mknod_unix.go index 7dd6c60d0b9..6127599f72e 100644 --- a/internal/restic/mknod_unix.go +++ b/internal/fs/mknod_unix.go @@ -1,7 +1,7 @@ //go:build !freebsd && !windows // +build !freebsd,!windows -package restic +package fs import "golang.org/x/sys/unix" diff --git a/internal/fs/node.go b/internal/fs/node.go new file mode 100644 index 00000000000..9bd507ba564 --- /dev/null +++ b/internal/fs/node.go @@ -0,0 +1,334 @@ +package fs + +import ( + "os" + "os/user" + "strconv" + "sync" + "syscall" + "time" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +// NodeFromFileInfo returns a new node from the given path and FileInfo. It +// returns the first error that is encountered, together with a node. 
+func NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { + mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky + node := &restic.Node{ + Path: path, + Name: fi.Name(), + Mode: fi.Mode() & mask, + ModTime: fi.ModTime(), + } + + node.Type = nodeTypeFromFileInfo(fi) + if node.Type == "file" { + node.Size = uint64(fi.Size()) + } + + err := nodeFillExtra(node, path, fi, ignoreXattrListError) + return node, err +} + +func nodeTypeFromFileInfo(fi os.FileInfo) string { + switch fi.Mode() & os.ModeType { + case 0: + return "file" + case os.ModeDir: + return "dir" + case os.ModeSymlink: + return "symlink" + case os.ModeDevice | os.ModeCharDevice: + return "chardev" + case os.ModeDevice: + return "dev" + case os.ModeNamedPipe: + return "fifo" + case os.ModeSocket: + return "socket" + case os.ModeIrregular: + return "irregular" + } + + return "" +} + +func nodeFillExtra(node *restic.Node, path string, fi os.FileInfo, ignoreXattrListError bool) error { + stat, ok := toStatT(fi.Sys()) + if !ok { + // fill minimal info with current values for uid, gid + node.UID = uint32(os.Getuid()) + node.GID = uint32(os.Getgid()) + node.ChangeTime = node.ModTime + return nil + } + + node.Inode = uint64(stat.ino()) + node.DeviceID = uint64(stat.dev()) + + nodeFillTimes(node, stat) + + nodeFillUser(node, stat) + + switch node.Type { + case "file": + node.Size = uint64(stat.size()) + node.Links = uint64(stat.nlink()) + case "dir": + case "symlink": + var err error + node.LinkTarget, err = Readlink(path) + node.Links = uint64(stat.nlink()) + if err != nil { + return errors.WithStack(err) + } + case "dev": + node.Device = uint64(stat.rdev()) + node.Links = uint64(stat.nlink()) + case "chardev": + node.Device = uint64(stat.rdev()) + node.Links = uint64(stat.nlink()) + case "fifo": + case "socket": + default: + return errors.Errorf("unsupported file type %q", node.Type) + } + + allowExtended, err := nodeFillGenericAttributes(node, path, fi, stat) + if allowExtended { + // Skip processing ExtendedAttributes if allowExtended is false. + err = errors.CombineErrors(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) + } + return err +} + +func nodeFillTimes(node *restic.Node, stat *statT) { + ctim := stat.ctim() + atim := stat.atim() + node.ChangeTime = time.Unix(ctim.Unix()) + node.AccessTime = time.Unix(atim.Unix()) +} + +func nodeFillUser(node *restic.Node, stat *statT) { + uid, gid := stat.uid(), stat.gid() + node.UID, node.GID = uid, gid + node.User = lookupUsername(uid) + node.Group = lookupGroup(gid) +} + +var ( + uidLookupCache = make(map[uint32]string) + uidLookupCacheMutex = sync.RWMutex{} +) + +// Cached user name lookup by uid. Returns "" when no name can be found. +func lookupUsername(uid uint32) string { + uidLookupCacheMutex.RLock() + username, ok := uidLookupCache[uid] + uidLookupCacheMutex.RUnlock() + + if ok { + return username + } + + u, err := user.LookupId(strconv.Itoa(int(uid))) + if err == nil { + username = u.Username + } + + uidLookupCacheMutex.Lock() + uidLookupCache[uid] = username + uidLookupCacheMutex.Unlock() + + return username +} + +var ( + gidLookupCache = make(map[uint32]string) + gidLookupCacheMutex = sync.RWMutex{} +) + +// Cached group name lookup by gid. Returns "" when no name can be found. 
+func lookupGroup(gid uint32) string { + gidLookupCacheMutex.RLock() + group, ok := gidLookupCache[gid] + gidLookupCacheMutex.RUnlock() + + if ok { + return group + } + + g, err := user.LookupGroupId(strconv.Itoa(int(gid))) + if err == nil { + group = g.Name + } + + gidLookupCacheMutex.Lock() + gidLookupCache[gid] = group + gidLookupCacheMutex.Unlock() + + return group +} + +// NodeCreateAt creates the node at the given path but does NOT restore node meta data. +func NodeCreateAt(node *restic.Node, path string) error { + debug.Log("create node %v at %v", node.Name, path) + + switch node.Type { + case "dir": + if err := nodeCreateDirAt(node, path); err != nil { + return err + } + case "file": + if err := nodeCreateFileAt(path); err != nil { + return err + } + case "symlink": + if err := nodeCreateSymlinkAt(node, path); err != nil { + return err + } + case "dev": + if err := nodeCreateDevAt(node, path); err != nil { + return err + } + case "chardev": + if err := nodeCreateCharDevAt(node, path); err != nil { + return err + } + case "fifo": + if err := nodeCreateFifoAt(path); err != nil { + return err + } + case "socket": + return nil + default: + return errors.Errorf("filetype %q not implemented", node.Type) + } + + return nil +} + +func nodeCreateDirAt(node *restic.Node, path string) error { + err := Mkdir(path, node.Mode) + if err != nil && !os.IsExist(err) { + return errors.WithStack(err) + } + + return nil +} + +func nodeCreateFileAt(path string) error { + f, err := OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + return errors.WithStack(err) + } + + if err := f.Close(); err != nil { + return errors.WithStack(err) + } + + return nil +} + +func nodeCreateSymlinkAt(node *restic.Node, path string) error { + if err := Symlink(node.LinkTarget, path); err != nil { + return errors.WithStack(err) + } + + return nil +} + +func nodeCreateDevAt(node *restic.Node, path string) error { + return mknod(path, syscall.S_IFBLK|0600, node.Device) +} + +func nodeCreateCharDevAt(node *restic.Node, path string) error { + return mknod(path, syscall.S_IFCHR|0600, node.Device) +} + +func nodeCreateFifoAt(path string) error { + return mkfifo(path, 0600) +} + +func mkfifo(path string, mode uint32) (err error) { + return mknod(path, mode|syscall.S_IFIFO, 0) +} + +// NodeRestoreMetadata restores node metadata +func NodeRestoreMetadata(node *restic.Node, path string, warn func(msg string)) error { + err := nodeRestoreMetadata(node, path, warn) + if err != nil { + // It is common to have permission errors for folders like /home + // unless you're running as root, so ignore those. 
+ if os.Geteuid() > 0 && errors.Is(err, os.ErrPermission) { + debug.Log("not running as root, ignoring permission error for %v: %v", + path, err) + return nil + } + debug.Log("restoreMetadata(%s) error %v", path, err) + } + + return err +} + +func nodeRestoreMetadata(node *restic.Node, path string, warn func(msg string)) error { + var firsterr error + + if err := lchown(path, int(node.UID), int(node.GID)); err != nil { + firsterr = errors.WithStack(err) + } + + if err := nodeRestoreExtendedAttributes(node, path); err != nil { + debug.Log("error restoring extended attributes for %v: %v", path, err) + if firsterr == nil { + firsterr = err + } + } + + if err := nodeRestoreGenericAttributes(node, path, warn); err != nil { + debug.Log("error restoring generic attributes for %v: %v", path, err) + if firsterr == nil { + firsterr = err + } + } + + if err := NodeRestoreTimestamps(node, path); err != nil { + debug.Log("error restoring timestamps for %v: %v", path, err) + if firsterr == nil { + firsterr = err + } + } + + // Moving RestoreTimestamps and restoreExtendedAttributes calls above as for readonly files in windows + // calling Chmod below will no longer allow any modifications to be made on the file and the + // calls above would fail. + if node.Type != "symlink" { + if err := Chmod(path, node.Mode); err != nil { + if firsterr == nil { + firsterr = errors.WithStack(err) + } + } + } + + return firsterr +} + +func NodeRestoreTimestamps(node *restic.Node, path string) error { + var utimes = [...]syscall.Timespec{ + syscall.NsecToTimespec(node.AccessTime.UnixNano()), + syscall.NsecToTimespec(node.ModTime.UnixNano()), + } + + if node.Type == "symlink" { + return nodeRestoreSymlinkTimestamps(path, utimes) + } + + if err := syscall.UtimesNano(path, utimes[:]); err != nil { + return errors.Wrap(err, "UtimesNano") + } + + return nil +} diff --git a/internal/restic/node_aix.go b/internal/fs/node_aix.go similarity index 68% rename from internal/restic/node_aix.go rename to internal/fs/node_aix.go index 4cd27997308..4e694442511 100644 --- a/internal/restic/node_aix.go +++ b/internal/fs/node_aix.go @@ -1,11 +1,13 @@ //go:build aix // +build aix -package restic +package fs import ( "os" "syscall" + + "github.com/restic/restic/internal/restic" ) func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { @@ -24,12 +26,12 @@ func (s statT) mtim() syscall.Timespec { return toTimespec(s.Mtim) } func (s statT) ctim() syscall.Timespec { return toTimespec(s.Ctim) } // nodeRestoreExtendedAttributes is a no-op on AIX. -func nodeRestoreExtendedAttributes(_ *Node, _ string) error { +func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { return nil } // nodeFillExtendedAttributes is a no-op on AIX. -func nodeFillExtendedAttributes(_ *Node, _ string, _ bool) error { +func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { return nil } @@ -39,11 +41,11 @@ func IsListxattrPermissionError(_ error) bool { } // nodeRestoreGenericAttributes is no-op on AIX. -func nodeRestoreGenericAttributes(node *Node, _ string, warn func(msg string)) error { - return HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) +func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { + return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) } // nodeFillGenericAttributes is a no-op on AIX. 
-func nodeFillGenericAttributes(_ *Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { return true, nil } diff --git a/internal/restic/node_darwin.go b/internal/fs/node_darwin.go similarity index 95% rename from internal/restic/node_darwin.go rename to internal/fs/node_darwin.go index 099007e0756..1ca7ce4806c 100644 --- a/internal/restic/node_darwin.go +++ b/internal/fs/node_darwin.go @@ -1,4 +1,4 @@ -package restic +package fs import "syscall" diff --git a/internal/restic/node_freebsd.go b/internal/fs/node_freebsd.go similarity index 96% rename from internal/restic/node_freebsd.go rename to internal/fs/node_freebsd.go index 6d2dd1d98af..8796358b022 100644 --- a/internal/restic/node_freebsd.go +++ b/internal/fs/node_freebsd.go @@ -1,7 +1,7 @@ //go:build freebsd // +build freebsd -package restic +package fs import "syscall" diff --git a/internal/restic/node_linux.go b/internal/fs/node_linux.go similarity index 88% rename from internal/restic/node_linux.go rename to internal/fs/node_linux.go index 6311a224b12..1cb4ee1ae15 100644 --- a/internal/restic/node_linux.go +++ b/internal/fs/node_linux.go @@ -1,4 +1,4 @@ -package restic +package fs import ( "path/filepath" @@ -7,11 +7,10 @@ import ( "golang.org/x/sys/unix" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" ) func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - dir, err := fs.Open(filepath.Dir(path)) + dir, err := Open(filepath.Dir(path)) if err != nil { return errors.WithStack(err) } diff --git a/internal/restic/node_netbsd.go b/internal/fs/node_netbsd.go similarity index 58% rename from internal/restic/node_netbsd.go rename to internal/fs/node_netbsd.go index a53412afbaa..c71e4bdf56f 100644 --- a/internal/restic/node_netbsd.go +++ b/internal/fs/node_netbsd.go @@ -1,8 +1,10 @@ -package restic +package fs import ( "os" "syscall" + + "github.com/restic/restic/internal/restic" ) func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { @@ -14,12 +16,12 @@ func (s statT) mtim() syscall.Timespec { return s.Mtimespec } func (s statT) ctim() syscall.Timespec { return s.Ctimespec } // nodeRestoreExtendedAttributes is a no-op on netbsd. -func nodeRestoreExtendedAttributes(_ *Node, _ string) error { +func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { return nil } // nodeFillExtendedAttributes is a no-op on netbsd. -func nodeFillExtendedAttributes(_ *Node, _ string, _ bool) error { +func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { return nil } @@ -29,11 +31,11 @@ func IsListxattrPermissionError(_ error) bool { } // nodeRestoreGenericAttributes is no-op on netbsd. -func nodeRestoreGenericAttributes(node *Node, _ string, warn func(msg string)) error { - return HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) +func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { + return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) } // nodeFillGenericAttributes is a no-op on netbsd. 
-func nodeFillGenericAttributes(_ *Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { return true, nil } diff --git a/internal/restic/node_openbsd.go b/internal/fs/node_openbsd.go similarity index 57% rename from internal/restic/node_openbsd.go rename to internal/fs/node_openbsd.go index bbba89f2ca6..f74f2ae009f 100644 --- a/internal/restic/node_openbsd.go +++ b/internal/fs/node_openbsd.go @@ -1,8 +1,10 @@ -package restic +package fs import ( "os" "syscall" + + "github.com/restic/restic/internal/restic" ) func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { @@ -14,12 +16,12 @@ func (s statT) mtim() syscall.Timespec { return s.Mtim } func (s statT) ctim() syscall.Timespec { return s.Ctim } // nodeRestoreExtendedAttributes is a no-op on openbsd. -func nodeRestoreExtendedAttributes(_ *Node, _ string) error { +func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { return nil } // nodeFillExtendedAttributes is a no-op on openbsd. -func nodeFillExtendedAttributes(_ *Node, _ string, _ bool) error { +func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { return nil } @@ -29,11 +31,11 @@ func IsListxattrPermissionError(_ error) bool { } // nodeRestoreGenericAttributes is no-op on openbsd. -func nodeRestoreGenericAttributes(node *Node, _ string, warn func(msg string)) error { - return HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) +func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { + return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) } // fillGenericAttributes is a no-op on openbsd. 
-func nodeFillGenericAttributes(_ *Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { return true, nil } diff --git a/internal/restic/node_solaris.go b/internal/fs/node_solaris.go similarity index 95% rename from internal/restic/node_solaris.go rename to internal/fs/node_solaris.go index 114d117666d..3f025b334be 100644 --- a/internal/restic/node_solaris.go +++ b/internal/fs/node_solaris.go @@ -1,4 +1,4 @@ -package restic +package fs import "syscall" diff --git a/internal/fs/node_test.go b/internal/fs/node_test.go new file mode 100644 index 00000000000..e7f608352c2 --- /dev/null +++ b/internal/fs/node_test.go @@ -0,0 +1,324 @@ +package fs + +import ( + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" +) + +func BenchmarkNodeFillUser(t *testing.B) { + tempfile, err := os.CreateTemp("", "restic-test-temp-") + if err != nil { + t.Fatal(err) + } + + fi, err := tempfile.Stat() + if err != nil { + t.Fatal(err) + } + + path := tempfile.Name() + + t.ResetTimer() + + for i := 0; i < t.N; i++ { + _, err := NodeFromFileInfo(path, fi, false) + rtest.OK(t, err) + } + + rtest.OK(t, tempfile.Close()) + rtest.RemoveAll(t, tempfile.Name()) +} + +func BenchmarkNodeFromFileInfo(t *testing.B) { + tempfile, err := os.CreateTemp("", "restic-test-temp-") + if err != nil { + t.Fatal(err) + } + + fi, err := tempfile.Stat() + if err != nil { + t.Fatal(err) + } + + path := tempfile.Name() + + t.ResetTimer() + + for i := 0; i < t.N; i++ { + _, err := NodeFromFileInfo(path, fi, false) + if err != nil { + t.Fatal(err) + } + } + + rtest.OK(t, tempfile.Close()) + rtest.RemoveAll(t, tempfile.Name()) +} + +func parseTime(s string) time.Time { + t, err := time.Parse("2006-01-02 15:04:05.999", s) + if err != nil { + panic(err) + } + + return t.Local() +} + +var nodeTests = []restic.Node{ + { + Name: "testFile", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSuidFile", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0755 | os.ModeSetuid, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSuidFile2", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0755 | os.ModeSetgid, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSticky", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0755 | os.ModeSticky, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testDir", + Type: "dir", + Subtree: nil, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0750 | os.ModeDir, + 
ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSymlink", + Type: "symlink", + LinkTarget: "invalid", + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0777 | os.ModeSymlink, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + + // include "testFile" and "testDir" again with slightly different + // metadata, so we can test if CreateAt works with pre-existing files. + { + Name: "testFile", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + }, + { + Name: "testDir", + Type: "dir", + Subtree: nil, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0750 | os.ModeDir, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + }, + { + Name: "testXattrFile", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []restic.ExtendedAttribute{ + {Name: "user.foo", Value: []byte("bar")}, + }, + }, + { + Name: "testXattrDir", + Type: "dir", + Subtree: nil, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0750 | os.ModeDir, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []restic.ExtendedAttribute{ + {Name: "user.foo", Value: []byte("bar")}, + }, + }, + { + Name: "testXattrFileMacOSResourceFork", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + ExtendedAttributes: []restic.ExtendedAttribute{ + {Name: "com.apple.ResourceFork", Value: []byte("bar")}, + }, + }, +} + +func TestNodeRestoreAt(t *testing.T) { + tempdir := t.TempDir() + + for _, test := range nodeTests { + t.Run("", func(t *testing.T) { + var nodePath string + if test.ExtendedAttributes != nil { + if runtime.GOOS == "windows" { + // In windows extended attributes are case insensitive and windows returns + // the extended attributes in UPPER case. + // Update the tests to use UPPER case xattr names for windows. 
+ extAttrArr := test.ExtendedAttributes + // Iterate through the array using pointers + for i := 0; i < len(extAttrArr); i++ { + extAttrArr[i].Name = strings.ToUpper(extAttrArr[i].Name) + } + } + for _, attr := range test.ExtendedAttributes { + if strings.HasPrefix(attr.Name, "com.apple.") && runtime.GOOS != "darwin" { + t.Skipf("attr %v only relevant on macOS", attr.Name) + } + } + + // tempdir might be backed by a filesystem that does not support + // extended attributes + nodePath = test.Name + defer func() { + _ = os.Remove(nodePath) + }() + } else { + nodePath = filepath.Join(tempdir, test.Name) + } + rtest.OK(t, NodeCreateAt(&test, nodePath)) + rtest.OK(t, NodeRestoreMetadata(&test, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) + + fi, err := os.Lstat(nodePath) + rtest.OK(t, err) + + n2, err := NodeFromFileInfo(nodePath, fi, false) + rtest.OK(t, err) + n3, err := NodeFromFileInfo(nodePath, fi, true) + rtest.OK(t, err) + rtest.Assert(t, n2.Equals(*n3), "unexpected node info mismatch %v", cmp.Diff(n2, n3)) + + rtest.Assert(t, test.Name == n2.Name, + "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name) + rtest.Assert(t, test.Type == n2.Type, + "%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type) + rtest.Assert(t, test.Size == n2.Size, + "%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size) + + if runtime.GOOS != "windows" { + rtest.Assert(t, test.UID == n2.UID, + "%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID) + rtest.Assert(t, test.GID == n2.GID, + "%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID) + if test.Type != "symlink" { + // On OpenBSD only root can set sticky bit (see sticky(8)). + if runtime.GOOS != "openbsd" && runtime.GOOS != "netbsd" && runtime.GOOS != "solaris" && test.Name == "testSticky" { + rtest.Assert(t, test.Mode == n2.Mode, + "%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode) + } + } + } + + AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime) + AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime) + if len(n2.ExtendedAttributes) == 0 { + n2.ExtendedAttributes = nil + } + rtest.Assert(t, reflect.DeepEqual(test.ExtendedAttributes, n2.ExtendedAttributes), + "%v: xattrs don't match (%v != %v)", test.Name, test.ExtendedAttributes, n2.ExtendedAttributes) + }) + } +} + +func AssertFsTimeEqual(t *testing.T, label string, nodeType string, t1 time.Time, t2 time.Time) { + var equal bool + + // Go currently doesn't support setting timestamps of symbolic links on darwin and bsd + if nodeType == "symlink" { + switch runtime.GOOS { + case "darwin", "freebsd", "openbsd", "netbsd", "solaris": + return + } + } + + switch runtime.GOOS { + case "darwin": + // HFS+ timestamps don't support sub-second precision, + // see https://en.wikipedia.org/wiki/Comparison_of_file_systems + diff := int(t1.Sub(t2).Seconds()) + equal = diff == 0 + default: + equal = t1.Equal(t2) + } + + rtest.Assert(t, equal, "%s: %s doesn't match (%v != %v)", label, nodeType, t1, t2) +} + +func TestNodeRestoreMetadataError(t *testing.T) { + tempdir := t.TempDir() + + node := &nodeTests[0] + nodePath := filepath.Join(tempdir, node.Name) + + // This will fail because the target file does not exist + err := NodeRestoreMetadata(node, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }) + test.Assert(t, errors.Is(err, os.ErrNotExist), "failed for an unexpected reason") 
+} diff --git a/internal/restic/node_unix.go b/internal/fs/node_unix.go similarity index 97% rename from internal/restic/node_unix.go rename to internal/fs/node_unix.go index 976cd7b0366..fb247ac99f4 100644 --- a/internal/restic/node_unix.go +++ b/internal/fs/node_unix.go @@ -1,7 +1,7 @@ //go:build !windows // +build !windows -package restic +package fs import ( "os" diff --git a/internal/restic/node_unix_test.go b/internal/fs/node_unix_test.go similarity index 94% rename from internal/restic/node_unix_test.go rename to internal/fs/node_unix_test.go index 9ea7b172595..b505357f2fc 100644 --- a/internal/restic/node_unix_test.go +++ b/internal/fs/node_unix_test.go @@ -1,7 +1,7 @@ //go:build !windows // +build !windows -package restic +package fs import ( "os" @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -27,7 +28,7 @@ func stat(t testing.TB, filename string) (fi os.FileInfo, ok bool) { return fi, true } -func checkFile(t testing.TB, stat *syscall.Stat_t, node *Node) { +func checkFile(t testing.TB, stat *syscall.Stat_t, node *restic.Node) { t.Helper() if uint32(node.Mode.Perm()) != uint32(stat.Mode&0777) { t.Errorf("Mode does not match, want %v, got %v", stat.Mode&0777, node.Mode) @@ -80,7 +81,7 @@ func checkFile(t testing.TB, stat *syscall.Stat_t, node *Node) { } -func checkDevice(t testing.TB, stat *syscall.Stat_t, node *Node) { +func checkDevice(t testing.TB, stat *syscall.Stat_t, node *restic.Node) { if node.Device != uint64(stat.Rdev) { t.Errorf("Rdev does not match, want %v, got %v", stat.Rdev, node.Device) } diff --git a/internal/restic/node_windows.go b/internal/fs/node_windows.go similarity index 88% rename from internal/restic/node_windows.go rename to internal/fs/node_windows.go index 3f836ae613a..90fa3462c3a 100644 --- a/internal/restic/node_windows.go +++ b/internal/fs/node_windows.go @@ -1,4 +1,4 @@ -package restic +package fs import ( "encoding/json" @@ -14,7 +14,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" "golang.org/x/sys/windows" ) @@ -82,12 +82,12 @@ func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error } // restore extended attributes for windows -func nodeRestoreExtendedAttributes(node *Node, path string) (err error) { +func nodeRestoreExtendedAttributes(node *restic.Node, path string) (err error) { count := len(node.ExtendedAttributes) if count > 0 { - eas := make([]fs.ExtendedAttribute, count) + eas := make([]ExtendedAttribute, count) for i, attr := range node.ExtendedAttributes { - eas[i] = fs.ExtendedAttribute{Name: attr.Name, Value: attr.Value} + eas[i] = ExtendedAttribute{Name: attr.Name, Value: attr.Value} } if errExt := restoreExtendedAttributes(node.Type, path, eas); errExt != nil { return errExt @@ -97,9 +97,9 @@ func nodeRestoreExtendedAttributes(node *Node, path string) (err error) { } // fill extended attributes in the node. This also includes the Generic attributes for windows. 
-func nodeFillExtendedAttributes(node *Node, path string, _ bool) (err error) { +func nodeFillExtendedAttributes(node *restic.Node, path string, _ bool) (err error) { var fileHandle windows.Handle - if fileHandle, err = fs.OpenHandleForEA(node.Type, path, false); fileHandle == 0 { + if fileHandle, err = OpenHandleForEA(node.Type, path, false); fileHandle == 0 { return nil } if err != nil { @@ -107,8 +107,8 @@ func nodeFillExtendedAttributes(node *Node, path string, _ bool) (err error) { } defer closeFileHandle(fileHandle, path) // Replaced inline defer with named function call //Get the windows Extended Attributes using the file handle - var extAtts []fs.ExtendedAttribute - extAtts, err = fs.GetFileEA(fileHandle) + var extAtts []ExtendedAttribute + extAtts, err = GetFileEA(fileHandle) debug.Log("fillExtendedAttributes(%v) %v", path, extAtts) if err != nil { return errors.Errorf("get EA failed for path %v, with: %v", path, err) @@ -119,7 +119,7 @@ func nodeFillExtendedAttributes(node *Node, path string, _ bool) (err error) { //Fill the ExtendedAttributes in the node using the name/value pairs in the windows EA for _, attr := range extAtts { - extendedAttr := ExtendedAttribute{ + extendedAttr := restic.ExtendedAttribute{ Name: attr.Name, Value: attr.Value, } @@ -139,9 +139,9 @@ func closeFileHandle(fileHandle windows.Handle, path string) { // restoreExtendedAttributes handles restore of the Windows Extended Attributes to the specified path. // The Windows API requires setting of all the Extended Attributes in one call. -func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute) (err error) { +func restoreExtendedAttributes(nodeType, path string, eas []ExtendedAttribute) (err error) { var fileHandle windows.Handle - if fileHandle, err = fs.OpenHandleForEA(nodeType, path, true); fileHandle == 0 { + if fileHandle, err = OpenHandleForEA(nodeType, path, true); fileHandle == 0 { return nil } if err != nil { @@ -150,7 +150,7 @@ func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute defer closeFileHandle(fileHandle, path) // Replaced inline defer with named function call // clear old unexpected xattrs by setting them to an empty value - oldEAs, err := fs.GetFileEA(fileHandle) + oldEAs, err := GetFileEA(fileHandle) if err != nil { return err } @@ -165,11 +165,11 @@ func restoreExtendedAttributes(nodeType, path string, eas []fs.ExtendedAttribute } if !found { - eas = append(eas, fs.ExtendedAttribute{Name: oldEA.Name, Value: nil}) + eas = append(eas, ExtendedAttribute{Name: oldEA.Name, Value: nil}) } } - if err = fs.SetFileEA(fileHandle, eas); err != nil { + if err = SetFileEA(fileHandle, eas); err != nil { return errors.Errorf("set EA failed for path %v, with: %v", path, err) } return nil @@ -210,7 +210,7 @@ func (s statT) ctim() syscall.Timespec { } // restoreGenericAttributes restores generic attributes for Windows -func nodeRestoreGenericAttributes(node *Node, path string, warn func(msg string)) (err error) { +func nodeRestoreGenericAttributes(node *restic.Node, path string, warn func(msg string)) (err error) { if len(node.GenericAttributes) == 0 { return nil } @@ -230,19 +230,19 @@ func nodeRestoreGenericAttributes(node *Node, path string, warn func(msg string) } } if windowsAttributes.SecurityDescriptor != nil { - if err := fs.SetSecurityDescriptor(path, windowsAttributes.SecurityDescriptor); err != nil { + if err := SetSecurityDescriptor(path, windowsAttributes.SecurityDescriptor); err != nil { errs = append(errs, fmt.Errorf("error restoring 
security descriptor for: %s : %v", path, err)) } } - HandleUnknownGenericAttributesFound(unknownAttribs, warn) + restic.HandleUnknownGenericAttributesFound(unknownAttribs, warn) return errors.CombineErrors(errs...) } // genericAttributesToWindowsAttrs converts the generic attributes map to a WindowsAttributes and also returns a string of unknown attributes that it could not convert. -func genericAttributesToWindowsAttrs(attrs map[GenericAttributeType]json.RawMessage) (windowsAttributes WindowsAttributes, unknownAttribs []GenericAttributeType, err error) { +func genericAttributesToWindowsAttrs(attrs map[restic.GenericAttributeType]json.RawMessage) (windowsAttributes WindowsAttributes, unknownAttribs []restic.GenericAttributeType, err error) { waValue := reflect.ValueOf(&windowsAttributes).Elem() - unknownAttribs, err = GenericAttributesToOSAttrs(attrs, reflect.TypeOf(windowsAttributes), &waValue, "windows") + unknownAttribs, err = restic.GenericAttributesToOSAttrs(attrs, reflect.TypeOf(windowsAttributes), &waValue, "windows") return windowsAttributes, unknownAttribs, err } @@ -289,14 +289,14 @@ func fixEncryptionAttribute(path string, attrs *uint32, pathPointer *uint16) (er // File should be encrypted. err = encryptFile(pathPointer) if err != nil { - if fs.IsAccessDenied(err) || errors.Is(err, windows.ERROR_FILE_READ_ONLY) { + if IsAccessDenied(err) || errors.Is(err, windows.ERROR_FILE_READ_ONLY) { // If existing file already has readonly or system flag, encrypt file call fails. // The readonly and system flags will be set again at the end of this func if they are needed. - err = fs.ResetPermissions(path) + err = ResetPermissions(path) if err != nil { return fmt.Errorf("failed to encrypt file: failed to reset permissions: %s : %v", path, err) } - err = fs.ClearSystem(path) + err = ClearSystem(path) if err != nil { return fmt.Errorf("failed to encrypt file: failed to clear system flag: %s : %v", path, err) } @@ -317,14 +317,14 @@ func fixEncryptionAttribute(path string, attrs *uint32, pathPointer *uint16) (er // File should not be encrypted, but its already encrypted. Decrypt it. err = decryptFile(pathPointer) if err != nil { - if fs.IsAccessDenied(err) || errors.Is(err, windows.ERROR_FILE_READ_ONLY) { + if IsAccessDenied(err) || errors.Is(err, windows.ERROR_FILE_READ_ONLY) { // If existing file already has readonly or system flag, decrypt file call fails. // The readonly and system flags will be set again after this func if they are needed. - err = fs.ResetPermissions(path) + err = ResetPermissions(path) if err != nil { return fmt.Errorf("failed to encrypt file: failed to reset permissions: %s : %v", path, err) } - err = fs.ClearSystem(path) + err = ClearSystem(path) if err != nil { return fmt.Errorf("failed to decrypt file: failed to clear system flag: %s : %v", path, err) } @@ -365,7 +365,7 @@ func decryptFile(pathPointer *uint16) error { // Created time and Security Descriptors. // It also checks if the volume supports extended attributes and stores the result in a map // so that it does not have to be checked again for subsequent calls for paths in the same volume. -func nodeFillGenericAttributes(node *Node, path string, fi os.FileInfo, stat *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(node *restic.Node, path string, fi os.FileInfo, stat *statT) (allowExtended bool, err error) { if strings.Contains(filepath.Base(path), ":") { // Do not process for Alternate Data Streams in Windows // Also do not allow processing of extended attributes for ADS. 
@@ -392,7 +392,7 @@ func nodeFillGenericAttributes(node *Node, path string, fi os.FileInfo, stat *st if err != nil { return false, err } - if sd, err = fs.GetSecurityDescriptor(path); err != nil { + if sd, err = GetSecurityDescriptor(path); err != nil { return allowExtended, err } } @@ -422,7 +422,7 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { return eaSupportedValue.(bool), nil } // If not found, check if EA is supported with manually prepared volume name - isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeName + `\`) + isEASupportedVolume, err = PathSupportsExtendedAttributes(volumeName + `\`) // If the prepared volume name is not valid, we will fetch the actual volume name next. if err != nil && !errors.Is(err, windows.DNS_ERROR_INVALID_NAME) { debug.Log("Error checking if extended attributes are supported for prepared volume name %s: %v", volumeName, err) @@ -432,7 +432,7 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { } } // If an entry is not found, get the actual volume name using the GetVolumePathName function - volumeNameActual, err := fs.GetVolumePathName(path) + volumeNameActual, err := GetVolumePathName(path) if err != nil { debug.Log("Error getting actual volume name %s for path %s: %v", volumeName, path, err) // There can be multiple errors like path does not exist, bad network path, etc. @@ -447,7 +447,7 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { return eaSupportedValue.(bool), nil } // If the actual volume name is different and is not in the map, again check if the new volume supports extended attributes with the actual volume name - isEASupportedVolume, err = fs.PathSupportsExtendedAttributes(volumeNameActual + `\`) + isEASupportedVolume, err = PathSupportsExtendedAttributes(volumeNameActual + `\`) // Debug log for cases where the prepared volume name is not valid if err != nil { debug.Log("Error checking if extended attributes are supported for actual volume name %s: %v", volumeNameActual, err) @@ -496,10 +496,10 @@ func prepareVolumeName(path string) (volumeName string, err error) { } // windowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection -func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs map[GenericAttributeType]json.RawMessage, err error) { +func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs map[restic.GenericAttributeType]json.RawMessage, err error) { // Get the value of the WindowsAttributes windowsAttributesValue := reflect.ValueOf(windowsAttributes) - return OSAttrsToGenericAttributes(reflect.TypeOf(windowsAttributes), &windowsAttributesValue, runtime.GOOS) + return restic.OSAttrsToGenericAttributes(reflect.TypeOf(windowsAttributes), &windowsAttributesValue, runtime.GOOS) } // getCreationTime gets the value for the WindowsAttribute CreationTime in a windows specific time format. 
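checkAndStoreEASupport above probes a volume once for extended-attribute support and remembers the answer, so later files on the same volume skip the comparatively expensive check. A minimal sketch of that memoization pattern follows; probeVolume is a hypothetical stand-in for the real PathSupportsExtendedAttributes call, and the sync.Map lookup mirrors the eaSupportedValue.(bool) accesses in the hunk above.

package main

import (
	"fmt"
	"sync"
)

// eaSupportCache maps a volume name to whether it supports extended attributes.
var eaSupportCache sync.Map

// probeVolume stands in for the real, expensive capability check.
func probeVolume(volume string) (bool, error) {
	fmt.Println("probing", volume)
	return true, nil
}

// volumeSupportsEA returns the cached answer for volume, probing at most once
// per volume (ignoring the benign race where two goroutines probe concurrently).
func volumeSupportsEA(volume string) (bool, error) {
	if v, ok := eaSupportCache.Load(volume); ok {
		return v.(bool), nil
	}
	supported, err := probeVolume(volume)
	if err != nil {
		return false, err
	}
	eaSupportCache.Store(volume, supported)
	return supported, nil
}

func main() {
	for i := 0; i < 3; i++ {
		ok, _ := volumeSupportsEA(`C:`)
		fmt.Println(ok) // "probing C:" is printed only once
	}
}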
diff --git a/internal/restic/node_windows_test.go b/internal/fs/node_windows_test.go similarity index 90% rename from internal/restic/node_windows_test.go rename to internal/fs/node_windows_test.go index e78c8cb9667..046c1984c81 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -1,7 +1,7 @@ //go:build windows // +build windows -package restic +package fs import ( "encoding/base64" @@ -15,7 +15,7 @@ import ( "time" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" "golang.org/x/sys/windows" ) @@ -23,10 +23,10 @@ import ( func TestRestoreSecurityDescriptors(t *testing.T) { t.Parallel() tempDir := t.TempDir() - for i, sd := range fs.TestFileSDs { + for i, sd := range TestFileSDs { testRestoreSecurityDescriptor(t, sd, tempDir, "file", fmt.Sprintf("testfile%d", i)) } - for i, sd := range fs.TestDirSDs { + for i, sd := range TestDirSDs { testRestoreSecurityDescriptor(t, sd, tempDir, "dir", fmt.Sprintf("testdir%d", i)) } } @@ -47,17 +47,17 @@ func testRestoreSecurityDescriptor(t *testing.T, sd string, tempDir, fileType, f sdByteFromRestoredNode := getWindowsAttr(t, testPath, node).SecurityDescriptor // Get the security descriptor for the test path after the restore. - sdBytesFromRestoredPath, err := fs.GetSecurityDescriptor(testPath) + sdBytesFromRestoredPath, err := GetSecurityDescriptor(testPath) test.OK(t, errors.Wrapf(err, "Error while getting the security descriptor for: %s", testPath)) // Compare the input SD and the SD got from the restored file. - fs.CompareSecurityDescriptors(t, testPath, sdInputBytes, *sdBytesFromRestoredPath) + CompareSecurityDescriptors(t, testPath, sdInputBytes, *sdBytesFromRestoredPath) // Compare the SD got from node constructed from the restored file info and the SD got directly from the restored file. 
- fs.CompareSecurityDescriptors(t, testPath, *sdByteFromRestoredNode, *sdBytesFromRestoredPath) + CompareSecurityDescriptors(t, testPath, *sdByteFromRestoredNode, *sdBytesFromRestoredPath) } -func getNode(name string, fileType string, genericAttributes map[GenericAttributeType]json.RawMessage) Node { - return Node{ +func getNode(name string, fileType string, genericAttributes map[restic.GenericAttributeType]json.RawMessage) restic.Node { + return restic.Node{ Name: name, Type: fileType, Mode: 0644, @@ -68,7 +68,7 @@ func getNode(name string, fileType string, genericAttributes map[GenericAttribut } } -func getWindowsAttr(t *testing.T, testPath string, node *Node) WindowsAttributes { +func getWindowsAttr(t *testing.T, testPath string, node *restic.Node) WindowsAttributes { windowsAttributes, unknownAttribs, err := genericAttributesToWindowsAttrs(node.GenericAttributes) test.OK(t, errors.Wrapf(err, "Error getting windows attr from generic attr: %s", testPath)) test.Assert(t, len(unknownAttribs) == 0, "Unknown attribs found: %s for: %s", unknownAttribs, testPath) @@ -83,12 +83,12 @@ func TestRestoreCreationTime(t *testing.T) { creationTimeAttribute := getCreationTime(fi, path) test.OK(t, errors.Wrapf(err, "Could not get creation time for path: %s", path)) //Using the temp dir creation time as the test creation time for the test file and folder - runGenericAttributesTest(t, path, TypeCreationTime, WindowsAttributes{CreationTime: creationTimeAttribute}, false) + runGenericAttributesTest(t, path, restic.TypeCreationTime, WindowsAttributes{CreationTime: creationTimeAttribute}, false) } func TestRestoreFileAttributes(t *testing.T) { t.Parallel() - genericAttributeName := TypeFileAttributes + genericAttributeName := restic.TypeFileAttributes tempDir := t.TempDir() normal := uint32(syscall.FILE_ATTRIBUTE_NORMAL) hidden := uint32(syscall.FILE_ATTRIBUTE_HIDDEN) @@ -110,7 +110,7 @@ func TestRestoreFileAttributes(t *testing.T) { for i, fileAttr := range fileAttributes { genericAttrs, err := WindowsAttrsToGenericAttributes(fileAttr) test.OK(t, err) - expectedNodes := []Node{ + expectedNodes := []restic.Node{ { Name: fmt.Sprintf("testfile%d", i), Type: "file", @@ -143,7 +143,7 @@ func TestRestoreFileAttributes(t *testing.T) { for i, folderAttr := range folderAttributes { genericAttrs, err := WindowsAttrsToGenericAttributes(folderAttr) test.OK(t, err) - expectedNodes := []Node{ + expectedNodes := []restic.Node{ { Name: fmt.Sprintf("testdirectory%d", i), Type: "dir", @@ -158,10 +158,10 @@ func TestRestoreFileAttributes(t *testing.T) { } } -func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName GenericAttributeType, genericAttributeExpected WindowsAttributes, warningExpected bool) { +func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName restic.GenericAttributeType, genericAttributeExpected WindowsAttributes, warningExpected bool) { genericAttributes, err := WindowsAttrsToGenericAttributes(genericAttributeExpected) test.OK(t, err) - expectedNodes := []Node{ + expectedNodes := []restic.Node{ { Name: "testfile", Type: "file", @@ -183,7 +183,7 @@ func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName } runGenericAttributesTestForNodes(t, expectedNodes, tempDir, genericAttributeName, genericAttributeExpected, warningExpected) } -func runGenericAttributesTestForNodes(t *testing.T, expectedNodes []Node, tempDir string, genericAttr GenericAttributeType, genericAttributeExpected WindowsAttributes, warningExpected bool) { +func 
runGenericAttributesTestForNodes(t *testing.T, expectedNodes []restic.Node, tempDir string, genericAttr restic.GenericAttributeType, genericAttributeExpected WindowsAttributes, warningExpected bool) { for _, testNode := range expectedNodes { testPath, node := restoreAndGetNode(t, tempDir, &testNode, warningExpected) @@ -195,7 +195,7 @@ func runGenericAttributesTestForNodes(t *testing.T, expectedNodes []Node, tempDi } } -func restoreAndGetNode(t *testing.T, tempDir string, testNode *Node, warningExpected bool) (string, *Node) { +func restoreAndGetNode(t *testing.T, tempDir string, testNode *restic.Node, warningExpected bool) (string, *restic.Node) { testPath := filepath.Join(tempDir, "001", testNode.Name) err := os.MkdirAll(filepath.Dir(testPath), testNode.Mode) test.OK(t, errors.Wrapf(err, "Failed to create parent directories for: %s", testPath)) @@ -230,16 +230,16 @@ func restoreAndGetNode(t *testing.T, tempDir string, testNode *Node, warningExpe return testPath, nodeFromFileInfo } -const TypeSomeNewAttribute GenericAttributeType = "MockAttributes.SomeNewAttribute" +const TypeSomeNewAttribute restic.GenericAttributeType = "MockAttributes.SomeNewAttribute" func TestNewGenericAttributeType(t *testing.T) { t.Parallel() - newGenericAttribute := map[GenericAttributeType]json.RawMessage{} + newGenericAttribute := map[restic.GenericAttributeType]json.RawMessage{} newGenericAttribute[TypeSomeNewAttribute] = []byte("any value") tempDir := t.TempDir() - expectedNodes := []Node{ + expectedNodes := []restic.Node{ { Name: "testfile", Type: "file", @@ -271,7 +271,7 @@ func TestNewGenericAttributeType(t *testing.T) { func TestRestoreExtendedAttributes(t *testing.T) { t.Parallel() tempDir := t.TempDir() - expectedNodes := []Node{ + expectedNodes := []restic.Node{ { Name: "testfile", Type: "file", @@ -279,7 +279,7 @@ func TestRestoreExtendedAttributes(t *testing.T) { ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), ChangeTime: parseTime("2005-05-14 21:07:05.333"), - ExtendedAttributes: []ExtendedAttribute{ + ExtendedAttributes: []restic.ExtendedAttribute{ {"user.foo", []byte("bar")}, }, }, @@ -290,7 +290,7 @@ func TestRestoreExtendedAttributes(t *testing.T) { ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), ChangeTime: parseTime("2005-05-14 21:07:05.333"), - ExtendedAttributes: []ExtendedAttribute{ + ExtendedAttributes: []restic.ExtendedAttribute{ {"user.foo", []byte("bar")}, }, }, @@ -312,12 +312,12 @@ func TestRestoreExtendedAttributes(t *testing.T) { test.OK(t, errors.Wrapf(err, "Error closing file for: %s", testPath)) }() - extAttr, err := fs.GetFileEA(handle) + extAttr, err := GetFileEA(handle) test.OK(t, errors.Wrapf(err, "Error getting extended attributes for: %s", testPath)) test.Equals(t, len(node.ExtendedAttributes), len(extAttr)) for _, expectedExtAttr := range node.ExtendedAttributes { - var foundExtAttr *fs.ExtendedAttribute + var foundExtAttr *ExtendedAttribute for _, ea := range extAttr { if strings.EqualFold(ea.Name, expectedExtAttr.Name) { foundExtAttr = &ea @@ -491,13 +491,13 @@ func TestPrepareVolumeName(t *testing.T) { test.Equals(t, tc.expectedVolume, volume) if tc.isRealPath { - isEASupportedVolume, err := fs.PathSupportsExtendedAttributes(volume + `\`) + isEASupportedVolume, err := PathSupportsExtendedAttributes(volume + `\`) // If the prepared volume name is not valid, we will next fetch the actual volume name. 
test.OK(t, err) test.Equals(t, tc.expectedEASupported, isEASupportedVolume) - actualVolume, err := fs.GetVolumePathName(tc.path) + actualVolume, err := GetVolumePathName(tc.path) test.OK(t, err) test.Equals(t, tc.expectedVolume, actualVolume) } diff --git a/internal/restic/node_xattr.go b/internal/fs/node_xattr.go similarity index 80% rename from internal/restic/node_xattr.go rename to internal/fs/node_xattr.go index 062ef4345a2..11bdf382bfe 100644 --- a/internal/restic/node_xattr.go +++ b/internal/fs/node_xattr.go @@ -1,7 +1,7 @@ //go:build darwin || freebsd || linux || solaris // +build darwin freebsd linux solaris -package restic +package fs import ( "fmt" @@ -10,6 +10,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" "github.com/pkg/xattr" ) @@ -65,16 +66,16 @@ func handleXattrErr(err error) error { } // nodeRestoreGenericAttributes is no-op. -func nodeRestoreGenericAttributes(node *Node, _ string, warn func(msg string)) error { - return HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) +func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { + return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) } // nodeFillGenericAttributes is a no-op. -func nodeFillGenericAttributes(_ *Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { return true, nil } -func nodeRestoreExtendedAttributes(node *Node, path string) error { +func nodeRestoreExtendedAttributes(node *restic.Node, path string) error { expectedAttrs := map[string]struct{}{} for _, attr := range node.ExtendedAttributes { err := setxattr(path, attr.Name, attr.Value) @@ -101,7 +102,7 @@ func nodeRestoreExtendedAttributes(node *Node, path string) error { return nil } -func nodeFillExtendedAttributes(node *Node, path string, ignoreListError bool) error { +func nodeFillExtendedAttributes(node *restic.Node, path string, ignoreListError bool) error { xattrs, err := listxattr(path) debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err) if err != nil { @@ -111,14 +112,14 @@ func nodeFillExtendedAttributes(node *Node, path string, ignoreListError bool) e return err } - node.ExtendedAttributes = make([]ExtendedAttribute, 0, len(xattrs)) + node.ExtendedAttributes = make([]restic.ExtendedAttribute, 0, len(xattrs)) for _, attr := range xattrs { attrVal, err := getxattr(path, attr) if err != nil { fmt.Fprintf(os.Stderr, "can not obtain extended attribute %v for %v:\n", attr, path) continue } - attr := ExtendedAttribute{ + attr := restic.ExtendedAttribute{ Name: attr, Value: attrVal, } diff --git a/internal/restic/node_xattr_all_test.go b/internal/fs/node_xattr_all_test.go similarity index 77% rename from internal/restic/node_xattr_all_test.go rename to internal/fs/node_xattr_all_test.go index 30d29a6ed08..39670d6e1e0 100644 --- a/internal/restic/node_xattr_all_test.go +++ b/internal/fs/node_xattr_all_test.go @@ -1,7 +1,7 @@ //go:build darwin || freebsd || linux || solaris || windows // +build darwin freebsd linux solaris windows -package restic +package fs import ( "os" @@ -10,10 +10,11 @@ import ( "strings" "testing" + "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) -func setAndVerifyXattr(t *testing.T, file string, attrs []ExtendedAttribute) { +func setAndVerifyXattr(t *testing.T, 
file string, attrs []restic.ExtendedAttribute) { if runtime.GOOS == "windows" { // windows seems to convert the xattr name to upper case for i := range attrs { @@ -21,13 +22,13 @@ func setAndVerifyXattr(t *testing.T, file string, attrs []ExtendedAttribute) { } } - node := &Node{ + node := &restic.Node{ Type: "file", ExtendedAttributes: attrs, } rtest.OK(t, nodeRestoreExtendedAttributes(node, file)) - nodeActual := &Node{ + nodeActual := &restic.Node{ Type: "file", } rtest.OK(t, nodeFillExtendedAttributes(nodeActual, file, false)) @@ -40,14 +41,14 @@ func TestOverwriteXattr(t *testing.T) { file := filepath.Join(dir, "file") rtest.OK(t, os.WriteFile(file, []byte("hello world"), 0o600)) - setAndVerifyXattr(t, file, []ExtendedAttribute{ + setAndVerifyXattr(t, file, []restic.ExtendedAttribute{ { Name: "user.foo", Value: []byte("bar"), }, }) - setAndVerifyXattr(t, file, []ExtendedAttribute{ + setAndVerifyXattr(t, file, []restic.ExtendedAttribute{ { Name: "user.other", Value: []byte("some"), diff --git a/internal/restic/node_xattr_test.go b/internal/fs/node_xattr_test.go similarity index 98% rename from internal/restic/node_xattr_test.go rename to internal/fs/node_xattr_test.go index 5ce77bd2805..d948e3b31b1 100644 --- a/internal/restic/node_xattr_test.go +++ b/internal/fs/node_xattr_test.go @@ -1,7 +1,7 @@ //go:build darwin || freebsd || linux || solaris // +build darwin freebsd linux solaris -package restic +package fs import ( "os" diff --git a/internal/restic/node.go b/internal/restic/node.go index e23d39f1b1b..8bf97e59c1d 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -4,12 +4,10 @@ import ( "encoding/json" "fmt" "os" - "os/user" "reflect" "strconv" "strings" "sync" - "syscall" "time" "unicode/utf8" @@ -18,7 +16,6 @@ import ( "bytes" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/fs" ) // ExtendedAttribute is a tuple storing the xattr name and value for various filesystems. @@ -133,49 +130,6 @@ func (node Node) String() string { mode|node.Mode, node.UID, node.GID, node.Size, node.ModTime, node.Name) } -// NodeFromFileInfo returns a new node from the given path and FileInfo. It -// returns the first error that is encountered, together with a node. -func NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*Node, error) { - mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky - node := &Node{ - Path: path, - Name: fi.Name(), - Mode: fi.Mode() & mask, - ModTime: fi.ModTime(), - } - - node.Type = nodeTypeFromFileInfo(fi) - if node.Type == "file" { - node.Size = uint64(fi.Size()) - } - - err := nodeFillExtra(node, path, fi, ignoreXattrListError) - return node, err -} - -func nodeTypeFromFileInfo(fi os.FileInfo) string { - switch fi.Mode() & os.ModeType { - case 0: - return "file" - case os.ModeDir: - return "dir" - case os.ModeSymlink: - return "symlink" - case os.ModeDevice | os.ModeCharDevice: - return "chardev" - case os.ModeDevice: - return "dev" - case os.ModeNamedPipe: - return "fifo" - case os.ModeSocket: - return "socket" - case os.ModeIrregular: - return "irregular" - } - - return "" -} - // GetExtendedAttribute gets the extended attribute. func (node Node) GetExtendedAttribute(a string) []byte { for _, attr := range node.ExtendedAttributes { @@ -186,162 +140,6 @@ func (node Node) GetExtendedAttribute(a string) []byte { return nil } -// NodeCreateAt creates the node at the given path but does NOT restore node meta data. 
-func NodeCreateAt(node *Node, path string) error { - debug.Log("create node %v at %v", node.Name, path) - - switch node.Type { - case "dir": - if err := nodeCreateDirAt(node, path); err != nil { - return err - } - case "file": - if err := nodeCreateFileAt(path); err != nil { - return err - } - case "symlink": - if err := nodeCreateSymlinkAt(node, path); err != nil { - return err - } - case "dev": - if err := nodeCreateDevAt(node, path); err != nil { - return err - } - case "chardev": - if err := nodeCreateCharDevAt(node, path); err != nil { - return err - } - case "fifo": - if err := nodeCreateFifoAt(path); err != nil { - return err - } - case "socket": - return nil - default: - return errors.Errorf("filetype %q not implemented", node.Type) - } - - return nil -} - -// NodeRestoreMetadata restores node metadata -func NodeRestoreMetadata(node *Node, path string, warn func(msg string)) error { - err := nodeRestoreMetadata(node, path, warn) - if err != nil { - // It is common to have permission errors for folders like /home - // unless you're running as root, so ignore those. - if os.Geteuid() > 0 && errors.Is(err, os.ErrPermission) { - debug.Log("not running as root, ignoring permission error for %v: %v", - path, err) - return nil - } - debug.Log("restoreMetadata(%s) error %v", path, err) - } - - return err -} - -func nodeRestoreMetadata(node *Node, path string, warn func(msg string)) error { - var firsterr error - - if err := lchown(path, int(node.UID), int(node.GID)); err != nil { - firsterr = errors.WithStack(err) - } - - if err := nodeRestoreExtendedAttributes(node, path); err != nil { - debug.Log("error restoring extended attributes for %v: %v", path, err) - if firsterr == nil { - firsterr = err - } - } - - if err := nodeRestoreGenericAttributes(node, path, warn); err != nil { - debug.Log("error restoring generic attributes for %v: %v", path, err) - if firsterr == nil { - firsterr = err - } - } - - if err := NodeRestoreTimestamps(node, path); err != nil { - debug.Log("error restoring timestamps for %v: %v", path, err) - if firsterr == nil { - firsterr = err - } - } - - // Moving RestoreTimestamps and restoreExtendedAttributes calls above as for readonly files in windows - // calling Chmod below will no longer allow any modifications to be made on the file and the - // calls above would fail. 
- if node.Type != "symlink" { - if err := fs.Chmod(path, node.Mode); err != nil { - if firsterr == nil { - firsterr = errors.WithStack(err) - } - } - } - - return firsterr -} - -func NodeRestoreTimestamps(node *Node, path string) error { - var utimes = [...]syscall.Timespec{ - syscall.NsecToTimespec(node.AccessTime.UnixNano()), - syscall.NsecToTimespec(node.ModTime.UnixNano()), - } - - if node.Type == "symlink" { - return nodeRestoreSymlinkTimestamps(path, utimes) - } - - if err := syscall.UtimesNano(path, utimes[:]); err != nil { - return errors.Wrap(err, "UtimesNano") - } - - return nil -} - -func nodeCreateDirAt(node *Node, path string) error { - err := fs.Mkdir(path, node.Mode) - if err != nil && !os.IsExist(err) { - return errors.WithStack(err) - } - - return nil -} - -func nodeCreateFileAt(path string) error { - f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) - if err != nil { - return errors.WithStack(err) - } - - if err := f.Close(); err != nil { - return errors.WithStack(err) - } - - return nil -} - -func nodeCreateSymlinkAt(node *Node, path string) error { - if err := fs.Symlink(node.LinkTarget, path); err != nil { - return errors.WithStack(err) - } - - return nil -} - -func nodeCreateDevAt(node *Node, path string) error { - return mknod(path, syscall.S_IFBLK|0600, node.Device) -} - -func nodeCreateCharDevAt(node *Node, path string) error { - return mknod(path, syscall.S_IFCHR|0600, node.Device) -} - -func nodeCreateFifoAt(path string) error { - return mkfifo(path, 0600) -} - // FixTime returns a time.Time which can safely be used to marshal as JSON. If // the timestamp is earlier than year zero, the year is set to zero. In the same // way, if the year is larger than 9999, the year is set to 9999. Other than @@ -576,127 +374,6 @@ func deepEqual(map1, map2 map[GenericAttributeType]json.RawMessage) bool { return true } -func nodeFillUser(node *Node, stat *statT) { - uid, gid := stat.uid(), stat.gid() - node.UID, node.GID = uid, gid - node.User = lookupUsername(uid) - node.Group = lookupGroup(gid) -} - -var ( - uidLookupCache = make(map[uint32]string) - uidLookupCacheMutex = sync.RWMutex{} -) - -// Cached user name lookup by uid. Returns "" when no name can be found. -func lookupUsername(uid uint32) string { - uidLookupCacheMutex.RLock() - username, ok := uidLookupCache[uid] - uidLookupCacheMutex.RUnlock() - - if ok { - return username - } - - u, err := user.LookupId(strconv.Itoa(int(uid))) - if err == nil { - username = u.Username - } - - uidLookupCacheMutex.Lock() - uidLookupCache[uid] = username - uidLookupCacheMutex.Unlock() - - return username -} - -var ( - gidLookupCache = make(map[uint32]string) - gidLookupCacheMutex = sync.RWMutex{} -) - -// Cached group name lookup by gid. Returns "" when no name can be found. 
-func lookupGroup(gid uint32) string { - gidLookupCacheMutex.RLock() - group, ok := gidLookupCache[gid] - gidLookupCacheMutex.RUnlock() - - if ok { - return group - } - - g, err := user.LookupGroupId(strconv.Itoa(int(gid))) - if err == nil { - group = g.Name - } - - gidLookupCacheMutex.Lock() - gidLookupCache[gid] = group - gidLookupCacheMutex.Unlock() - - return group -} - -func nodeFillExtra(node *Node, path string, fi os.FileInfo, ignoreXattrListError bool) error { - stat, ok := toStatT(fi.Sys()) - if !ok { - // fill minimal info with current values for uid, gid - node.UID = uint32(os.Getuid()) - node.GID = uint32(os.Getgid()) - node.ChangeTime = node.ModTime - return nil - } - - node.Inode = uint64(stat.ino()) - node.DeviceID = uint64(stat.dev()) - - nodeFillTimes(node, stat) - - nodeFillUser(node, stat) - - switch node.Type { - case "file": - node.Size = uint64(stat.size()) - node.Links = uint64(stat.nlink()) - case "dir": - case "symlink": - var err error - node.LinkTarget, err = fs.Readlink(path) - node.Links = uint64(stat.nlink()) - if err != nil { - return errors.WithStack(err) - } - case "dev": - node.Device = uint64(stat.rdev()) - node.Links = uint64(stat.nlink()) - case "chardev": - node.Device = uint64(stat.rdev()) - node.Links = uint64(stat.nlink()) - case "fifo": - case "socket": - default: - return errors.Errorf("unsupported file type %q", node.Type) - } - - allowExtended, err := nodeFillGenericAttributes(node, path, fi, stat) - if allowExtended { - // Skip processing ExtendedAttributes if allowExtended is false. - err = errors.CombineErrors(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) - } - return err -} - -func mkfifo(path string, mode uint32) (err error) { - return mknod(path, mode|syscall.S_IFIFO, 0) -} - -func nodeFillTimes(node *Node, stat *statT) { - ctim := stat.ctim() - atim := stat.atim() - node.ChangeTime = time.Unix(ctim.Unix()) - node.AccessTime = time.Unix(atim.Unix()) -} - // HandleUnknownGenericAttributesFound is used for handling and distinguing between scenarios related to future versions and cross-OS repositories func HandleUnknownGenericAttributesFound(unknownAttribs []GenericAttributeType, warn func(msg string)) { for _, unknownAttrib := range unknownAttribs { diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index 075dd5cc528..38a17cb0906 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -3,315 +3,12 @@ package restic import ( "encoding/json" "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" "testing" "time" - "github.com/google/go-cmp/cmp" - "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/test" - rtest "github.com/restic/restic/internal/test" ) -func BenchmarkNodeFillUser(t *testing.B) { - tempfile, err := os.CreateTemp("", "restic-test-temp-") - if err != nil { - t.Fatal(err) - } - - fi, err := tempfile.Stat() - if err != nil { - t.Fatal(err) - } - - path := tempfile.Name() - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - _, err := NodeFromFileInfo(path, fi, false) - rtest.OK(t, err) - } - - rtest.OK(t, tempfile.Close()) - rtest.RemoveAll(t, tempfile.Name()) -} - -func BenchmarkNodeFromFileInfo(t *testing.B) { - tempfile, err := os.CreateTemp("", "restic-test-temp-") - if err != nil { - t.Fatal(err) - } - - fi, err := tempfile.Stat() - if err != nil { - t.Fatal(err) - } - - path := tempfile.Name() - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - _, err := NodeFromFileInfo(path, fi, false) - if err != nil { - t.Fatal(err) - } 
- } - - rtest.OK(t, tempfile.Close()) - rtest.RemoveAll(t, tempfile.Name()) -} - -func parseTime(s string) time.Time { - t, err := time.Parse("2006-01-02 15:04:05.999", s) - if err != nil { - panic(err) - } - - return t.Local() -} - -var nodeTests = []Node{ - { - Name: "testFile", - Type: "file", - Content: IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0604, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - { - Name: "testSuidFile", - Type: "file", - Content: IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0755 | os.ModeSetuid, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - { - Name: "testSuidFile2", - Type: "file", - Content: IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0755 | os.ModeSetgid, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - { - Name: "testSticky", - Type: "file", - Content: IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0755 | os.ModeSticky, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - { - Name: "testDir", - Type: "dir", - Subtree: nil, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0750 | os.ModeDir, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - { - Name: "testSymlink", - Type: "symlink", - LinkTarget: "invalid", - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0777 | os.ModeSymlink, - ModTime: parseTime("2015-05-14 21:07:23.111"), - AccessTime: parseTime("2015-05-14 21:07:24.222"), - ChangeTime: parseTime("2015-05-14 21:07:25.333"), - }, - - // include "testFile" and "testDir" again with slightly different - // metadata, so we can test if CreateAt works with pre-existing files. 
- { - Name: "testFile", - Type: "file", - Content: IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0604, - ModTime: parseTime("2005-05-14 21:07:03.111"), - AccessTime: parseTime("2005-05-14 21:07:04.222"), - ChangeTime: parseTime("2005-05-14 21:07:05.333"), - }, - { - Name: "testDir", - Type: "dir", - Subtree: nil, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0750 | os.ModeDir, - ModTime: parseTime("2005-05-14 21:07:03.111"), - AccessTime: parseTime("2005-05-14 21:07:04.222"), - ChangeTime: parseTime("2005-05-14 21:07:05.333"), - }, - { - Name: "testXattrFile", - Type: "file", - Content: IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0604, - ModTime: parseTime("2005-05-14 21:07:03.111"), - AccessTime: parseTime("2005-05-14 21:07:04.222"), - ChangeTime: parseTime("2005-05-14 21:07:05.333"), - ExtendedAttributes: []ExtendedAttribute{ - {"user.foo", []byte("bar")}, - }, - }, - { - Name: "testXattrDir", - Type: "dir", - Subtree: nil, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0750 | os.ModeDir, - ModTime: parseTime("2005-05-14 21:07:03.111"), - AccessTime: parseTime("2005-05-14 21:07:04.222"), - ChangeTime: parseTime("2005-05-14 21:07:05.333"), - ExtendedAttributes: []ExtendedAttribute{ - {"user.foo", []byte("bar")}, - }, - }, - { - Name: "testXattrFileMacOSResourceFork", - Type: "file", - Content: IDs{}, - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - Mode: 0604, - ModTime: parseTime("2005-05-14 21:07:03.111"), - AccessTime: parseTime("2005-05-14 21:07:04.222"), - ChangeTime: parseTime("2005-05-14 21:07:05.333"), - ExtendedAttributes: []ExtendedAttribute{ - {"com.apple.ResourceFork", []byte("bar")}, - }, - }, -} - -func TestNodeRestoreAt(t *testing.T) { - tempdir := t.TempDir() - - for _, test := range nodeTests { - t.Run("", func(t *testing.T) { - var nodePath string - if test.ExtendedAttributes != nil { - if runtime.GOOS == "windows" { - // In windows extended attributes are case insensitive and windows returns - // the extended attributes in UPPER case. - // Update the tests to use UPPER case xattr names for windows. 
- extAttrArr := test.ExtendedAttributes - // Iterate through the array using pointers - for i := 0; i < len(extAttrArr); i++ { - extAttrArr[i].Name = strings.ToUpper(extAttrArr[i].Name) - } - } - for _, attr := range test.ExtendedAttributes { - if strings.HasPrefix(attr.Name, "com.apple.") && runtime.GOOS != "darwin" { - t.Skipf("attr %v only relevant on macOS", attr.Name) - } - } - - // tempdir might be backed by a filesystem that does not support - // extended attributes - nodePath = test.Name - defer func() { - _ = os.Remove(nodePath) - }() - } else { - nodePath = filepath.Join(tempdir, test.Name) - } - rtest.OK(t, NodeCreateAt(&test, nodePath)) - rtest.OK(t, NodeRestoreMetadata(&test, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) - - fi, err := os.Lstat(nodePath) - rtest.OK(t, err) - - n2, err := NodeFromFileInfo(nodePath, fi, false) - rtest.OK(t, err) - n3, err := NodeFromFileInfo(nodePath, fi, true) - rtest.OK(t, err) - rtest.Assert(t, n2.Equals(*n3), "unexpected node info mismatch %v", cmp.Diff(n2, n3)) - - rtest.Assert(t, test.Name == n2.Name, - "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name) - rtest.Assert(t, test.Type == n2.Type, - "%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type) - rtest.Assert(t, test.Size == n2.Size, - "%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size) - - if runtime.GOOS != "windows" { - rtest.Assert(t, test.UID == n2.UID, - "%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID) - rtest.Assert(t, test.GID == n2.GID, - "%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID) - if test.Type != "symlink" { - // On OpenBSD only root can set sticky bit (see sticky(8)). - if runtime.GOOS != "openbsd" && runtime.GOOS != "netbsd" && runtime.GOOS != "solaris" && test.Name == "testSticky" { - rtest.Assert(t, test.Mode == n2.Mode, - "%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode) - } - } - } - - AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime) - AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime) - if len(n2.ExtendedAttributes) == 0 { - n2.ExtendedAttributes = nil - } - rtest.Assert(t, reflect.DeepEqual(test.ExtendedAttributes, n2.ExtendedAttributes), - "%v: xattrs don't match (%v != %v)", test.Name, test.ExtendedAttributes, n2.ExtendedAttributes) - }) - } -} - -func AssertFsTimeEqual(t *testing.T, label string, nodeType string, t1 time.Time, t2 time.Time) { - var equal bool - - // Go currently doesn't support setting timestamps of symbolic links on darwin and bsd - if nodeType == "symlink" { - switch runtime.GOOS { - case "darwin", "freebsd", "openbsd", "netbsd", "solaris": - return - } - } - - switch runtime.GOOS { - case "darwin": - // HFS+ timestamps don't support sub-second precision, - // see https://en.wikipedia.org/wiki/Comparison_of_file_systems - diff := int(t1.Sub(t2).Seconds()) - equal = diff == 0 - default: - equal = t1.Equal(t2) - } - - rtest.Assert(t, equal, "%s: %s doesn't match (%v != %v)", label, nodeType, t1, t2) -} - func parseTimeNano(t testing.TB, s string) time.Time { // 2006-01-02T15:04:05.999999999Z07:00 ts, err := time.Parse(time.RFC3339Nano, s) @@ -397,14 +94,3 @@ func TestSymlinkSerializationFormat(t *testing.T) { test.Assert(t, n2.LinkTargetRaw == nil, "quoted link target is just a helper field and must be unset after decoding") } } - -func TestNodeRestoreMetadataError(t *testing.T) { - tempdir := t.TempDir() - - node := &nodeTests[0] - 
nodePath := filepath.Join(tempdir, node.Name) - - // This will fail because the target file does not exist - err := NodeRestoreMetadata(node, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }) - test.Assert(t, errors.Is(err, os.ErrNotExist), "failed for an unexpected reason") -} diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go index 8e0b3587ace..cdd6b3c18cc 100644 --- a/internal/restic/tree_test.go +++ b/internal/restic/tree_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/restic/restic/internal/archiver" + "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -86,7 +87,7 @@ func TestNodeComparison(t *testing.T) { fi, err := os.Lstat("tree_test.go") rtest.OK(t, err) - node, err := restic.NodeFromFileInfo("tree_test.go", fi, false) + node, err := fs.NodeFromFileInfo("tree_test.go", fi, false) rtest.OK(t, err) n2 := *node @@ -127,7 +128,7 @@ func TestTreeEqualSerialization(t *testing.T) { for _, fn := range files[:i] { fi, err := os.Lstat(fn) rtest.OK(t, err) - node, err := restic.NodeFromFileInfo(fn, fi, false) + node, err := fs.NodeFromFileInfo(fn, fi, false) rtest.OK(t, err) rtest.OK(t, tree.Insert(node)) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 83644c7ac68..26b6f347498 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -272,7 +272,7 @@ func (res *Restorer) restoreNodeTo(node *restic.Node, target, location string) e return errors.Wrap(err, "RemoveNode") } - err := restic.NodeCreateAt(node, target) + err := fs.NodeCreateAt(node, target) if err != nil { debug.Log("node.CreateAt(%s) error %v", target, err) return err @@ -288,7 +288,7 @@ func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location s return nil } debug.Log("restoreNodeMetadata %v %v %v", node.Name, target, location) - err := restic.NodeRestoreMetadata(node, target, res.Warn) + err := fs.NodeRestoreMetadata(node, target, res.Warn) if err != nil { debug.Log("node.RestoreMetadata(%s) error %v", target, err) } diff --git a/internal/restorer/restorer_windows_test.go b/internal/restorer/restorer_windows_test.go index 4764bed2d4c..9fcdfc48d39 100644 --- a/internal/restorer/restorer_windows_test.go +++ b/internal/restorer/restorer_windows_test.go @@ -16,6 +16,7 @@ import ( "unsafe" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" @@ -263,7 +264,7 @@ func setup(t *testing.T, nodesMap map[string]Node) *Restorer { //If the node is a directory add FILE_ATTRIBUTE_DIRECTORY to attributes fileattr |= windows.FILE_ATTRIBUTE_DIRECTORY } - attrs, err := restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{FileAttributes: &fileattr}) + attrs, err := fs.WindowsAttrsToGenericAttributes(fs.WindowsAttributes{FileAttributes: &fileattr}) test.OK(t, err) return attrs } From fc549c94621920ed71995e529443bc0b108a7103 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 10 Jul 2024 22:28:48 +0200 Subject: [PATCH 638/893] cleanup imports --- internal/restorer/fileswriter.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go index 962f666197d..d6f78f2d7cd 100644 --- 
a/internal/restorer/fileswriter.go +++ b/internal/restorer/fileswriter.go @@ -2,7 +2,6 @@ package restorer import ( "fmt" - stdfs "io/fs" "os" "sync" "syscall" @@ -82,7 +81,7 @@ func createFile(path string, createSize int64, sparse bool, allowRecursiveDelete return nil, err } - var fi stdfs.FileInfo + var fi os.FileInfo if f != nil { // stat to check that we've opened a regular file fi, err = f.Stat() @@ -135,7 +134,7 @@ func createFile(path string, createSize int64, sparse bool, allowRecursiveDelete return ensureSize(f, fi, createSize, sparse) } -func ensureSize(f *os.File, fi stdfs.FileInfo, createSize int64, sparse bool) (*os.File, error) { +func ensureSize(f *os.File, fi os.FileInfo, createSize int64, sparse bool) (*os.File, error) { if sparse { err := truncateSparse(f, createSize) if err != nil { From 0ddb4441d7e74eab98389dd2e917e99b5327e9a1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 21 Jul 2024 14:40:33 +0200 Subject: [PATCH 639/893] fs: clean up helper functions --- internal/archiver/archiver.go | 4 ++-- internal/archiver/testing.go | 4 ++-- internal/archiver/testing_test.go | 2 +- internal/fs/file.go | 20 -------------------- internal/fs/helpers.go | 13 ------------- 5 files changed, 5 insertions(+), 38 deletions(-) delete mode 100644 internal/fs/helpers.go diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index c576d047c81..4f099084323 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -446,7 +446,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous } switch { - case fs.IsRegularFile(fi): + case fi.Mode().IsRegular(): debug.Log(" %v regular file", target) // check if the file has not changed before performing a fopen operation (more expensive, specially @@ -505,7 +505,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous } // make sure it's still a file - if !fs.IsRegularFile(fi) { + if !fi.Mode().IsRegular() { err = errors.Errorf("file %v changed type, refusing to archive", fi.Name()) _ = file.Close() err = arch.error(abstarget, err) diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index 106e6844553..8bd854904f2 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -169,7 +169,7 @@ func TestEnsureFiles(t testing.TB, target string, dir TestDir) { } return nil case TestFile: - if !fs.IsRegularFile(fi) { + if !fi.Mode().IsRegular() { t.Errorf("is not a regular file: %v", path) return nil } @@ -208,7 +208,7 @@ func TestEnsureFiles(t testing.TB, target string, dir TestDir) { }) // then, traverse the directory again, looking for additional files - err := fs.Walk(target, func(path string, fi os.FileInfo, err error) error { + err := filepath.Walk(target, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } diff --git a/internal/archiver/testing_test.go b/internal/archiver/testing_test.go index ff3bd3668d4..bb4b63a827b 100644 --- a/internal/archiver/testing_test.go +++ b/internal/archiver/testing_test.go @@ -122,7 +122,7 @@ func TestTestCreateFiles(t *testing.T) { switch node := item.(type) { case TestFile: - if !fs.IsRegularFile(fi) { + if !fi.Mode().IsRegular() { t.Errorf("is not regular file: %v", name) continue } diff --git a/internal/fs/file.go b/internal/fs/file.go index 85b202dc8b8..1071f4e8781 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -3,7 +3,6 @@ package fs import ( "fmt" "os" - "path/filepath" "time" ) @@ -75,15 +74,6 @@ func Lstat(name string) 
(os.FileInfo, error) { return os.Lstat(fixpath(name)) } -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func Create(name string) (*os.File, error) { - return os.Create(fixpath(name)) -} - // Open opens a file for reading. func Open(name string) (File, error) { return os.Open(fixpath(name)) @@ -98,16 +88,6 @@ func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(fixpath(name), flag, perm) } -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. -func Walk(root string, walkFn filepath.WalkFunc) error { - return filepath.Walk(fixpath(root), walkFn) -} - // RemoveIfExists removes a file, returning no error if it does not exist. func RemoveIfExists(filename string) error { err := os.Remove(filename) diff --git a/internal/fs/helpers.go b/internal/fs/helpers.go deleted file mode 100644 index 4dd1e0e7338..00000000000 --- a/internal/fs/helpers.go +++ /dev/null @@ -1,13 +0,0 @@ -package fs - -import "os" - -// IsRegularFile returns true if fi belongs to a normal file. If fi is nil, -// false is returned. -func IsRegularFile(fi os.FileInfo) bool { - if fi == nil { - return false - } - - return fi.Mode()&os.ModeType == 0 -} From 80ed863aab455e3843136c69c3639d23cb4fd720 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 21 Jul 2024 15:00:34 +0200 Subject: [PATCH 640/893] repository: remove redundant cleanup code The temp files used by the packer manager are either delete after creation (unix) or marked as delete on close (windows). Thus, no explicit cleanup is necessary. --- internal/fs/file.go | 9 --------- internal/repository/packer_manager.go | 9 --------- 2 files changed, 18 deletions(-) diff --git a/internal/fs/file.go b/internal/fs/file.go index 1071f4e8781..356b466c3d0 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -88,15 +88,6 @@ func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(fixpath(name), flag, perm) } -// RemoveIfExists removes a file, returning no error if it does not exist. -func RemoveIfExists(filename string) error { - err := os.Remove(filename) - if err != nil && os.IsNotExist(err) { - err = nil - } - return err -} - // Chtimes changes the access and modification times of the named file, // similar to the Unix utime() or utimes() functions. 
// diff --git a/internal/repository/packer_manager.go b/internal/repository/packer_manager.go index 8dd8f71c1b3..731ad9a6ad0 100644 --- a/internal/repository/packer_manager.go +++ b/internal/repository/packer_manager.go @@ -6,7 +6,6 @@ import ( "crypto/sha256" "io" "os" - "runtime" "sync" "github.com/restic/restic/internal/backend" @@ -186,14 +185,6 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *packe return errors.Wrap(err, "close tempfile") } - // on windows the tempfile is automatically deleted on close - if runtime.GOOS != "windows" { - err = fs.RemoveIfExists(p.tmpfile.Name()) - if err != nil { - return errors.WithStack(err) - } - } - // update blobs in the index debug.Log(" updating blobs %v to pack %v", p.Packer.Blobs(), id) r.idx.StorePack(id, p.Packer.Blobs()) From 263709da8c1a711cf4979cdcbba2d90d110f9779 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 21 Jul 2024 15:03:17 +0200 Subject: [PATCH 641/893] fs: unexport isListxattrPermissionError --- internal/fs/node_aix.go | 4 ++-- internal/fs/node_netbsd.go | 4 ++-- internal/fs/node_openbsd.go | 4 ++-- internal/fs/node_xattr.go | 4 ++-- internal/fs/node_xattr_test.go | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/internal/fs/node_aix.go b/internal/fs/node_aix.go index 4e694442511..123985c2d9b 100644 --- a/internal/fs/node_aix.go +++ b/internal/fs/node_aix.go @@ -35,8 +35,8 @@ func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { return nil } -// IsListxattrPermissionError is a no-op on AIX. -func IsListxattrPermissionError(_ error) bool { +// isListxattrPermissionError is a no-op on AIX. +func isListxattrPermissionError(_ error) bool { return false } diff --git a/internal/fs/node_netbsd.go b/internal/fs/node_netbsd.go index c71e4bdf56f..9961258515d 100644 --- a/internal/fs/node_netbsd.go +++ b/internal/fs/node_netbsd.go @@ -25,8 +25,8 @@ func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { return nil } -// IsListxattrPermissionError is a no-op on netbsd. -func IsListxattrPermissionError(_ error) bool { +// isListxattrPermissionError is a no-op on netbsd. +func isListxattrPermissionError(_ error) bool { return false } diff --git a/internal/fs/node_openbsd.go b/internal/fs/node_openbsd.go index f74f2ae009f..62eb7861815 100644 --- a/internal/fs/node_openbsd.go +++ b/internal/fs/node_openbsd.go @@ -25,8 +25,8 @@ func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { return nil } -// IsListxattrPermissionError is a no-op on openbsd. -func IsListxattrPermissionError(_ error) bool { +// isListxattrPermissionError is a no-op on openbsd. 
+func isListxattrPermissionError(_ error) bool { return false } diff --git a/internal/fs/node_xattr.go b/internal/fs/node_xattr.go index 11bdf382bfe..55376ba58eb 100644 --- a/internal/fs/node_xattr.go +++ b/internal/fs/node_xattr.go @@ -28,7 +28,7 @@ func listxattr(path string) ([]string, error) { return l, handleXattrErr(err) } -func IsListxattrPermissionError(err error) bool { +func isListxattrPermissionError(err error) bool { var xerr *xattr.Error if errors.As(err, &xerr) { return xerr.Op == "xattr.list" && errors.Is(xerr.Err, os.ErrPermission) @@ -106,7 +106,7 @@ func nodeFillExtendedAttributes(node *restic.Node, path string, ignoreListError xattrs, err := listxattr(path) debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err) if err != nil { - if ignoreListError && IsListxattrPermissionError(err) { + if ignoreListError && isListxattrPermissionError(err) { return nil } return err diff --git a/internal/fs/node_xattr_test.go b/internal/fs/node_xattr_test.go index d948e3b31b1..3784dba45df 100644 --- a/internal/fs/node_xattr_test.go +++ b/internal/fs/node_xattr_test.go @@ -19,10 +19,10 @@ func TestIsListxattrPermissionError(t *testing.T) { } err := handleXattrErr(xerr) rtest.Assert(t, err != nil, "missing error") - rtest.Assert(t, IsListxattrPermissionError(err), "expected IsListxattrPermissionError to return true for %v", err) + rtest.Assert(t, isListxattrPermissionError(err), "expected IsListxattrPermissionError to return true for %v", err) xerr.Err = os.ErrNotExist err = handleXattrErr(xerr) rtest.Assert(t, err != nil, "missing error") - rtest.Assert(t, !IsListxattrPermissionError(err), "expected IsListxattrPermissionError to return false for %v", err) + rtest.Assert(t, !isListxattrPermissionError(err), "expected IsListxattrPermissionError to return false for %v", err) } From 507842b614bb80c07a37872c304274d5b2378ea8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 21 Jul 2024 16:30:49 +0200 Subject: [PATCH 642/893] fs: remove Open method from FS interface --- internal/archiver/archiver_test.go | 17 ----------- internal/archiver/file_saver_test.go | 2 +- internal/fs/fs_local.go | 10 ------- internal/fs/fs_local_vss.go | 5 ---- internal/fs/fs_reader.go | 45 ++++++++++------------------ internal/fs/fs_reader_test.go | 35 +++------------------- internal/fs/fs_track.go | 10 ------- internal/fs/interface.go | 1 - 8 files changed, 21 insertions(+), 104 deletions(-) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 18151eb024f..d67b5b06aa1 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1664,15 +1664,6 @@ type MockFS struct { bytesRead map[string]int // tracks bytes read from all opened files } -func (m *MockFS) Open(name string) (fs.File, error) { - f, err := m.FS.Open(name) - if err != nil { - return f, err - } - - return MockFile{File: f, fs: m, filename: name}, nil -} - func (m *MockFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { f, err := m.FS.OpenFile(name, flag, perm) if err != nil { @@ -2061,14 +2052,6 @@ type TrackFS struct { m sync.Mutex } -func (m *TrackFS) Open(name string) (fs.File, error) { - m.m.Lock() - m.opened[name]++ - m.m.Unlock() - - return m.FS.Open(name) -} - func (m *TrackFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { m.m.Lock() m.opened[name]++ diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index 4a43275725a..948d7ce3ccf 100644 --- a/internal/archiver/file_saver_test.go +++ 
b/internal/archiver/file_saver_test.go @@ -72,7 +72,7 @@ func TestFileSaver(t *testing.T) { var results []FutureNode for _, filename := range files { - f, err := testFs.Open(filename) + f, err := testFs.OpenFile(filename, os.O_RDONLY, 0) if err != nil { t.Fatal(err) } diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 48c40dc9098..0bcbf7f3a85 100644 --- a/internal/fs/fs_local.go +++ b/internal/fs/fs_local.go @@ -18,16 +18,6 @@ func (fs Local) VolumeName(path string) string { return filepath.VolumeName(path) } -// Open opens a file for reading. -func (fs Local) Open(name string) (File, error) { - f, err := os.Open(fixpath(name)) - if err != nil { - return nil, err - } - _ = setFlags(f) - return f, nil -} - // OpenFile is the generalized open call; most users will use Open // or Create instead. It opens the named file with specified flag // (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 718dfc46d9f..46b40d013e9 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -125,11 +125,6 @@ func (fs *LocalVss) DeleteSnapshots() { fs.snapshots = activeSnapshots } -// Open wraps the Open method of the underlying file system. -func (fs *LocalVss) Open(name string) (File, error) { - return os.Open(fs.snapshotPath(name)) -} - // OpenFile wraps the Open method of the underlying file system. func (fs *LocalVss) OpenFile(name string, flag int, perm os.FileMode) (File, error) { return os.OpenFile(fs.snapshotPath(name), flag, perm) diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 47af74245be..93a42f9eb9f 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -39,29 +39,6 @@ func (fs *Reader) VolumeName(_ string) string { return "" } -// Open opens a file for reading. -func (fs *Reader) Open(name string) (f File, err error) { - switch name { - case fs.Name: - fs.open.Do(func() { - f = newReaderFile(fs.ReadCloser, fs.fi(), fs.AllowEmptyFile) - }) - - if f == nil { - return nil, pathError("open", name, syscall.EIO) - } - - return f, nil - case "/", ".": - f = fakeDir{ - entries: []os.FileInfo{fs.fi()}, - } - return f, nil - } - - return nil, pathError("open", name, syscall.ENOENT) -} - func (fs *Reader) fi() os.FileInfo { return fakeFileInfo{ name: fs.Name, @@ -82,15 +59,25 @@ func (fs *Reader) OpenFile(name string, flag int, _ os.FileMode) (f File, err er fmt.Errorf("invalid combination of flags 0x%x", flag)) } - fs.open.Do(func() { - f = newReaderFile(fs.ReadCloser, fs.fi(), fs.AllowEmptyFile) - }) + switch name { + case fs.Name: + fs.open.Do(func() { + f = newReaderFile(fs.ReadCloser, fs.fi(), fs.AllowEmptyFile) + }) + + if f == nil { + return nil, pathError("open", name, syscall.EIO) + } - if f == nil { - return nil, pathError("open", name, syscall.EIO) + return f, nil + case "/", ".": + f = fakeDir{ + entries: []os.FileInfo{fs.fi()}, + } + return f, nil } - return f, nil + return nil, pathError("open", name, syscall.ENOENT) } // Stat returns a FileInfo describing the named file. 
If there is an error, it diff --git a/internal/fs/fs_reader_test.go b/internal/fs/fs_reader_test.go index d3ef5608a2d..9fa67b5ac31 100644 --- a/internal/fs/fs_reader_test.go +++ b/internal/fs/fs_reader_test.go @@ -15,27 +15,6 @@ import ( "github.com/restic/restic/internal/test" ) -func verifyFileContentOpen(t testing.TB, fs FS, filename string, want []byte) { - f, err := fs.Open(filename) - if err != nil { - t.Fatal(err) - } - - buf, err := io.ReadAll(f) - if err != nil { - t.Fatal(err) - } - - err = f.Close() - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(want, buf) { - t.Error(cmp.Diff(want, buf)) - } -} - func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte) { f, err := fs.OpenFile(filename, O_RDONLY, 0) if err != nil { @@ -58,7 +37,7 @@ func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte } func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { - f, err := fs.Open(dir) + f, err := fs.OpenFile(dir, os.O_RDONLY, 0) if err != nil { t.Fatal(err) } @@ -96,7 +75,7 @@ func (s fiSlice) Swap(i, j int) { } func verifyDirectoryContentsFI(t testing.TB, fs FS, dir string, want []os.FileInfo) { - f, err := fs.Open(dir) + f, err := fs.OpenFile(dir, os.O_RDONLY, 0) if err != nil { t.Fatal(err) } @@ -219,12 +198,6 @@ func TestFSReader(t *testing.T) { verifyDirectoryContentsFI(t, fs, ".", []os.FileInfo{fi}) }, }, - { - name: "file/Open", - f: func(t *testing.T, fs FS) { - verifyFileContentOpen(t, fs, filename, data) - }, - }, { name: "file/OpenFile", f: func(t *testing.T, fs FS) { @@ -245,7 +218,7 @@ func TestFSReader(t *testing.T) { { name: "file/Stat", f: func(t *testing.T, fs FS) { - f, err := fs.Open(filename) + f, err := fs.OpenFile(filename, os.O_RDONLY, 0) if err != nil { t.Fatal(err) } @@ -417,7 +390,7 @@ func TestFSReaderMinFileSize(t *testing.T) { AllowEmptyFile: test.allowEmpty, } - f, err := fs.Open("testfile") + f, err := fs.OpenFile("testfile", os.O_RDONLY, 0) if err != nil { t.Fatal(err) } diff --git a/internal/fs/fs_track.go b/internal/fs/fs_track.go index 0c65a856440..366bbee762a 100644 --- a/internal/fs/fs_track.go +++ b/internal/fs/fs_track.go @@ -15,16 +15,6 @@ type Track struct { FS } -// Open wraps the Open method of the underlying file system. -func (fs Track) Open(name string) (File, error) { - f, err := fs.FS.Open(fixpath(name)) - if err != nil { - return nil, err - } - - return newTrackFile(debug.Stack(), name, f), nil -} - // OpenFile wraps the OpenFile method of the underlying file system. func (fs Track) OpenFile(name string, flag int, perm os.FileMode) (File, error) { f, err := fs.FS.OpenFile(fixpath(name), flag, perm) diff --git a/internal/fs/interface.go b/internal/fs/interface.go index b26c56944bc..e1f4ef2d980 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -7,7 +7,6 @@ import ( // FS bundles all methods needed for a file system. 
type FS interface { - Open(name string) (File, error) OpenFile(name string, flag int, perm os.FileMode) (File, error) Stat(name string) (os.FileInfo, error) Lstat(name string) (os.FileInfo, error) From c6fae0320ef7b38a789d490be1ec53a7f8ca430d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 11:26:52 +0200 Subject: [PATCH 643/893] archiver: hide implementation details --- internal/archiver/archiver.go | 106 ++++++++++++++------------- internal/archiver/archiver_test.go | 2 +- internal/archiver/blob_saver.go | 34 ++++----- internal/archiver/blob_saver_test.go | 16 ++-- internal/archiver/buffer.go | 26 +++---- internal/archiver/doc.go | 9 --- internal/archiver/file_saver.go | 36 ++++----- internal/archiver/file_saver_test.go | 10 +-- internal/archiver/scanner.go | 4 +- internal/archiver/tree.go | 54 +++++++------- internal/archiver/tree_saver.go | 32 ++++---- internal/archiver/tree_saver_test.go | 16 ++-- internal/archiver/tree_test.go | 102 +++++++++++++------------- 13 files changed, 223 insertions(+), 224 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 4f099084323..397347bcba2 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -75,6 +75,14 @@ type archiverRepo interface { } // Archiver saves a directory structure to the repo. +// +// An Archiver has a number of worker goroutines handling saving the different +// data structures to the repository, the details are implemented by the +// fileSaver, blobSaver, and treeSaver types. +// +// The main goroutine (the one calling Snapshot()) traverses the directory tree +// and delegates all work to these worker pools. They return a futureNode which +// can be resolved later, by calling Wait() on it. type Archiver struct { Repo archiverRepo SelectByName SelectByNameFunc @@ -82,9 +90,9 @@ type Archiver struct { FS fs.FS Options Options - blobSaver *BlobSaver - fileSaver *FileSaver - treeSaver *TreeSaver + blobSaver *blobSaver + fileSaver *fileSaver + treeSaver *treeSaver mu sync.Mutex summary *Summary @@ -160,7 +168,7 @@ func (o Options) ApplyDefaults() Options { if o.SaveTreeConcurrency == 0 { // can either wait for a file, wait for a tree, serialize a tree or wait for saveblob // the last two are cpu-bound and thus mutually exclusive. - // Also allow waiting for FileReadConcurrency files, this is the maximum of FutureFiles + // Also allow waiting for FileReadConcurrency files, this is the maximum of files // which currently can be in progress. The main backup loop blocks when trying to queue // more files to read. o.SaveTreeConcurrency = uint(runtime.GOMAXPROCS(0)) + o.ReadConcurrency @@ -297,27 +305,27 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { // saveDir stores a directory in the repo and returns the node. snPath is the // path within the current snapshot. 
-func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) { +func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete fileCompleteFunc) (d futureNode, err error) { debug.Log("%v %v", snPath, dir) treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi, false) if err != nil { - return FutureNode{}, err + return futureNode{}, err } names, err := fs.Readdirnames(arch.FS, dir, fs.O_NOFOLLOW) if err != nil { - return FutureNode{}, err + return futureNode{}, err } sort.Strings(names) - nodes := make([]FutureNode, 0, len(names)) + nodes := make([]futureNode, 0, len(names)) for _, name := range names { // test if context has been cancelled if ctx.Err() != nil { debug.Log("context has been cancelled, aborting") - return FutureNode{}, ctx.Err() + return futureNode{}, ctx.Err() } pathname := arch.FS.Join(dir, name) @@ -333,7 +341,7 @@ func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi continue } - return FutureNode{}, err + return futureNode{}, err } if excluded { @@ -348,11 +356,11 @@ func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi return fn, nil } -// FutureNode holds a reference to a channel that returns a FutureNodeResult +// futureNode holds a reference to a channel that returns a FutureNodeResult // or a reference to an already existing result. If the result is available // immediately, then storing a reference directly requires less memory than // using the indirection via a channel. -type FutureNode struct { +type futureNode struct { ch <-chan futureNodeResult res *futureNodeResult } @@ -365,18 +373,18 @@ type futureNodeResult struct { err error } -func newFutureNode() (FutureNode, chan<- futureNodeResult) { +func newFutureNode() (futureNode, chan<- futureNodeResult) { ch := make(chan futureNodeResult, 1) - return FutureNode{ch: ch}, ch + return futureNode{ch: ch}, ch } -func newFutureNodeWithResult(res futureNodeResult) FutureNode { - return FutureNode{ +func newFutureNodeWithResult(res futureNodeResult) futureNode { + return futureNode{ res: &res, } } -func (fn *FutureNode) take(ctx context.Context) futureNodeResult { +func (fn *futureNode) take(ctx context.Context) futureNodeResult { if fn.res != nil { res := fn.res // free result @@ -415,19 +423,19 @@ func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool { // Errors and completion needs to be handled by the caller. // // snPath is the path within the current snapshot. 
-func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) { +func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *restic.Node) (fn futureNode, excluded bool, err error) { start := time.Now() debug.Log("%v target %q, previous %v", snPath, target, previous) abstarget, err := arch.FS.Abs(target) if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } // exclude files by path before running Lstat to reduce number of lstat calls if !arch.SelectByName(abstarget) { debug.Log("%v is excluded by path", target) - return FutureNode{}, true, nil + return futureNode{}, true, nil } // get file info and run remaining select functions that require file information @@ -436,13 +444,13 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous debug.Log("lstat() for %v returned error: %v", target, err) err = arch.error(abstarget, err) if err != nil { - return FutureNode{}, false, errors.WithStack(err) + return futureNode{}, false, errors.WithStack(err) } - return FutureNode{}, true, nil + return futureNode{}, true, nil } if !arch.Select(abstarget, fi) { debug.Log("%v is excluded", target) - return FutureNode{}, true, nil + return futureNode{}, true, nil } switch { @@ -458,7 +466,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous arch.CompleteBlob(previous.Size) node, err := arch.nodeFromFileInfo(snPath, target, fi, false) if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } // copy list of blobs @@ -477,7 +485,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous err := errors.Errorf("parts of %v not found in the repository index; storing the file again", target) err = arch.error(abstarget, err) if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } } @@ -488,9 +496,9 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous debug.Log("Openfile() for %v returned error: %v", target, err) err = arch.error(abstarget, err) if err != nil { - return FutureNode{}, false, errors.WithStack(err) + return futureNode{}, false, errors.WithStack(err) } - return FutureNode{}, true, nil + return futureNode{}, true, nil } fi, err = file.Stat() @@ -499,9 +507,9 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous _ = file.Close() err = arch.error(abstarget, err) if err != nil { - return FutureNode{}, false, errors.WithStack(err) + return futureNode{}, false, errors.WithStack(err) } - return FutureNode{}, true, nil + return futureNode{}, true, nil } // make sure it's still a file @@ -510,9 +518,9 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous _ = file.Close() err = arch.error(abstarget, err) if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } - return FutureNode{}, true, nil + return futureNode{}, true, nil } // Save will close the file, we don't need to do that @@ -533,7 +541,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous err = arch.error(abstarget, err) } if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree, @@ -542,19 +550,19 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous }) if err != nil { debug.Log("SaveDir for %v returned error: %v", snPath, err) - 
return FutureNode{}, false, err + return futureNode{}, false, err } case fi.Mode()&os.ModeSocket > 0: debug.Log(" %v is a socket, ignoring", target) - return FutureNode{}, true, nil + return futureNode{}, true, nil default: debug.Log(" %v other", target) node, err := arch.nodeFromFileInfo(snPath, target, fi, false) if err != nil { - return FutureNode{}, false, err + return futureNode{}, false, err } fn = newFutureNodeWithResult(futureNodeResult{ snPath: snPath, @@ -621,17 +629,17 @@ func (arch *Archiver) statDir(dir string) (os.FileInfo, error) { // saveTree stores a Tree in the repo, returned is the tree. snPath is the path // within the current snapshot. -func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) { +func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, previous *restic.Tree, complete fileCompleteFunc) (futureNode, int, error) { var node *restic.Node if snPath != "/" { if atree.FileInfoPath == "" { - return FutureNode{}, 0, errors.Errorf("FileInfoPath for %v is empty", snPath) + return futureNode{}, 0, errors.Errorf("FileInfoPath for %v is empty", snPath) } fi, err := arch.statDir(atree.FileInfoPath) if err != nil { - return FutureNode{}, 0, err + return futureNode{}, 0, err } debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath) @@ -639,7 +647,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, // thus ignore errors for such folders. node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi, true) if err != nil { - return FutureNode{}, 0, err + return futureNode{}, 0, err } } else { // fake root node @@ -648,7 +656,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, debug.Log("%v (%v nodes), parent %v", snPath, len(atree.Nodes), previous) nodeNames := atree.NodeNames() - nodes := make([]FutureNode, 0, len(nodeNames)) + nodes := make([]futureNode, 0, len(nodeNames)) // iterate over the nodes of atree in lexicographic (=deterministic) order for _, name := range nodeNames { @@ -656,7 +664,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, // test if context has been cancelled if ctx.Err() != nil { - return FutureNode{}, 0, ctx.Err() + return futureNode{}, 0, ctx.Err() } // this is a leaf node @@ -669,11 +677,11 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, // ignore error continue } - return FutureNode{}, 0, err + return futureNode{}, 0, err } if err != nil { - return FutureNode{}, 0, err + return futureNode{}, 0, err } if !excluded { @@ -691,7 +699,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, err = arch.error(join(snPath, name), err) } if err != nil { - return FutureNode{}, 0, err + return futureNode{}, 0, err } // not a leaf node, archive subtree @@ -699,7 +707,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, arch.trackItem(snItem, oldNode, n, is, time.Since(start)) }) if err != nil { - return FutureNode{}, 0, err + return futureNode{}, 0, err } nodes = append(nodes, fn) } @@ -779,16 +787,16 @@ func (arch *Archiver) loadParentTree(ctx context.Context, sn *restic.Snapshot) * // runWorkers starts the worker pools, which are stopped when the context is cancelled. 
func (arch *Archiver) runWorkers(ctx context.Context, wg *errgroup.Group) { - arch.blobSaver = NewBlobSaver(ctx, wg, arch.Repo, arch.Options.SaveBlobConcurrency) + arch.blobSaver = newBlobSaver(ctx, wg, arch.Repo, arch.Options.SaveBlobConcurrency) - arch.fileSaver = NewFileSaver(ctx, wg, + arch.fileSaver = newFileSaver(ctx, wg, arch.blobSaver.Save, arch.Repo.Config().ChunkerPolynomial, arch.Options.ReadConcurrency, arch.Options.SaveBlobConcurrency) arch.fileSaver.CompleteBlob = arch.CompleteBlob arch.fileSaver.NodeFromFileInfo = arch.nodeFromFileInfo - arch.treeSaver = NewTreeSaver(ctx, wg, arch.Options.SaveTreeConcurrency, arch.blobSaver.Save, arch.Error) + arch.treeSaver = newTreeSaver(ctx, wg, arch.Options.SaveTreeConcurrency, arch.blobSaver.Save, arch.Error) } func (arch *Archiver) stopWorkers() { @@ -809,7 +817,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps return nil, restic.ID{}, nil, err } - atree, err := NewTree(arch.FS, cleanTargets) + atree, err := newTree(arch.FS, cleanTargets) if err != nil { return nil, restic.ID{}, nil, err } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index d67b5b06aa1..74fecef8067 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1121,7 +1121,7 @@ func TestArchiverSaveTree(t *testing.T) { test.prepare(t) } - atree, err := NewTree(testFS, test.targets) + atree, err := newTree(testFS, test.targets) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/blob_saver.go b/internal/archiver/blob_saver.go index d4347a16918..356a32ce290 100644 --- a/internal/archiver/blob_saver.go +++ b/internal/archiver/blob_saver.go @@ -9,22 +9,22 @@ import ( "golang.org/x/sync/errgroup" ) -// Saver allows saving a blob. -type Saver interface { +// saver allows saving a blob. +type saver interface { SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) } -// BlobSaver concurrently saves incoming blobs to the repo. -type BlobSaver struct { - repo Saver +// blobSaver concurrently saves incoming blobs to the repo. +type blobSaver struct { + repo saver ch chan<- saveBlobJob } -// NewBlobSaver returns a new blob. A worker pool is started, it is stopped +// newBlobSaver returns a new blob. A worker pool is started, it is stopped // when ctx is cancelled. -func NewBlobSaver(ctx context.Context, wg *errgroup.Group, repo Saver, workers uint) *BlobSaver { +func newBlobSaver(ctx context.Context, wg *errgroup.Group, repo saver, workers uint) *blobSaver { ch := make(chan saveBlobJob) - s := &BlobSaver{ + s := &blobSaver{ repo: repo, ch: ch, } @@ -38,13 +38,13 @@ func NewBlobSaver(ctx context.Context, wg *errgroup.Group, repo Saver, workers u return s } -func (s *BlobSaver) TriggerShutdown() { +func (s *blobSaver) TriggerShutdown() { close(s.ch) } // Save stores a blob in the repo. It checks the index and the known blobs // before saving anything. It takes ownership of the buffer passed in. 
-func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf *Buffer, filename string, cb func(res SaveBlobResponse)) { +func (s *blobSaver) Save(ctx context.Context, t restic.BlobType, buf *buffer, filename string, cb func(res saveBlobResponse)) { select { case s.ch <- saveBlobJob{BlobType: t, buf: buf, fn: filename, cb: cb}: case <-ctx.Done(): @@ -54,26 +54,26 @@ func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf *Buffer, fi type saveBlobJob struct { restic.BlobType - buf *Buffer + buf *buffer fn string - cb func(res SaveBlobResponse) + cb func(res saveBlobResponse) } -type SaveBlobResponse struct { +type saveBlobResponse struct { id restic.ID length int sizeInRepo int known bool } -func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) (SaveBlobResponse, error) { +func (s *blobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) (saveBlobResponse, error) { id, known, sizeInRepo, err := s.repo.SaveBlob(ctx, t, buf, restic.ID{}, false) if err != nil { - return SaveBlobResponse{}, err + return saveBlobResponse{}, err } - return SaveBlobResponse{ + return saveBlobResponse{ id: id, length: len(buf), sizeInRepo: sizeInRepo, @@ -81,7 +81,7 @@ func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) }, nil } -func (s *BlobSaver) worker(ctx context.Context, jobs <-chan saveBlobJob) error { +func (s *blobSaver) worker(ctx context.Context, jobs <-chan saveBlobJob) error { for { var job saveBlobJob var ok bool diff --git a/internal/archiver/blob_saver_test.go b/internal/archiver/blob_saver_test.go index f7ef2f47dfe..e23ed12e5e0 100644 --- a/internal/archiver/blob_saver_test.go +++ b/internal/archiver/blob_saver_test.go @@ -38,20 +38,20 @@ func TestBlobSaver(t *testing.T) { wg, ctx := errgroup.WithContext(ctx) saver := &saveFail{} - b := NewBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) + b := newBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) var wait sync.WaitGroup - var results []SaveBlobResponse + var results []saveBlobResponse var lock sync.Mutex wait.Add(20) for i := 0; i < 20; i++ { - buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))} + buf := &buffer{Data: []byte(fmt.Sprintf("foo%d", i))} idx := i lock.Lock() - results = append(results, SaveBlobResponse{}) + results = append(results, saveBlobResponse{}) lock.Unlock() - b.Save(ctx, restic.DataBlob, buf, "file", func(res SaveBlobResponse) { + b.Save(ctx, restic.DataBlob, buf, "file", func(res saveBlobResponse) { lock.Lock() results[idx] = res lock.Unlock() @@ -95,11 +95,11 @@ func TestBlobSaverError(t *testing.T) { failAt: int32(test.failAt), } - b := NewBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) + b := newBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) for i := 0; i < test.blobs; i++ { - buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))} - b.Save(ctx, restic.DataBlob, buf, "errfile", func(res SaveBlobResponse) {}) + buf := &buffer{Data: []byte(fmt.Sprintf("foo%d", i))} + b.Save(ctx, restic.DataBlob, buf, "errfile", func(res saveBlobResponse) {}) } b.TriggerShutdown() diff --git a/internal/archiver/buffer.go b/internal/archiver/buffer.go index 39bda26682d..d5bfb46b393 100644 --- a/internal/archiver/buffer.go +++ b/internal/archiver/buffer.go @@ -1,14 +1,14 @@ package archiver -// Buffer is a reusable buffer. After the buffer has been used, Release should +// buffer is a reusable buffer. After the buffer has been used, Release should // be called so the underlying slice is put back into the pool. 
-type Buffer struct { +type buffer struct { Data []byte - pool *BufferPool + pool *bufferPool } // Release puts the buffer back into the pool it came from. -func (b *Buffer) Release() { +func (b *buffer) Release() { pool := b.pool if pool == nil || cap(b.Data) > pool.defaultSize { return @@ -20,32 +20,32 @@ func (b *Buffer) Release() { } } -// BufferPool implements a limited set of reusable buffers. -type BufferPool struct { - ch chan *Buffer +// bufferPool implements a limited set of reusable buffers. +type bufferPool struct { + ch chan *buffer defaultSize int } -// NewBufferPool initializes a new buffer pool. The pool stores at most max +// newBufferPool initializes a new buffer pool. The pool stores at most max // items. New buffers are created with defaultSize. Buffers that have grown // larger are not put back. -func NewBufferPool(max int, defaultSize int) *BufferPool { - b := &BufferPool{ - ch: make(chan *Buffer, max), +func newBufferPool(max int, defaultSize int) *bufferPool { + b := &bufferPool{ + ch: make(chan *buffer, max), defaultSize: defaultSize, } return b } // Get returns a new buffer, either from the pool or newly allocated. -func (pool *BufferPool) Get() *Buffer { +func (pool *bufferPool) Get() *buffer { select { case buf := <-pool.ch: return buf default: } - b := &Buffer{ + b := &buffer{ Data: make([]byte, pool.defaultSize), pool: pool, } diff --git a/internal/archiver/doc.go b/internal/archiver/doc.go index 928145aa203..1b9603975cf 100644 --- a/internal/archiver/doc.go +++ b/internal/archiver/doc.go @@ -1,12 +1,3 @@ // Package archiver contains the code which reads files, splits them into // chunks and saves the data to the repository. -// -// An Archiver has a number of worker goroutines handling saving the different -// data structures to the repository, the details are implemented by the -// FileSaver, BlobSaver, and TreeSaver types. -// -// The main goroutine (the one calling Snapshot()) traverses the directory tree -// and delegates all work to these worker pools. They return a type -// (FutureFile, FutureBlob, and FutureTree) which can be resolved later, by -// calling Wait() on it. package archiver diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index d10334301ac..fa19cab861a 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -15,13 +15,13 @@ import ( "golang.org/x/sync/errgroup" ) -// SaveBlobFn saves a blob to a repo. -type SaveBlobFn func(context.Context, restic.BlobType, *Buffer, string, func(res SaveBlobResponse)) +// saveBlobFn saves a blob to a repo. +type saveBlobFn func(context.Context, restic.BlobType, *buffer, string, func(res saveBlobResponse)) -// FileSaver concurrently saves incoming files to the repo. -type FileSaver struct { - saveFilePool *BufferPool - saveBlob SaveBlobFn +// fileSaver concurrently saves incoming files to the repo. +type fileSaver struct { + saveFilePool *bufferPool + saveBlob saveBlobFn pol chunker.Pol @@ -32,18 +32,18 @@ type FileSaver struct { NodeFromFileInfo func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) } -// NewFileSaver returns a new file saver. A worker pool with fileWorkers is +// newFileSaver returns a new file saver. A worker pool with fileWorkers is // started, it is stopped when ctx is cancelled. 
-func NewFileSaver(ctx context.Context, wg *errgroup.Group, save SaveBlobFn, pol chunker.Pol, fileWorkers, blobWorkers uint) *FileSaver { +func newFileSaver(ctx context.Context, wg *errgroup.Group, save saveBlobFn, pol chunker.Pol, fileWorkers, blobWorkers uint) *fileSaver { ch := make(chan saveFileJob) debug.Log("new file saver with %v file workers and %v blob workers", fileWorkers, blobWorkers) poolSize := fileWorkers + blobWorkers - s := &FileSaver{ + s := &fileSaver{ saveBlob: save, - saveFilePool: NewBufferPool(int(poolSize), chunker.MaxSize), + saveFilePool: newBufferPool(int(poolSize), chunker.MaxSize), pol: pol, ch: ch, @@ -60,18 +60,18 @@ func NewFileSaver(ctx context.Context, wg *errgroup.Group, save SaveBlobFn, pol return s } -func (s *FileSaver) TriggerShutdown() { +func (s *fileSaver) TriggerShutdown() { close(s.ch) } -// CompleteFunc is called when the file has been saved. -type CompleteFunc func(*restic.Node, ItemStats) +// fileCompleteFunc is called when the file has been saved. +type fileCompleteFunc func(*restic.Node, ItemStats) // Save stores the file f and returns the data once it has been completed. The // file is closed by Save. completeReading is only called if the file was read // successfully. complete is always called. If completeReading is called, then // this will always happen before calling complete. -func (s *FileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, fi os.FileInfo, start func(), completeReading func(), complete CompleteFunc) FutureNode { +func (s *fileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, fi os.FileInfo, start func(), completeReading func(), complete fileCompleteFunc) futureNode { fn, ch := newFutureNode() job := saveFileJob{ snPath: snPath, @@ -105,11 +105,11 @@ type saveFileJob struct { start func() completeReading func() - complete CompleteFunc + complete fileCompleteFunc } // saveFile stores the file f in the repo, then closes it. 
-func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, fi os.FileInfo, start func(), finishReading func(), finish func(res futureNodeResult)) { +func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, fi os.FileInfo, start func(), finishReading func(), finish func(res futureNodeResult)) { start() fnr := futureNodeResult{ @@ -205,7 +205,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat node.Content = append(node.Content, restic.ID{}) lock.Unlock() - s.saveBlob(ctx, restic.DataBlob, buf, target, func(sbr SaveBlobResponse) { + s.saveBlob(ctx, restic.DataBlob, buf, target, func(sbr saveBlobResponse) { lock.Lock() if !sbr.known { fnr.stats.DataBlobs++ @@ -246,7 +246,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat completeBlob() } -func (s *FileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) { +func (s *fileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) { // a worker has one chunker which is reused for each file (because it contains a rather large buffer) chnker := chunker.New(nil, s.pol) diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index 948d7ce3ccf..ede616e28d3 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -30,11 +30,11 @@ func createTestFiles(t testing.TB, num int) (files []string) { return files } -func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Context, *errgroup.Group) { +func startFileSaver(ctx context.Context, t testing.TB) (*fileSaver, context.Context, *errgroup.Group) { wg, ctx := errgroup.WithContext(ctx) - saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *Buffer, _ string, cb func(SaveBlobResponse)) { - cb(SaveBlobResponse{ + saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *buffer, _ string, cb func(saveBlobResponse)) { + cb(saveBlobResponse{ id: restic.Hash(buf.Data), length: len(buf.Data), sizeInRepo: len(buf.Data), @@ -48,7 +48,7 @@ func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Cont t.Fatal(err) } - s := NewFileSaver(ctx, wg, saveBlob, pol, workers, workers) + s := newFileSaver(ctx, wg, saveBlob, pol, workers, workers) s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { return fs.NodeFromFileInfo(filename, fi, ignoreXattrListError) } @@ -69,7 +69,7 @@ func TestFileSaver(t *testing.T) { testFs := fs.Local{} s, ctx, wg := startFileSaver(ctx, t) - var results []FutureNode + var results []futureNode for _, filename := range files { f, err := testFs.OpenFile(filename, os.O_RDONLY, 0) diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go index d61e5ce4785..433d388212e 100644 --- a/internal/archiver/scanner.go +++ b/internal/archiver/scanner.go @@ -38,7 +38,7 @@ type ScanStats struct { Bytes uint64 } -func (s *Scanner) scanTree(ctx context.Context, stats ScanStats, tree Tree) (ScanStats, error) { +func (s *Scanner) scanTree(ctx context.Context, stats ScanStats, tree tree) (ScanStats, error) { // traverse the path in the file system for all leaf nodes if tree.Leaf() { abstarget, err := s.FS.Abs(tree.Path) @@ -83,7 +83,7 @@ func (s *Scanner) Scan(ctx context.Context, targets []string) error { debug.Log("clean targets %v", cleanTargets) // we're using the same tree representation as the archiver does - tree, err 
:= NewTree(s.FS, cleanTargets) + tree, err := newTree(s.FS, cleanTargets) if err != nil { return err } diff --git a/internal/archiver/tree.go b/internal/archiver/tree.go index cd03ba52172..f4eb1abde62 100644 --- a/internal/archiver/tree.go +++ b/internal/archiver/tree.go @@ -9,7 +9,7 @@ import ( "github.com/restic/restic/internal/fs" ) -// Tree recursively defines how a snapshot should look like when +// tree recursively defines how a snapshot should look like when // archived. // // When `Path` is set, this is a leaf node and the contents of `Path` should be @@ -20,8 +20,8 @@ import ( // // `FileInfoPath` is used to extract metadata for intermediate (=non-leaf) // trees. -type Tree struct { - Nodes map[string]Tree +type tree struct { + Nodes map[string]tree Path string // where the files/dirs to be saved are found FileInfoPath string // where the dir can be found that is not included itself, but its subdirs Root string // parent directory of the tree @@ -95,13 +95,13 @@ func rootDirectory(fs fs.FS, target string) string { } // Add adds a new file or directory to the tree. -func (t *Tree) Add(fs fs.FS, path string) error { +func (t *tree) Add(fs fs.FS, path string) error { if path == "" { panic("invalid path (empty string)") } if t.Nodes == nil { - t.Nodes = make(map[string]Tree) + t.Nodes = make(map[string]tree) } pc, virtualPrefix := pathComponents(fs, path, false) @@ -111,7 +111,7 @@ func (t *Tree) Add(fs fs.FS, path string) error { name := pc[0] root := rootDirectory(fs, path) - tree := Tree{Root: root} + tree := tree{Root: root} origName := name i := 0 @@ -152,63 +152,63 @@ func (t *Tree) Add(fs fs.FS, path string) error { } // add adds a new target path into the tree. -func (t *Tree) add(fs fs.FS, target, root string, pc []string) error { +func (t *tree) add(fs fs.FS, target, root string, pc []string) error { if len(pc) == 0 { return errors.Errorf("invalid path %q", target) } if t.Nodes == nil { - t.Nodes = make(map[string]Tree) + t.Nodes = make(map[string]tree) } name := pc[0] if len(pc) == 1 { - tree, ok := t.Nodes[name] + node, ok := t.Nodes[name] if !ok { - t.Nodes[name] = Tree{Path: target} + t.Nodes[name] = tree{Path: target} return nil } - if tree.Path != "" { + if node.Path != "" { return errors.Errorf("path is already set for target %v", target) } - tree.Path = target - t.Nodes[name] = tree + node.Path = target + t.Nodes[name] = node return nil } - tree := Tree{} + node := tree{} if other, ok := t.Nodes[name]; ok { - tree = other + node = other } subroot := fs.Join(root, name) - tree.FileInfoPath = subroot + node.FileInfoPath = subroot - err := tree.add(fs, target, subroot, pc[1:]) + err := node.add(fs, target, subroot, pc[1:]) if err != nil { return err } - t.Nodes[name] = tree + t.Nodes[name] = node return nil } -func (t Tree) String() string { +func (t tree) String() string { return formatTree(t, "") } // Leaf returns true if this is a leaf node, which means Path is set to a // non-empty string and the contents of Path should be inserted at this point // in the tree. -func (t Tree) Leaf() bool { +func (t tree) Leaf() bool { return t.Path != "" } // NodeNames returns the sorted list of subtree names. -func (t Tree) NodeNames() []string { +func (t tree) NodeNames() []string { // iterate over the nodes of atree in lexicographic (=deterministic) order names := make([]string, 0, len(t.Nodes)) for name := range t.Nodes { @@ -219,7 +219,7 @@ func (t Tree) NodeNames() []string { } // formatTree returns a text representation of the tree t. 
-func formatTree(t Tree, indent string) (s string) { +func formatTree(t tree, indent string) (s string) { for name, node := range t.Nodes { s += fmt.Sprintf("%v/%v, root %q, path %q, meta %q\n", indent, name, node.Root, node.Path, node.FileInfoPath) s += formatTree(node, indent+" ") @@ -228,7 +228,7 @@ func formatTree(t Tree, indent string) (s string) { } // unrollTree unrolls the tree so that only leaf nodes have Path set. -func unrollTree(f fs.FS, t *Tree) error { +func unrollTree(f fs.FS, t *tree) error { // if the current tree is a leaf node (Path is set) and has additional // nodes, add the contents of Path to the nodes. if t.Path != "" && len(t.Nodes) > 0 { @@ -252,7 +252,7 @@ func unrollTree(f fs.FS, t *Tree) error { return errors.Errorf("tree unrollTree: collision on path, node %#v, path %q", node, f.Join(t.Path, entry)) } - t.Nodes[entry] = Tree{Path: f.Join(t.Path, entry)} + t.Nodes[entry] = tree{Path: f.Join(t.Path, entry)} } t.Path = "" } @@ -269,10 +269,10 @@ func unrollTree(f fs.FS, t *Tree) error { return nil } -// NewTree creates a Tree from the target files/directories. -func NewTree(fs fs.FS, targets []string) (*Tree, error) { +// newTree creates a Tree from the target files/directories. +func newTree(fs fs.FS, targets []string) (*tree, error) { debug.Log("targets: %v", targets) - tree := &Tree{} + tree := &tree{} seen := make(map[string]struct{}) for _, target := range targets { target = fs.Clean(target) diff --git a/internal/archiver/tree_saver.go b/internal/archiver/tree_saver.go index 9c11b48f060..aeedefef5ae 100644 --- a/internal/archiver/tree_saver.go +++ b/internal/archiver/tree_saver.go @@ -9,20 +9,20 @@ import ( "golang.org/x/sync/errgroup" ) -// TreeSaver concurrently saves incoming trees to the repo. -type TreeSaver struct { - saveBlob SaveBlobFn +// treeSaver concurrently saves incoming trees to the repo. +type treeSaver struct { + saveBlob saveBlobFn errFn ErrorFunc ch chan<- saveTreeJob } -// NewTreeSaver returns a new tree saver. A worker pool with treeWorkers is +// newTreeSaver returns a new tree saver. A worker pool with treeWorkers is // started, it is stopped when ctx is cancelled. -func NewTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, saveBlob SaveBlobFn, errFn ErrorFunc) *TreeSaver { +func newTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, saveBlob saveBlobFn, errFn ErrorFunc) *treeSaver { ch := make(chan saveTreeJob) - s := &TreeSaver{ + s := &treeSaver{ ch: ch, saveBlob: saveBlob, errFn: errFn, @@ -37,12 +37,12 @@ func NewTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, sav return s } -func (s *TreeSaver) TriggerShutdown() { +func (s *treeSaver) TriggerShutdown() { close(s.ch) } // Save stores the dir d and returns the data once it has been completed. -func (s *TreeSaver) Save(ctx context.Context, snPath string, target string, node *restic.Node, nodes []FutureNode, complete CompleteFunc) FutureNode { +func (s *treeSaver) Save(ctx context.Context, snPath string, target string, node *restic.Node, nodes []futureNode, complete fileCompleteFunc) futureNode { fn, ch := newFutureNode() job := saveTreeJob{ snPath: snPath, @@ -66,13 +66,13 @@ type saveTreeJob struct { snPath string target string node *restic.Node - nodes []FutureNode + nodes []futureNode ch chan<- futureNodeResult - complete CompleteFunc + complete fileCompleteFunc } // save stores the nodes as a tree in the repo. 
-func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, ItemStats, error) { +func (s *treeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, ItemStats, error) { var stats ItemStats node := job.node nodes := job.nodes @@ -84,7 +84,7 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I for i, fn := range nodes { // fn is a copy, so clear the original value explicitly - nodes[i] = FutureNode{} + nodes[i] = futureNode{} fnr := fn.take(ctx) // return the error if it wasn't ignored @@ -128,9 +128,9 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I return nil, stats, err } - b := &Buffer{Data: buf} - ch := make(chan SaveBlobResponse, 1) - s.saveBlob(ctx, restic.TreeBlob, b, job.target, func(res SaveBlobResponse) { + b := &buffer{Data: buf} + ch := make(chan saveBlobResponse, 1) + s.saveBlob(ctx, restic.TreeBlob, b, job.target, func(res saveBlobResponse) { ch <- res }) @@ -149,7 +149,7 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I } } -func (s *TreeSaver) worker(ctx context.Context, jobs <-chan saveTreeJob) error { +func (s *treeSaver) worker(ctx context.Context, jobs <-chan saveTreeJob) error { for { var job saveTreeJob var ok bool diff --git a/internal/archiver/tree_saver_test.go b/internal/archiver/tree_saver_test.go index 47a3f3842c9..4aa4c51f165 100644 --- a/internal/archiver/tree_saver_test.go +++ b/internal/archiver/tree_saver_test.go @@ -12,8 +12,8 @@ import ( "golang.org/x/sync/errgroup" ) -func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *Buffer, _ string, cb func(res SaveBlobResponse)) { - cb(SaveBlobResponse{ +func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *buffer, _ string, cb func(res saveBlobResponse)) { + cb(saveBlobResponse{ id: restic.NewRandomID(), known: false, length: len(buf.Data), @@ -21,7 +21,7 @@ func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *Buffer, _ string, }) } -func setupTreeSaver() (context.Context, context.CancelFunc, *TreeSaver, func() error) { +func setupTreeSaver() (context.Context, context.CancelFunc, *treeSaver, func() error) { ctx, cancel := context.WithCancel(context.Background()) wg, ctx := errgroup.WithContext(ctx) @@ -29,7 +29,7 @@ func setupTreeSaver() (context.Context, context.CancelFunc, *TreeSaver, func() e return err } - b := NewTreeSaver(ctx, wg, uint(runtime.NumCPU()), treeSaveHelper, errFn) + b := newTreeSaver(ctx, wg, uint(runtime.NumCPU()), treeSaveHelper, errFn) shutdown := func() error { b.TriggerShutdown() @@ -43,7 +43,7 @@ func TestTreeSaver(t *testing.T) { ctx, cancel, b, shutdown := setupTreeSaver() defer cancel() - var results []FutureNode + var results []futureNode for i := 0; i < 20; i++ { node := &restic.Node{ @@ -83,13 +83,13 @@ func TestTreeSaverError(t *testing.T) { ctx, cancel, b, shutdown := setupTreeSaver() defer cancel() - var results []FutureNode + var results []futureNode for i := 0; i < test.trees; i++ { node := &restic.Node{ Name: fmt.Sprintf("file-%d", i), } - nodes := []FutureNode{ + nodes := []futureNode{ newFutureNodeWithResult(futureNodeResult{node: &restic.Node{ Name: fmt.Sprintf("child-%d", i), }}), @@ -128,7 +128,7 @@ func TestTreeSaverDuplicates(t *testing.T) { node := &restic.Node{ Name: "file", } - nodes := []FutureNode{ + nodes := []futureNode{ newFutureNodeWithResult(futureNodeResult{node: &restic.Node{ Name: "child", }}), diff --git a/internal/archiver/tree_test.go b/internal/archiver/tree_test.go index a9d2d97ff23..c9fe776b13e 
100644 --- a/internal/archiver/tree_test.go +++ b/internal/archiver/tree_test.go @@ -12,7 +12,7 @@ import ( ) // debug.Log requires Tree.String. -var _ fmt.Stringer = Tree{} +var _ fmt.Stringer = tree{} func TestPathComponents(t *testing.T) { var tests = []struct { @@ -142,20 +142,20 @@ func TestTree(t *testing.T) { var tests = []struct { targets []string src TestDir - want Tree + want tree unix bool win bool mustError bool }{ { targets: []string{"foo"}, - want: Tree{Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ "foo": {Path: "foo", Root: "."}, }}, }, { targets: []string{"foo", "bar", "baz"}, - want: Tree{Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ "foo": {Path: "foo", Root: "."}, "bar": {Path: "bar", Root: "."}, "baz": {Path: "baz", Root: "."}, @@ -163,8 +163,8 @@ func TestTree(t *testing.T) { }, { targets: []string{"foo/user1", "foo/user2", "foo/other"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/user1")}, "user2": {Path: filepath.FromSlash("foo/user2")}, "other": {Path: filepath.FromSlash("foo/other")}, @@ -173,9 +173,9 @@ func TestTree(t *testing.T) { }, { targets: []string{"foo/work/user1", "foo/work/user2"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ - "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ + "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/work/user1")}, "user2": {Path: filepath.FromSlash("foo/work/user2")}, }}, @@ -184,50 +184,50 @@ func TestTree(t *testing.T) { }, { targets: []string{"foo/user1", "bar/user1", "foo/other"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/user1")}, "other": {Path: filepath.FromSlash("foo/other")}, }}, - "bar": {Root: ".", FileInfoPath: "bar", Nodes: map[string]Tree{ + "bar": {Root: ".", FileInfoPath: "bar", Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("bar/user1")}, }}, }}, }, { targets: []string{"../work"}, - want: Tree{Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ "work": {Root: "..", Path: filepath.FromSlash("../work")}, }}, }, { targets: []string{"../work/other"}, - want: Tree{Nodes: map[string]Tree{ - "work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]tree{ "other": {Path: filepath.FromSlash("../work/other")}, }}, }}, }, { targets: []string{"foo/user1", "../work/other", "foo/user2"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/user1")}, "user2": {Path: filepath.FromSlash("foo/user2")}, }}, - "work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{ + "work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: 
map[string]tree{ "other": {Path: filepath.FromSlash("../work/other")}, }}, }}, }, { targets: []string{"foo/user1", "../foo/other", "foo/user2"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/user1")}, "user2": {Path: filepath.FromSlash("foo/user2")}, }}, - "foo-1": {Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]Tree{ + "foo-1": {Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]tree{ "other": {Path: filepath.FromSlash("../foo/other")}, }}, }}, @@ -240,11 +240,11 @@ func TestTree(t *testing.T) { }, }, targets: []string{"foo", "foo/work"}, - want: Tree{Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ "foo": { Root: ".", FileInfoPath: "foo", - Nodes: map[string]Tree{ + Nodes: map[string]tree{ "file": {Path: filepath.FromSlash("foo/file")}, "work": {Path: filepath.FromSlash("foo/work")}, }, @@ -261,11 +261,11 @@ func TestTree(t *testing.T) { }, }, targets: []string{"foo/work", "foo"}, - want: Tree{Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ "foo": { Root: ".", FileInfoPath: "foo", - Nodes: map[string]Tree{ + Nodes: map[string]tree{ "file": {Path: filepath.FromSlash("foo/file")}, "work": {Path: filepath.FromSlash("foo/work")}, }, @@ -282,11 +282,11 @@ func TestTree(t *testing.T) { }, }, targets: []string{"foo/work", "foo/work/user2"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "work": { FileInfoPath: filepath.FromSlash("foo/work"), - Nodes: map[string]Tree{ + Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/work/user1")}, "user2": {Path: filepath.FromSlash("foo/work/user2")}, }, @@ -304,10 +304,10 @@ func TestTree(t *testing.T) { }, }, targets: []string{"foo/work/user2", "foo/work"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "work": {FileInfoPath: filepath.FromSlash("foo/work"), - Nodes: map[string]Tree{ + Nodes: map[string]tree{ "user1": {Path: filepath.FromSlash("foo/work/user1")}, "user2": {Path: filepath.FromSlash("foo/work/user2")}, }, @@ -332,12 +332,12 @@ func TestTree(t *testing.T) { }, }, targets: []string{"foo/work/user2/data/secret", "foo"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ "other": {Path: filepath.FromSlash("foo/other")}, - "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ - "user2": {FileInfoPath: filepath.FromSlash("foo/work/user2"), Nodes: map[string]Tree{ - "data": {FileInfoPath: filepath.FromSlash("foo/work/user2/data"), Nodes: map[string]Tree{ + "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{ + "user2": {FileInfoPath: filepath.FromSlash("foo/work/user2"), Nodes: map[string]tree{ + "data": {FileInfoPath: filepath.FromSlash("foo/work/user2/data"), Nodes: map[string]tree{ "secret": { Path: filepath.FromSlash("foo/work/user2/data/secret"), }, @@ -368,10 +368,10 @@ func TestTree(t *testing.T) { }, unix: true, targets: []string{"mnt/driveA", 
"mnt/driveA/work/driveB"}, - want: Tree{Nodes: map[string]Tree{ - "mnt": {Root: ".", FileInfoPath: filepath.FromSlash("mnt"), Nodes: map[string]Tree{ - "driveA": {FileInfoPath: filepath.FromSlash("mnt/driveA"), Nodes: map[string]Tree{ - "work": {FileInfoPath: filepath.FromSlash("mnt/driveA/work"), Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "mnt": {Root: ".", FileInfoPath: filepath.FromSlash("mnt"), Nodes: map[string]tree{ + "driveA": {FileInfoPath: filepath.FromSlash("mnt/driveA"), Nodes: map[string]tree{ + "work": {FileInfoPath: filepath.FromSlash("mnt/driveA/work"), Nodes: map[string]tree{ "driveB": { Path: filepath.FromSlash("mnt/driveA/work/driveB"), }, @@ -384,9 +384,9 @@ func TestTree(t *testing.T) { }, { targets: []string{"foo/work/user", "foo/work/user"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ - "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ + "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{ "user": {Path: filepath.FromSlash("foo/work/user")}, }}, }}, @@ -394,9 +394,9 @@ func TestTree(t *testing.T) { }, { targets: []string{"./foo/work/user", "foo/work/user"}, - want: Tree{Nodes: map[string]Tree{ - "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ - "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{ + "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{ "user": {Path: filepath.FromSlash("foo/work/user")}, }}, }}, @@ -405,10 +405,10 @@ func TestTree(t *testing.T) { { win: true, targets: []string{`c:\users\foobar\temp`}, - want: Tree{Nodes: map[string]Tree{ - "c": {Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]Tree{ - "users": {FileInfoPath: `c:\users`, Nodes: map[string]Tree{ - "foobar": {FileInfoPath: `c:\users\foobar`, Nodes: map[string]Tree{ + want: tree{Nodes: map[string]tree{ + "c": {Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]tree{ + "users": {FileInfoPath: `c:\users`, Nodes: map[string]tree{ + "foobar": {FileInfoPath: `c:\users\foobar`, Nodes: map[string]tree{ "temp": {Path: `c:\users\foobar\temp`}, }}, }}, @@ -445,7 +445,7 @@ func TestTree(t *testing.T) { back := rtest.Chdir(t, tempdir) defer back() - tree, err := NewTree(fs.Local{}, test.targets) + tree, err := newTree(fs.Local{}, test.targets) if test.mustError { if err == nil { t.Fatal("expected error, got nil") From f9dbcd25319a742e3036a447c92618b61de9a178 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 12:07:26 +0200 Subject: [PATCH 644/893] backup: convert reject funcs to use FS interface Depending on parameters the paths in a snapshot do not directly correspond to real paths on the filesystem. Therefore, reject funcs must use the FS interface to work correctly. 
--- cmd/restic/cmd_backup.go | 66 +++++++++++++++--------------- cmd/restic/exclude.go | 50 +++++++++++----------- cmd/restic/exclude_test.go | 11 ++--- internal/archiver/archiver.go | 10 ++--- internal/archiver/archiver_test.go | 14 +++---- internal/archiver/scanner.go | 8 ++-- internal/archiver/scanner_test.go | 2 +- internal/fs/fs_local.go | 6 +++ internal/fs/fs_reader.go | 4 ++ internal/fs/interface.go | 1 + 10 files changed, 91 insertions(+), 81 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 562108a3370..1fdec081bd2 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -314,32 +314,19 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) ( } fs = append(fs, fsPatterns...) - if opts.ExcludeCaches { - opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55") - } - - for _, spec := range opts.ExcludeIfPresent { - f, err := rejectIfPresent(spec) - if err != nil { - return nil, err - } - - fs = append(fs, f) - } - return fs, nil } // collectRejectFuncs returns a list of all functions which may reject data // from being saved in a snapshot based on path and file info -func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc, err error) { +func collectRejectFuncs(opts BackupOptions, targets []string, fs fs.FS) (funcs []RejectFunc, err error) { // allowed devices if opts.ExcludeOtherFS && !opts.Stdin { - f, err := rejectByDevice(targets) + f, err := rejectByDevice(targets, fs) if err != nil { return nil, err } - fs = append(fs, f) + funcs = append(funcs, f) } if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin { @@ -347,10 +334,23 @@ func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc, if err != nil { return nil, err } - fs = append(fs, f) + funcs = append(funcs, f) } - return fs, nil + if opts.ExcludeCaches { + opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55") + } + + for _, spec := range opts.ExcludeIfPresent { + f, err := rejectIfPresent(spec) + if err != nil { + return nil, err + } + + funcs = append(funcs, f) + } + + return funcs, nil } // collectTargets returns a list of target files/dirs from several sources. 
@@ -505,12 +505,6 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter return err } - // rejectFuncs collect functions that can reject items from the backup based on path and file info - rejectFuncs, err := collectRejectFuncs(opts, targets) - if err != nil { - return err - } - var parentSnapshot *restic.Snapshot if !opts.Stdin { parentSnapshot, err = findParentSnapshot(ctx, repo, opts, targets, timeStamp) @@ -547,15 +541,6 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter return true } - selectFilter := func(item string, fi os.FileInfo) bool { - for _, reject := range rejectFuncs { - if reject(item, fi) { - return false - } - } - return true - } - var targetFS fs.FS = fs.Local{} if runtime.GOOS == "windows" && opts.UseFsSnapshot { if err = fs.HasSufficientPrivilegesForVSS(); err != nil { @@ -598,6 +583,21 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter targets = []string{filename} } + // rejectFuncs collect functions that can reject items from the backup based on path and file info + rejectFuncs, err := collectRejectFuncs(opts, targets, targetFS) + if err != nil { + return err + } + + selectFilter := func(item string, fi os.FileInfo, fs fs.FS) bool { + for _, reject := range rejectFuncs { + if reject(item, fi, fs) { + return false + } + } + return true + } + wg, wgCtx := errgroup.WithContext(ctx) cancelCtx, cancel := context.WithCancel(wgCtx) defer cancel() diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go index 4657e4915dd..f1e1011f218 100644 --- a/cmd/restic/exclude.go +++ b/cmd/restic/exclude.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "os" - "path/filepath" "strings" "sync" @@ -72,7 +71,7 @@ type RejectByNameFunc func(path string) bool // RejectFunc is a function that takes a filename and os.FileInfo of a // file that would be included in the backup. The function returns true if it // should be excluded (rejected) from the backup. -type RejectFunc func(path string, fi os.FileInfo) bool +type RejectFunc func(path string, fi os.FileInfo, fs fs.FS) bool // rejectByPattern returns a RejectByNameFunc which rejects files that match // one of the patterns. @@ -112,7 +111,7 @@ func rejectByInsensitivePattern(patterns []string) RejectByNameFunc { // non-nil if the filename component of excludeFileSpec is empty. If rc is // non-nil, it is going to be used in the RejectByNameFunc to expedite the evaluation // of a directory based on previous visits. -func rejectIfPresent(excludeFileSpec string) (RejectByNameFunc, error) { +func rejectIfPresent(excludeFileSpec string) (RejectFunc, error) { if excludeFileSpec == "" { return nil, errors.New("name for exclusion tagfile is empty") } @@ -129,10 +128,9 @@ func rejectIfPresent(excludeFileSpec string) (RejectByNameFunc, error) { } debug.Log("using %q as exclusion tagfile", tf) rc := &rejectionCache{} - fn := func(filename string) bool { - return isExcludedByFile(filename, tf, tc, rc) - } - return fn, nil + return func(filename string, _ os.FileInfo, fs fs.FS) bool { + return isExcludedByFile(filename, tf, tc, rc, fs) + }, nil } // isExcludedByFile interprets filename as a path and returns true if that file @@ -140,28 +138,28 @@ func rejectIfPresent(excludeFileSpec string) (RejectByNameFunc, error) { // tagfile which bears the name specified in tagFilename and starts with // header. If rc is non-nil, it is used to expedite the evaluation of a // directory based on previous visits. 
-func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache) bool { +func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache, fs fs.FS) bool { if tagFilename == "" { return false } - dir, base := filepath.Split(filename) - if base == tagFilename { + if fs.Base(filename) == tagFilename { return false // do not exclude the tagfile itself } rc.Lock() defer rc.Unlock() + dir := fs.Dir(filename) rejected, visited := rc.Get(dir) if visited { return rejected } - rejected = isDirExcludedByFile(dir, tagFilename, header) + rejected = isDirExcludedByFile(dir, tagFilename, header, fs) rc.Store(dir, rejected) return rejected } -func isDirExcludedByFile(dir, tagFilename, header string) bool { - tf := filepath.Join(dir, tagFilename) +func isDirExcludedByFile(dir, tagFilename, header string, fs fs.FS) bool { + tf := fs.Join(dir, tagFilename) _, err := fs.Lstat(tf) if os.IsNotExist(err) { return false @@ -178,7 +176,7 @@ func isDirExcludedByFile(dir, tagFilename, header string) bool { // From this stage, errors mean tagFilename exists but it is malformed. // Warnings will be generated so that the user is informed that the // indented ignore-action is not performed. - f, err := os.Open(tf) + f, err := fs.OpenFile(tf, os.O_RDONLY, 0) if err != nil { Warnf("could not open exclusion tagfile: %v", err) return false @@ -210,11 +208,11 @@ func isDirExcludedByFile(dir, tagFilename, header string) bool { type DeviceMap map[string]uint64 // NewDeviceMap creates a new device map from the list of source paths. -func NewDeviceMap(allowedSourcePaths []string) (DeviceMap, error) { +func NewDeviceMap(allowedSourcePaths []string, fs fs.FS) (DeviceMap, error) { deviceMap := make(map[string]uint64) for _, item := range allowedSourcePaths { - item, err := filepath.Abs(filepath.Clean(item)) + item, err := fs.Abs(fs.Clean(item)) if err != nil { return nil, err } @@ -240,15 +238,15 @@ func NewDeviceMap(allowedSourcePaths []string) (DeviceMap, error) { } // IsAllowed returns true if the path is located on an allowed device. -func (m DeviceMap) IsAllowed(item string, deviceID uint64) (bool, error) { - for dir := item; ; dir = filepath.Dir(dir) { +func (m DeviceMap) IsAllowed(item string, deviceID uint64, fs fs.FS) (bool, error) { + for dir := item; ; dir = fs.Dir(dir) { debug.Log("item %v, test dir %v", item, dir) // find a parent directory that is on an allowed device (otherwise // we would not traverse the directory at all) allowedID, ok := m[dir] if !ok { - if dir == filepath.Dir(dir) { + if dir == fs.Dir(dir) { // arrived at root, no allowed device found. this should not happen. break } @@ -272,14 +270,14 @@ func (m DeviceMap) IsAllowed(item string, deviceID uint64) (bool, error) { // rejectByDevice returns a RejectFunc that rejects files which are on a // different file systems than the files/dirs in samples. 
-func rejectByDevice(samples []string) (RejectFunc, error) { - deviceMap, err := NewDeviceMap(samples) +func rejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { + deviceMap, err := NewDeviceMap(samples, filesystem) if err != nil { return nil, err } debug.Log("allowed devices: %v\n", deviceMap) - return func(item string, fi os.FileInfo) bool { + return func(item string, fi os.FileInfo, fs fs.FS) bool { id, err := fs.DeviceID(fi) if err != nil { // This should never happen because gatherDevices() would have @@ -287,7 +285,7 @@ func rejectByDevice(samples []string) (RejectFunc, error) { panic(err) } - allowed, err := deviceMap.IsAllowed(filepath.Clean(item), id) + allowed, err := deviceMap.IsAllowed(fs.Clean(item), id, fs) if err != nil { // this should not happen panic(fmt.Sprintf("error checking device ID of %v: %v", item, err)) @@ -306,7 +304,7 @@ func rejectByDevice(samples []string) (RejectFunc, error) { // special case: make sure we keep mountpoints (directories which // contain a mounted file system). Test this by checking if the parent // directory would be included. - parentDir := filepath.Dir(filepath.Clean(item)) + parentDir := fs.Dir(fs.Clean(item)) parentFI, err := fs.Lstat(parentDir) if err != nil { @@ -322,7 +320,7 @@ func rejectByDevice(samples []string) (RejectFunc, error) { return true } - parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID) + parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID, fs) if err != nil { debug.Log("item %v: error checking parent directory: %v", item, err) // if in doubt, reject @@ -369,7 +367,7 @@ func rejectBySize(maxSizeStr string) (RejectFunc, error) { return nil, err } - return func(item string, fi os.FileInfo) bool { + return func(item string, fi os.FileInfo, _ fs.FS) bool { // directory will be ignored if fi.IsDir() { return false diff --git a/cmd/restic/exclude_test.go b/cmd/restic/exclude_test.go index 9a24418ae32..166ee1d84b3 100644 --- a/cmd/restic/exclude_test.go +++ b/cmd/restic/exclude_test.go @@ -5,6 +5,7 @@ import ( "path/filepath" "testing" + "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/test" ) @@ -102,7 +103,7 @@ func TestIsExcludedByFile(t *testing.T) { if tc.content == "" { h = "" } - if got := isExcludedByFile(foo, tagFilename, h, nil); tc.want != got { + if got := isExcludedByFile(foo, tagFilename, h, nil, &fs.Local{}); tc.want != got { t.Fatalf("expected %v, got %v", tc.want, got) } }) @@ -164,8 +165,8 @@ func TestMultipleIsExcludedByFile(t *testing.T) { if err != nil { return err } - excludedByFoo := fooExclude(p) - excludedByBar := barExclude(p) + excludedByFoo := fooExclude(p, nil, &fs.Local{}) + excludedByBar := barExclude(p, nil, &fs.Local{}) excluded := excludedByFoo || excludedByBar // the log message helps debugging in case the test fails t.Logf("%q: %v || %v = %v", p, excludedByFoo, excludedByBar, excluded) @@ -249,7 +250,7 @@ func TestIsExcludedByFileSize(t *testing.T) { return err } - excluded := sizeExclude(p, fi) + excluded := sizeExclude(p, fi, nil) // the log message helps debugging in case the test fails t.Logf("%q: dir:%t; size:%d; excluded:%v", p, fi.IsDir(), fi.Size(), excluded) m[p] = !excluded @@ -299,7 +300,7 @@ func TestDeviceMap(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - res, err := deviceMap.IsAllowed(filepath.FromSlash(test.item), test.deviceID) + res, err := deviceMap.IsAllowed(filepath.FromSlash(test.item), test.deviceID, &fs.Local{}) if err != nil { t.Fatal(err) } diff --git 
a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 397347bcba2..eab65bb5fee 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -25,7 +25,7 @@ type SelectByNameFunc func(item string) bool // SelectFunc returns true for all items that should be included (files and // dirs). If false is returned, files are ignored and dirs are not even walked. -type SelectFunc func(item string, fi os.FileInfo) bool +type SelectFunc func(item string, fi os.FileInfo, fs fs.FS) bool // ErrorFunc is called when an error during archiving occurs. When nil is // returned, the archiver continues, otherwise it aborts and passes the error @@ -178,12 +178,12 @@ func (o Options) ApplyDefaults() Options { } // New initializes a new archiver. -func New(repo archiverRepo, fs fs.FS, opts Options) *Archiver { +func New(repo archiverRepo, filesystem fs.FS, opts Options) *Archiver { arch := &Archiver{ Repo: repo, SelectByName: func(_ string) bool { return true }, - Select: func(_ string, _ os.FileInfo) bool { return true }, - FS: fs, + Select: func(_ string, _ os.FileInfo, _ fs.FS) bool { return true }, + FS: filesystem, Options: opts.ApplyDefaults(), CompleteItem: func(string, *restic.Node, *restic.Node, ItemStats, time.Duration) {}, @@ -448,7 +448,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous } return futureNode{}, true, nil } - if !arch.Select(abstarget, fi) { + if !arch.Select(abstarget, fi, arch.FS) { debug.Log("%v is excluded", target) return futureNode{}, true, nil } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 74fecef8067..b5645218284 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1529,7 +1529,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo) bool { + selFn: func(item string, fi os.FileInfo, _ fs.FS) bool { return true }, }, @@ -1546,7 +1546,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo) bool { + selFn: func(item string, fi os.FileInfo, _ fs.FS) bool { return false }, err: "snapshot is empty", @@ -1573,7 +1573,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo) bool { + selFn: func(item string, fi os.FileInfo, _ fs.FS) bool { return filepath.Ext(item) != ".txt" }, }, @@ -1597,8 +1597,8 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo) bool { - return filepath.Base(item) != "subdir" + selFn: func(item string, fi os.FileInfo, fs fs.FS) bool { + return fs.Base(item) != "subdir" }, }, { @@ -1606,8 +1606,8 @@ func TestArchiverSnapshotSelect(t *testing.T) { src: TestDir{ "foo": TestFile{Content: "foo"}, }, - selFn: func(item string, fi os.FileInfo) bool { - return filepath.IsAbs(item) + selFn: func(item string, fi os.FileInfo, fs fs.FS) bool { + return fs.IsAbs(item) }, }, } diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go index 433d388212e..cb74a31d6bf 100644 --- a/internal/archiver/scanner.go +++ b/internal/archiver/scanner.go @@ -22,11 +22,11 @@ type Scanner struct { } // NewScanner initializes a new Scanner. 
-func NewScanner(fs fs.FS) *Scanner { +func NewScanner(filesystem fs.FS) *Scanner { return &Scanner{ - FS: fs, + FS: filesystem, SelectByName: func(_ string) bool { return true }, - Select: func(_ string, _ os.FileInfo) bool { return true }, + Select: func(_ string, _ os.FileInfo, _ fs.FS) bool { return true }, Error: func(_ string, err error) error { return err }, Result: func(_ string, _ ScanStats) {}, } @@ -115,7 +115,7 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (Sca } // run remaining select functions that require file information - if !s.Select(target, fi) { + if !s.Select(target, fi, s.FS) { return stats, nil } diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go index b5b7057b8ea..e4e2c9f59a1 100644 --- a/internal/archiver/scanner_test.go +++ b/internal/archiver/scanner_test.go @@ -56,7 +56,7 @@ func TestScanner(t *testing.T) { }, }, }, - selFn: func(item string, fi os.FileInfo) bool { + selFn: func(item string, fi os.FileInfo, fs fs.FS) bool { if fi.IsDir() { return true } diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 0bcbf7f3a85..33d83bf6344 100644 --- a/internal/fs/fs_local.go +++ b/internal/fs/fs_local.go @@ -46,6 +46,12 @@ func (fs Local) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fixpath(name)) } +// DeviceID extracts the DeviceID from the given FileInfo. If the fs does +// not support a DeviceID, it returns an error instead +func (fs Local) DeviceID(fi os.FileInfo) (deviceID uint64, err error) { + return DeviceID(fi) +} + // Join joins any number of path elements into a single path, adding a // Separator if necessary. Join calls Clean on the result; in particular, all // empty strings are ignored. On Windows, the result is a UNC path if and only diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 93a42f9eb9f..b3371a8c967 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -122,6 +122,10 @@ func (fs *Reader) Lstat(name string) (os.FileInfo, error) { return nil, pathError("lstat", name, os.ErrNotExist) } +func (fs *Reader) DeviceID(_ os.FileInfo) (deviceID uint64, err error) { + return 0, errors.New("Device IDs are not supported") +} + // Join joins any number of path elements into a single path, adding a // Separator if necessary. Join calls Clean on the result; in particular, all // empty strings are ignored. 
On Windows, the result is a UNC path if and only diff --git a/internal/fs/interface.go b/internal/fs/interface.go index e1f4ef2d980..1c27c1c13f3 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -10,6 +10,7 @@ type FS interface { OpenFile(name string, flag int, perm os.FileMode) (File, error) Stat(name string) (os.FileInfo, error) Lstat(name string) (os.FileInfo, error) + DeviceID(fi os.FileInfo) (deviceID uint64, err error) Join(elem ...string) string Separator() string From 41c031a19e2d338a2e600a84209204a1507423ec Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 12:30:25 +0200 Subject: [PATCH 645/893] backup: move RejectFuncs to archiver package --- cmd/restic/cmd_backup.go | 12 +- cmd/restic/exclude.go | 307 ----------------------------- cmd/restic/exclude_test.go | 254 ------------------------ internal/archiver/exclude.go | 311 ++++++++++++++++++++++++++++++ internal/archiver/exclude_test.go | 259 +++++++++++++++++++++++++ 5 files changed, 576 insertions(+), 567 deletions(-) create mode 100644 internal/archiver/exclude.go create mode 100644 internal/archiver/exclude_test.go diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 1fdec081bd2..ceb7694b1c1 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -319,18 +319,18 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) ( // collectRejectFuncs returns a list of all functions which may reject data // from being saved in a snapshot based on path and file info -func collectRejectFuncs(opts BackupOptions, targets []string, fs fs.FS) (funcs []RejectFunc, err error) { +func collectRejectFuncs(opts BackupOptions, targets []string, fs fs.FS) (funcs []archiver.RejectFunc, err error) { // allowed devices - if opts.ExcludeOtherFS && !opts.Stdin { - f, err := rejectByDevice(targets, fs) + if opts.ExcludeOtherFS && !opts.Stdin && !opts.StdinCommand { + f, err := archiver.RejectByDevice(targets, fs) if err != nil { return nil, err } funcs = append(funcs, f) } - if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin { - f, err := rejectBySize(opts.ExcludeLargerThan) + if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin && !opts.StdinCommand { + f, err := archiver.RejectBySize(opts.ExcludeLargerThan) if err != nil { return nil, err } @@ -342,7 +342,7 @@ func collectRejectFuncs(opts BackupOptions, targets []string, fs fs.FS) (funcs [ } for _, spec := range opts.ExcludeIfPresent { - f, err := rejectIfPresent(spec) + f, err := archiver.RejectIfPresent(spec, Warnf) if err != nil { return nil, err } diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go index f1e1011f218..40eb93933df 100644 --- a/cmd/restic/exclude.go +++ b/cmd/restic/exclude.go @@ -4,10 +4,8 @@ import ( "bufio" "bytes" "fmt" - "io" "os" "strings" - "sync" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" @@ -15,64 +13,14 @@ import ( "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/textfile" - "github.com/restic/restic/internal/ui" "github.com/spf13/pflag" ) -type rejectionCache struct { - m map[string]bool - mtx sync.Mutex -} - -// Lock locks the mutex in rc. -func (rc *rejectionCache) Lock() { - if rc != nil { - rc.mtx.Lock() - } -} - -// Unlock unlocks the mutex in rc. 
-func (rc *rejectionCache) Unlock() { - if rc != nil { - rc.mtx.Unlock() - } -} - -// Get returns the last stored value for dir and a second boolean that -// indicates whether that value was actually written to the cache. It is the -// callers responsibility to call rc.Lock and rc.Unlock before using this -// method, otherwise data races may occur. -func (rc *rejectionCache) Get(dir string) (bool, bool) { - if rc == nil || rc.m == nil { - return false, false - } - v, ok := rc.m[dir] - return v, ok -} - -// Store stores a new value for dir. It is the callers responsibility to call -// rc.Lock and rc.Unlock before using this method, otherwise data races may -// occur. -func (rc *rejectionCache) Store(dir string, rejected bool) { - if rc == nil { - return - } - if rc.m == nil { - rc.m = make(map[string]bool) - } - rc.m[dir] = rejected -} - // RejectByNameFunc is a function that takes a filename of a // file that would be included in the backup. The function returns true if it // should be excluded (rejected) from the backup. type RejectByNameFunc func(path string) bool -// RejectFunc is a function that takes a filename and os.FileInfo of a -// file that would be included in the backup. The function returns true if it -// should be excluded (rejected) from the backup. -type RejectFunc func(path string, fi os.FileInfo, fs fs.FS) bool - // rejectByPattern returns a RejectByNameFunc which rejects files that match // one of the patterns. func rejectByPattern(patterns []string) RejectByNameFunc { @@ -104,239 +52,6 @@ func rejectByInsensitivePattern(patterns []string) RejectByNameFunc { } } -// rejectIfPresent returns a RejectByNameFunc which itself returns whether a path -// should be excluded. The RejectByNameFunc considers a file to be excluded when -// it resides in a directory with an exclusion file, that is specified by -// excludeFileSpec in the form "filename[:content]". The returned error is -// non-nil if the filename component of excludeFileSpec is empty. If rc is -// non-nil, it is going to be used in the RejectByNameFunc to expedite the evaluation -// of a directory based on previous visits. -func rejectIfPresent(excludeFileSpec string) (RejectFunc, error) { - if excludeFileSpec == "" { - return nil, errors.New("name for exclusion tagfile is empty") - } - colon := strings.Index(excludeFileSpec, ":") - if colon == 0 { - return nil, fmt.Errorf("no name for exclusion tagfile provided") - } - tf, tc := "", "" - if colon > 0 { - tf = excludeFileSpec[:colon] - tc = excludeFileSpec[colon+1:] - } else { - tf = excludeFileSpec - } - debug.Log("using %q as exclusion tagfile", tf) - rc := &rejectionCache{} - return func(filename string, _ os.FileInfo, fs fs.FS) bool { - return isExcludedByFile(filename, tf, tc, rc, fs) - }, nil -} - -// isExcludedByFile interprets filename as a path and returns true if that file -// is in an excluded directory. A directory is identified as excluded if it contains a -// tagfile which bears the name specified in tagFilename and starts with -// header. If rc is non-nil, it is used to expedite the evaluation of a -// directory based on previous visits. 
-func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache, fs fs.FS) bool { - if tagFilename == "" { - return false - } - if fs.Base(filename) == tagFilename { - return false // do not exclude the tagfile itself - } - rc.Lock() - defer rc.Unlock() - - dir := fs.Dir(filename) - rejected, visited := rc.Get(dir) - if visited { - return rejected - } - rejected = isDirExcludedByFile(dir, tagFilename, header, fs) - rc.Store(dir, rejected) - return rejected -} - -func isDirExcludedByFile(dir, tagFilename, header string, fs fs.FS) bool { - tf := fs.Join(dir, tagFilename) - _, err := fs.Lstat(tf) - if os.IsNotExist(err) { - return false - } - if err != nil { - Warnf("could not access exclusion tagfile: %v", err) - return false - } - // when no signature is given, the mere presence of tf is enough reason - // to exclude filename - if len(header) == 0 { - return true - } - // From this stage, errors mean tagFilename exists but it is malformed. - // Warnings will be generated so that the user is informed that the - // indented ignore-action is not performed. - f, err := fs.OpenFile(tf, os.O_RDONLY, 0) - if err != nil { - Warnf("could not open exclusion tagfile: %v", err) - return false - } - defer func() { - _ = f.Close() - }() - buf := make([]byte, len(header)) - _, err = io.ReadFull(f, buf) - // EOF is handled with a dedicated message, otherwise the warning were too cryptic - if err == io.EOF { - Warnf("invalid (too short) signature in exclusion tagfile %q\n", tf) - return false - } - if err != nil { - Warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err) - return false - } - if !bytes.Equal(buf, []byte(header)) { - Warnf("invalid signature in exclusion tagfile %q\n", tf) - return false - } - return true -} - -// DeviceMap is used to track allowed source devices for backup. This is used to -// check for crossing mount points during backup (for --one-file-system). It -// maps the name of a source path to its device ID. -type DeviceMap map[string]uint64 - -// NewDeviceMap creates a new device map from the list of source paths. -func NewDeviceMap(allowedSourcePaths []string, fs fs.FS) (DeviceMap, error) { - deviceMap := make(map[string]uint64) - - for _, item := range allowedSourcePaths { - item, err := fs.Abs(fs.Clean(item)) - if err != nil { - return nil, err - } - - fi, err := fs.Lstat(item) - if err != nil { - return nil, err - } - - id, err := fs.DeviceID(fi) - if err != nil { - return nil, err - } - - deviceMap[item] = id - } - - if len(deviceMap) == 0 { - return nil, errors.New("zero allowed devices") - } - - return deviceMap, nil -} - -// IsAllowed returns true if the path is located on an allowed device. -func (m DeviceMap) IsAllowed(item string, deviceID uint64, fs fs.FS) (bool, error) { - for dir := item; ; dir = fs.Dir(dir) { - debug.Log("item %v, test dir %v", item, dir) - - // find a parent directory that is on an allowed device (otherwise - // we would not traverse the directory at all) - allowedID, ok := m[dir] - if !ok { - if dir == fs.Dir(dir) { - // arrived at root, no allowed device found. this should not happen. 
- break - } - continue - } - - // if the item has a different device ID than the parent directory, - // we crossed a file system boundary - if allowedID != deviceID { - debug.Log("item %v (dir %v) on disallowed device %d", item, dir, deviceID) - return false, nil - } - - // item is on allowed device, accept it - debug.Log("item %v allowed", item) - return true, nil - } - - return false, fmt.Errorf("item %v (device ID %v) not found, deviceMap: %v", item, deviceID, m) -} - -// rejectByDevice returns a RejectFunc that rejects files which are on a -// different file systems than the files/dirs in samples. -func rejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { - deviceMap, err := NewDeviceMap(samples, filesystem) - if err != nil { - return nil, err - } - debug.Log("allowed devices: %v\n", deviceMap) - - return func(item string, fi os.FileInfo, fs fs.FS) bool { - id, err := fs.DeviceID(fi) - if err != nil { - // This should never happen because gatherDevices() would have - // errored out earlier. If it still does that's a reason to panic. - panic(err) - } - - allowed, err := deviceMap.IsAllowed(fs.Clean(item), id, fs) - if err != nil { - // this should not happen - panic(fmt.Sprintf("error checking device ID of %v: %v", item, err)) - } - - if allowed { - // accept item - return false - } - - // reject everything except directories - if !fi.IsDir() { - return true - } - - // special case: make sure we keep mountpoints (directories which - // contain a mounted file system). Test this by checking if the parent - // directory would be included. - parentDir := fs.Dir(fs.Clean(item)) - - parentFI, err := fs.Lstat(parentDir) - if err != nil { - debug.Log("item %v: error running lstat() on parent directory: %v", item, err) - // if in doubt, reject - return true - } - - parentDeviceID, err := fs.DeviceID(parentFI) - if err != nil { - debug.Log("item %v: getting device ID of parent directory: %v", item, err) - // if in doubt, reject - return true - } - - parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID, fs) - if err != nil { - debug.Log("item %v: error checking parent directory: %v", item, err) - // if in doubt, reject - return true - } - - if parentAllowed { - // we found a mount point, so accept the directory - return false - } - - // reject everything else - return true - }, nil -} - // rejectResticCache returns a RejectByNameFunc that rejects the restic cache // directory (if set). func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) { @@ -361,28 +76,6 @@ func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) { }, nil } -func rejectBySize(maxSizeStr string) (RejectFunc, error) { - maxSize, err := ui.ParseBytes(maxSizeStr) - if err != nil { - return nil, err - } - - return func(item string, fi os.FileInfo, _ fs.FS) bool { - // directory will be ignored - if fi.IsDir() { - return false - } - - filesize := fi.Size() - if filesize > maxSize { - debug.Log("file %s is oversize: %d", item, filesize) - return true - } - - return false - }, nil -} - // readPatternsFromFiles reads all files and returns the list of // patterns. For each line, leading and trailing white space is removed // and comment lines are ignored. 
For each remaining pattern, environment diff --git a/cmd/restic/exclude_test.go b/cmd/restic/exclude_test.go index 166ee1d84b3..177a81df22a 100644 --- a/cmd/restic/exclude_test.go +++ b/cmd/restic/exclude_test.go @@ -1,12 +1,7 @@ package main import ( - "os" - "path/filepath" "testing" - - "github.com/restic/restic/internal/fs" - "github.com/restic/restic/internal/test" ) func TestRejectByPattern(t *testing.T) { @@ -62,252 +57,3 @@ func TestRejectByInsensitivePattern(t *testing.T) { }) } } - -func TestIsExcludedByFile(t *testing.T) { - const ( - tagFilename = "CACHEDIR.TAG" - header = "Signature: 8a477f597d28d172789f06886806bc55" - ) - tests := []struct { - name string - tagFile string - content string - want bool - }{ - {"NoTagfile", "", "", false}, - {"EmptyTagfile", tagFilename, "", true}, - {"UnnamedTagFile", "", header, false}, - {"WrongTagFile", "notatagfile", header, false}, - {"IncorrectSig", tagFilename, header[1:], false}, - {"ValidSig", tagFilename, header, true}, - {"ValidPlusStuff", tagFilename, header + "foo", true}, - {"ValidPlusNewlineAndStuff", tagFilename, header + "\nbar", true}, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - tempDir := test.TempDir(t) - - foo := filepath.Join(tempDir, "foo") - err := os.WriteFile(foo, []byte("foo"), 0666) - if err != nil { - t.Fatalf("could not write file: %v", err) - } - if tc.tagFile != "" { - tagFile := filepath.Join(tempDir, tc.tagFile) - err = os.WriteFile(tagFile, []byte(tc.content), 0666) - if err != nil { - t.Fatalf("could not write tagfile: %v", err) - } - } - h := header - if tc.content == "" { - h = "" - } - if got := isExcludedByFile(foo, tagFilename, h, nil, &fs.Local{}); tc.want != got { - t.Fatalf("expected %v, got %v", tc.want, got) - } - }) - } -} - -// TestMultipleIsExcludedByFile is for testing that multiple instances of -// the --exclude-if-present parameter (or the shortcut --exclude-caches do not -// cancel each other out. It was initially written to demonstrate a bug in -// rejectIfPresent. -func TestMultipleIsExcludedByFile(t *testing.T) { - tempDir := test.TempDir(t) - - // Create some files in a temporary directory. - // Files in UPPERCASE will be used as exclusion triggers later on. - // We will test the inclusion later, so we add the expected value as - // a bool. - files := []struct { - path string - incl bool - }{ - {"42", true}, - - // everything in foodir except the NOFOO tagfile - // should not be included. - {"foodir/NOFOO", true}, - {"foodir/foo", false}, - {"foodir/foosub/underfoo", false}, - - // everything in bardir except the NOBAR tagfile - // should not be included. - {"bardir/NOBAR", true}, - {"bardir/bar", false}, - {"bardir/barsub/underbar", false}, - - // everything in bazdir should be included. - {"bazdir/baz", true}, - {"bazdir/bazsub/underbaz", true}, - } - var errs []error - for _, f := range files { - // create directories first, then the file - p := filepath.Join(tempDir, filepath.FromSlash(f.path)) - errs = append(errs, os.MkdirAll(filepath.Dir(p), 0700)) - errs = append(errs, os.WriteFile(p, []byte(f.path), 0600)) - } - test.OKs(t, errs) // see if anything went wrong during the creation - - // create two rejection functions, one that tests for the NOFOO file - // and one for the NOBAR file - fooExclude, _ := rejectIfPresent("NOFOO") - barExclude, _ := rejectIfPresent("NOBAR") - - // To mock the archiver scanning walk, we create filepath.WalkFn - // that tests against the two rejection functions and stores - // the result in a map against we can test later. 
- m := make(map[string]bool) - walk := func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - excludedByFoo := fooExclude(p, nil, &fs.Local{}) - excludedByBar := barExclude(p, nil, &fs.Local{}) - excluded := excludedByFoo || excludedByBar - // the log message helps debugging in case the test fails - t.Logf("%q: %v || %v = %v", p, excludedByFoo, excludedByBar, excluded) - m[p] = !excluded - if excluded { - return filepath.SkipDir - } - return nil - } - // walk through the temporary file and check the error - test.OK(t, filepath.Walk(tempDir, walk)) - - // compare whether the walk gave the expected values for the test cases - for _, f := range files { - p := filepath.Join(tempDir, filepath.FromSlash(f.path)) - if m[p] != f.incl { - t.Errorf("inclusion status of %s is wrong: want %v, got %v", f.path, f.incl, m[p]) - } - } -} - -// TestIsExcludedByFileSize is for testing the instance of -// --exclude-larger-than parameters -func TestIsExcludedByFileSize(t *testing.T) { - tempDir := test.TempDir(t) - - // Max size of file is set to be 1k - maxSizeStr := "1k" - - // Create some files in a temporary directory. - // Files in UPPERCASE will be used as exclusion triggers later on. - // We will test the inclusion later, so we add the expected value as - // a bool. - files := []struct { - path string - size int64 - incl bool - }{ - {"42", 100, true}, - - // everything in foodir except the FOOLARGE tagfile - // should not be included. - {"foodir/FOOLARGE", 2048, false}, - {"foodir/foo", 1002, true}, - {"foodir/foosub/underfoo", 100, true}, - - // everything in bardir except the BARLARGE tagfile - // should not be included. - {"bardir/BARLARGE", 1030, false}, - {"bardir/bar", 1000, true}, - {"bardir/barsub/underbar", 500, true}, - - // everything in bazdir should be included. - {"bazdir/baz", 100, true}, - {"bazdir/bazsub/underbaz", 200, true}, - } - var errs []error - for _, f := range files { - // create directories first, then the file - p := filepath.Join(tempDir, filepath.FromSlash(f.path)) - errs = append(errs, os.MkdirAll(filepath.Dir(p), 0700)) - file, err := os.OpenFile(p, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) - errs = append(errs, err) - if err == nil { - // create a file with given size - errs = append(errs, file.Truncate(f.size)) - } - errs = append(errs, file.Close()) - } - test.OKs(t, errs) // see if anything went wrong during the creation - - // create rejection function - sizeExclude, _ := rejectBySize(maxSizeStr) - - // To mock the archiver scanning walk, we create filepath.WalkFn - // that tests against the two rejection functions and stores - // the result in a map against we can test later. 
- m := make(map[string]bool) - walk := func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - excluded := sizeExclude(p, fi, nil) - // the log message helps debugging in case the test fails - t.Logf("%q: dir:%t; size:%d; excluded:%v", p, fi.IsDir(), fi.Size(), excluded) - m[p] = !excluded - return nil - } - // walk through the temporary file and check the error - test.OK(t, filepath.Walk(tempDir, walk)) - - // compare whether the walk gave the expected values for the test cases - for _, f := range files { - p := filepath.Join(tempDir, filepath.FromSlash(f.path)) - if m[p] != f.incl { - t.Errorf("inclusion status of %s is wrong: want %v, got %v", f.path, f.incl, m[p]) - } - } -} - -func TestDeviceMap(t *testing.T) { - deviceMap := DeviceMap{ - filepath.FromSlash("/"): 1, - filepath.FromSlash("/usr/local"): 5, - } - - var tests = []struct { - item string - deviceID uint64 - allowed bool - }{ - {"/root", 1, true}, - {"/usr", 1, true}, - - {"/proc", 2, false}, - {"/proc/1234", 2, false}, - - {"/usr", 3, false}, - {"/usr/share", 3, false}, - - {"/usr/local", 5, true}, - {"/usr/local/foobar", 5, true}, - - {"/usr/local/foobar/submount", 23, false}, - {"/usr/local/foobar/submount/file", 23, false}, - - {"/usr/local/foobar/outhersubmount", 1, false}, - {"/usr/local/foobar/outhersubmount/otherfile", 1, false}, - } - - for _, test := range tests { - t.Run("", func(t *testing.T) { - res, err := deviceMap.IsAllowed(filepath.FromSlash(test.item), test.deviceID, &fs.Local{}) - if err != nil { - t.Fatal(err) - } - - if res != test.allowed { - t.Fatalf("wrong result returned by IsAllowed(%v): want %v, got %v", test.item, test.allowed, res) - } - }) - } -} diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go new file mode 100644 index 00000000000..f4444812c75 --- /dev/null +++ b/internal/archiver/exclude.go @@ -0,0 +1,311 @@ +package archiver + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" + "sync" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/ui" +) + +type rejectionCache struct { + m map[string]bool + mtx sync.Mutex +} + +func newRejectionCache() *rejectionCache { + return &rejectionCache{m: make(map[string]bool)} +} + +// Lock locks the mutex in rc. +func (rc *rejectionCache) Lock() { + rc.mtx.Lock() +} + +// Unlock unlocks the mutex in rc. +func (rc *rejectionCache) Unlock() { + rc.mtx.Unlock() +} + +// Get returns the last stored value for dir and a second boolean that +// indicates whether that value was actually written to the cache. It is the +// callers responsibility to call rc.Lock and rc.Unlock before using this +// method, otherwise data races may occur. +func (rc *rejectionCache) Get(dir string) (bool, bool) { + v, ok := rc.m[dir] + return v, ok +} + +// Store stores a new value for dir. It is the callers responsibility to call +// rc.Lock and rc.Unlock before using this method, otherwise data races may +// occur. +func (rc *rejectionCache) Store(dir string, rejected bool) { + rc.m[dir] = rejected +} + +// RejectFunc is a function that takes a filename and os.FileInfo of a +// file that would be included in the backup. The function returns true if it +// should be excluded (rejected) from the backup. +type RejectFunc func(path string, fi os.FileInfo, fs fs.FS) bool + +// RejectIfPresent returns a RejectByNameFunc which itself returns whether a path +// should be excluded. 
The RejectByNameFunc considers a file to be excluded when +// it resides in a directory with an exclusion file, that is specified by +// excludeFileSpec in the form "filename[:content]". The returned error is +// non-nil if the filename component of excludeFileSpec is empty. If rc is +// non-nil, it is going to be used in the RejectByNameFunc to expedite the evaluation +// of a directory based on previous visits. +func RejectIfPresent(excludeFileSpec string, warnf func(msg string, args ...interface{})) (RejectFunc, error) { + if excludeFileSpec == "" { + return nil, errors.New("name for exclusion tagfile is empty") + } + colon := strings.Index(excludeFileSpec, ":") + if colon == 0 { + return nil, fmt.Errorf("no name for exclusion tagfile provided") + } + tf, tc := "", "" + if colon > 0 { + tf = excludeFileSpec[:colon] + tc = excludeFileSpec[colon+1:] + } else { + tf = excludeFileSpec + } + debug.Log("using %q as exclusion tagfile", tf) + rc := newRejectionCache() + return func(filename string, _ os.FileInfo, fs fs.FS) bool { + return isExcludedByFile(filename, tf, tc, rc, fs, warnf) + }, nil +} + +// isExcludedByFile interprets filename as a path and returns true if that file +// is in an excluded directory. A directory is identified as excluded if it contains a +// tagfile which bears the name specified in tagFilename and starts with +// header. If rc is non-nil, it is used to expedite the evaluation of a +// directory based on previous visits. +func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache, fs fs.FS, warnf func(msg string, args ...interface{})) bool { + if tagFilename == "" { + return false + } + + if fs.Base(filename) == tagFilename { + return false // do not exclude the tagfile itself + } + rc.Lock() + defer rc.Unlock() + + dir := fs.Dir(filename) + rejected, visited := rc.Get(dir) + if visited { + return rejected + } + rejected = isDirExcludedByFile(dir, tagFilename, header, fs, warnf) + rc.Store(dir, rejected) + return rejected +} + +func isDirExcludedByFile(dir, tagFilename, header string, fs fs.FS, warnf func(msg string, args ...interface{})) bool { + tf := fs.Join(dir, tagFilename) + _, err := fs.Lstat(tf) + if os.IsNotExist(err) { + return false + } + if err != nil { + warnf("could not access exclusion tagfile: %v", err) + return false + } + // when no signature is given, the mere presence of tf is enough reason + // to exclude filename + if len(header) == 0 { + return true + } + // From this stage, errors mean tagFilename exists but it is malformed. + // Warnings will be generated so that the user is informed that the + // indented ignore-action is not performed. + f, err := fs.OpenFile(tf, os.O_RDONLY, 0) + if err != nil { + warnf("could not open exclusion tagfile: %v", err) + return false + } + defer func() { + _ = f.Close() + }() + buf := make([]byte, len(header)) + _, err = io.ReadFull(f, buf) + // EOF is handled with a dedicated message, otherwise the warning were too cryptic + if err == io.EOF { + warnf("invalid (too short) signature in exclusion tagfile %q\n", tf) + return false + } + if err != nil { + warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err) + return false + } + if !bytes.Equal(buf, []byte(header)) { + warnf("invalid signature in exclusion tagfile %q\n", tf) + return false + } + return true +} + +// deviceMap is used to track allowed source devices for backup. This is used to +// check for crossing mount points during backup (for --one-file-system). It +// maps the name of a source path to its device ID. 
+type deviceMap map[string]uint64 + +// newDeviceMap creates a new device map from the list of source paths. +func newDeviceMap(allowedSourcePaths []string, fs fs.FS) (deviceMap, error) { + deviceMap := make(map[string]uint64) + + for _, item := range allowedSourcePaths { + item, err := fs.Abs(fs.Clean(item)) + if err != nil { + return nil, err + } + + fi, err := fs.Lstat(item) + if err != nil { + return nil, err + } + + id, err := fs.DeviceID(fi) + if err != nil { + return nil, err + } + + deviceMap[item] = id + } + + if len(deviceMap) == 0 { + return nil, errors.New("zero allowed devices") + } + + return deviceMap, nil +} + +// IsAllowed returns true if the path is located on an allowed device. +func (m deviceMap) IsAllowed(item string, deviceID uint64, fs fs.FS) (bool, error) { + for dir := item; ; dir = fs.Dir(dir) { + debug.Log("item %v, test dir %v", item, dir) + + // find a parent directory that is on an allowed device (otherwise + // we would not traverse the directory at all) + allowedID, ok := m[dir] + if !ok { + if dir == fs.Dir(dir) { + // arrived at root, no allowed device found. this should not happen. + break + } + continue + } + + // if the item has a different device ID than the parent directory, + // we crossed a file system boundary + if allowedID != deviceID { + debug.Log("item %v (dir %v) on disallowed device %d", item, dir, deviceID) + return false, nil + } + + // item is on allowed device, accept it + debug.Log("item %v allowed", item) + return true, nil + } + + return false, fmt.Errorf("item %v (device ID %v) not found, deviceMap: %v", item, deviceID, m) +} + +// RejectByDevice returns a RejectFunc that rejects files which are on a +// different file systems than the files/dirs in samples. +func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { + deviceMap, err := newDeviceMap(samples, filesystem) + if err != nil { + return nil, err + } + debug.Log("allowed devices: %v\n", deviceMap) + + return func(item string, fi os.FileInfo, fs fs.FS) bool { + id, err := fs.DeviceID(fi) + if err != nil { + // This should never happen because gatherDevices() would have + // errored out earlier. If it still does that's a reason to panic. + panic(err) + } + + allowed, err := deviceMap.IsAllowed(fs.Clean(item), id, fs) + if err != nil { + // this should not happen + panic(fmt.Sprintf("error checking device ID of %v: %v", item, err)) + } + + if allowed { + // accept item + return false + } + + // reject everything except directories + if !fi.IsDir() { + return true + } + + // special case: make sure we keep mountpoints (directories which + // contain a mounted file system). Test this by checking if the parent + // directory would be included. 
+ parentDir := fs.Dir(fs.Clean(item)) + + parentFI, err := fs.Lstat(parentDir) + if err != nil { + debug.Log("item %v: error running lstat() on parent directory: %v", item, err) + // if in doubt, reject + return true + } + + parentDeviceID, err := fs.DeviceID(parentFI) + if err != nil { + debug.Log("item %v: getting device ID of parent directory: %v", item, err) + // if in doubt, reject + return true + } + + parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID, fs) + if err != nil { + debug.Log("item %v: error checking parent directory: %v", item, err) + // if in doubt, reject + return true + } + + if parentAllowed { + // we found a mount point, so accept the directory + return false + } + + // reject everything else + return true + }, nil +} + +func RejectBySize(maxSizeStr string) (RejectFunc, error) { + maxSize, err := ui.ParseBytes(maxSizeStr) + if err != nil { + return nil, err + } + + return func(item string, fi os.FileInfo, _ fs.FS) bool { + // directory will be ignored + if fi.IsDir() { + return false + } + + filesize := fi.Size() + if filesize > maxSize { + debug.Log("file %s is oversize: %d", item, filesize) + return true + } + + return false + }, nil +} diff --git a/internal/archiver/exclude_test.go b/internal/archiver/exclude_test.go new file mode 100644 index 00000000000..b9f1f8cdd5c --- /dev/null +++ b/internal/archiver/exclude_test.go @@ -0,0 +1,259 @@ +package archiver + +import ( + "os" + "path/filepath" + "testing" + + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/test" +) + +func TestIsExcludedByFile(t *testing.T) { + const ( + tagFilename = "CACHEDIR.TAG" + header = "Signature: 8a477f597d28d172789f06886806bc55" + ) + tests := []struct { + name string + tagFile string + content string + want bool + }{ + {"NoTagfile", "", "", false}, + {"EmptyTagfile", tagFilename, "", true}, + {"UnnamedTagFile", "", header, false}, + {"WrongTagFile", "notatagfile", header, false}, + {"IncorrectSig", tagFilename, header[1:], false}, + {"ValidSig", tagFilename, header, true}, + {"ValidPlusStuff", tagFilename, header + "foo", true}, + {"ValidPlusNewlineAndStuff", tagFilename, header + "\nbar", true}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tempDir := test.TempDir(t) + + foo := filepath.Join(tempDir, "foo") + err := os.WriteFile(foo, []byte("foo"), 0666) + if err != nil { + t.Fatalf("could not write file: %v", err) + } + if tc.tagFile != "" { + tagFile := filepath.Join(tempDir, tc.tagFile) + err = os.WriteFile(tagFile, []byte(tc.content), 0666) + if err != nil { + t.Fatalf("could not write tagfile: %v", err) + } + } + h := header + if tc.content == "" { + h = "" + } + if got := isExcludedByFile(foo, tagFilename, h, newRejectionCache(), &fs.Local{}, func(msg string, args ...interface{}) { t.Logf(msg, args...) }); tc.want != got { + t.Fatalf("expected %v, got %v", tc.want, got) + } + }) + } +} + +// TestMultipleIsExcludedByFile is for testing that multiple instances of +// the --exclude-if-present parameter (or the shortcut --exclude-caches do not +// cancel each other out. It was initially written to demonstrate a bug in +// rejectIfPresent. +func TestMultipleIsExcludedByFile(t *testing.T) { + tempDir := test.TempDir(t) + + // Create some files in a temporary directory. + // Files in UPPERCASE will be used as exclusion triggers later on. + // We will test the inclusion later, so we add the expected value as + // a bool. 
+ files := []struct { + path string + incl bool + }{ + {"42", true}, + + // everything in foodir except the NOFOO tagfile + // should not be included. + {"foodir/NOFOO", true}, + {"foodir/foo", false}, + {"foodir/foosub/underfoo", false}, + + // everything in bardir except the NOBAR tagfile + // should not be included. + {"bardir/NOBAR", true}, + {"bardir/bar", false}, + {"bardir/barsub/underbar", false}, + + // everything in bazdir should be included. + {"bazdir/baz", true}, + {"bazdir/bazsub/underbaz", true}, + } + var errs []error + for _, f := range files { + // create directories first, then the file + p := filepath.Join(tempDir, filepath.FromSlash(f.path)) + errs = append(errs, os.MkdirAll(filepath.Dir(p), 0700)) + errs = append(errs, os.WriteFile(p, []byte(f.path), 0600)) + } + test.OKs(t, errs) // see if anything went wrong during the creation + + // create two rejection functions, one that tests for the NOFOO file + // and one for the NOBAR file + fooExclude, _ := RejectIfPresent("NOFOO", nil) + barExclude, _ := RejectIfPresent("NOBAR", nil) + + // To mock the archiver scanning walk, we create filepath.WalkFn + // that tests against the two rejection functions and stores + // the result in a map against we can test later. + m := make(map[string]bool) + walk := func(p string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + excludedByFoo := fooExclude(p, nil, &fs.Local{}) + excludedByBar := barExclude(p, nil, &fs.Local{}) + excluded := excludedByFoo || excludedByBar + // the log message helps debugging in case the test fails + t.Logf("%q: %v || %v = %v", p, excludedByFoo, excludedByBar, excluded) + m[p] = !excluded + if excluded { + return filepath.SkipDir + } + return nil + } + // walk through the temporary file and check the error + test.OK(t, filepath.Walk(tempDir, walk)) + + // compare whether the walk gave the expected values for the test cases + for _, f := range files { + p := filepath.Join(tempDir, filepath.FromSlash(f.path)) + if m[p] != f.incl { + t.Errorf("inclusion status of %s is wrong: want %v, got %v", f.path, f.incl, m[p]) + } + } +} + +// TestIsExcludedByFileSize is for testing the instance of +// --exclude-larger-than parameters +func TestIsExcludedByFileSize(t *testing.T) { + tempDir := test.TempDir(t) + + // Max size of file is set to be 1k + maxSizeStr := "1k" + + // Create some files in a temporary directory. + // Files in UPPERCASE will be used as exclusion triggers later on. + // We will test the inclusion later, so we add the expected value as + // a bool. + files := []struct { + path string + size int64 + incl bool + }{ + {"42", 100, true}, + + // everything in foodir except the FOOLARGE tagfile + // should not be included. + {"foodir/FOOLARGE", 2048, false}, + {"foodir/foo", 1002, true}, + {"foodir/foosub/underfoo", 100, true}, + + // everything in bardir except the BARLARGE tagfile + // should not be included. + {"bardir/BARLARGE", 1030, false}, + {"bardir/bar", 1000, true}, + {"bardir/barsub/underbar", 500, true}, + + // everything in bazdir should be included. 
+ {"bazdir/baz", 100, true}, + {"bazdir/bazsub/underbaz", 200, true}, + } + var errs []error + for _, f := range files { + // create directories first, then the file + p := filepath.Join(tempDir, filepath.FromSlash(f.path)) + errs = append(errs, os.MkdirAll(filepath.Dir(p), 0700)) + file, err := os.OpenFile(p, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + errs = append(errs, err) + if err == nil { + // create a file with given size + errs = append(errs, file.Truncate(f.size)) + } + errs = append(errs, file.Close()) + } + test.OKs(t, errs) // see if anything went wrong during the creation + + // create rejection function + sizeExclude, _ := RejectBySize(maxSizeStr) + + // To mock the archiver scanning walk, we create filepath.WalkFn + // that tests against the two rejection functions and stores + // the result in a map against we can test later. + m := make(map[string]bool) + walk := func(p string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + excluded := sizeExclude(p, fi, nil) + // the log message helps debugging in case the test fails + t.Logf("%q: dir:%t; size:%d; excluded:%v", p, fi.IsDir(), fi.Size(), excluded) + m[p] = !excluded + return nil + } + // walk through the temporary file and check the error + test.OK(t, filepath.Walk(tempDir, walk)) + + // compare whether the walk gave the expected values for the test cases + for _, f := range files { + p := filepath.Join(tempDir, filepath.FromSlash(f.path)) + if m[p] != f.incl { + t.Errorf("inclusion status of %s is wrong: want %v, got %v", f.path, f.incl, m[p]) + } + } +} + +func TestDeviceMap(t *testing.T) { + deviceMap := deviceMap{ + filepath.FromSlash("/"): 1, + filepath.FromSlash("/usr/local"): 5, + } + + var tests = []struct { + item string + deviceID uint64 + allowed bool + }{ + {"/root", 1, true}, + {"/usr", 1, true}, + + {"/proc", 2, false}, + {"/proc/1234", 2, false}, + + {"/usr", 3, false}, + {"/usr/share", 3, false}, + + {"/usr/local", 5, true}, + {"/usr/local/foobar", 5, true}, + + {"/usr/local/foobar/submount", 23, false}, + {"/usr/local/foobar/submount/file", 23, false}, + + {"/usr/local/foobar/outhersubmount", 1, false}, + {"/usr/local/foobar/outhersubmount/otherfile", 1, false}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + res, err := deviceMap.IsAllowed(filepath.FromSlash(test.item), test.deviceID, &fs.Local{}) + if err != nil { + t.Fatal(err) + } + + if res != test.allowed { + t.Fatalf("wrong result returned by IsAllowed(%v): want %v, got %v", test.item, test.allowed, res) + } + }) + } +} From 5d58945718dfaf2ee7946542eb26b69c8cca8fb0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 13:48:31 +0200 Subject: [PATCH 646/893] cleanup include / exclude option setup --- cmd/restic/cmd_backup.go | 2 +- cmd/restic/cmd_restore.go | 4 ++-- cmd/restic/cmd_rewrite.go | 2 +- cmd/restic/exclude.go | 2 +- cmd/restic/include.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index ceb7694b1c1..8d72a27b090 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -108,7 +108,7 @@ func init() { f.VarP(&backupOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')") f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the source files/directories (overrides the "parent" flag)`) - initExcludePatternOptions(f, &backupOptions.excludePatternOptions) + backupOptions.excludePatternOptions.Add(f) 
f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems, don't cross filesystem boundaries and subvolumes") f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes `filename[:header]`, exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)") diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index c58b0b80df2..f20359dc0a2 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -68,8 +68,8 @@ func init() { flags := cmdRestore.Flags() flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to") - initExcludePatternOptions(flags, &restoreOptions.excludePatternOptions) - initIncludePatternOptions(flags, &restoreOptions.includePatternOptions) + restoreOptions.excludePatternOptions.Add(flags) + restoreOptions.includePatternOptions.Add(flags) initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter) flags.BoolVar(&restoreOptions.DryRun, "dry-run", false, "do not write any data, just show what would be done") diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 7788016b78e..fc9da5b6092 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -103,7 +103,7 @@ func init() { f.StringVar(&rewriteOptions.Metadata.Time, "new-time", "", "replace time of the backup") initMultiSnapshotFilter(f, &rewriteOptions.SnapshotFilter, true) - initExcludePatternOptions(f, &rewriteOptions.excludePatternOptions) + rewriteOptions.excludePatternOptions.Add(f) } type rewriteFilterFunc func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go index 40eb93933df..a37f9c68ef7 100644 --- a/cmd/restic/exclude.go +++ b/cmd/restic/exclude.go @@ -130,7 +130,7 @@ type excludePatternOptions struct { InsensitiveExcludeFiles []string } -func initExcludePatternOptions(f *pflag.FlagSet, opts *excludePatternOptions) { +func (opts *excludePatternOptions) Add(f *pflag.FlagSet) { f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames") f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)") diff --git a/cmd/restic/include.go b/cmd/restic/include.go index dcc4c7f3727..514a2401641 100644 --- a/cmd/restic/include.go +++ b/cmd/restic/include.go @@ -19,7 +19,7 @@ type includePatternOptions struct { InsensitiveIncludeFiles []string } -func initIncludePatternOptions(f *pflag.FlagSet, opts *includePatternOptions) { +func (opts *includePatternOptions) Add(f *pflag.FlagSet) { f.StringArrayVarP(&opts.Includes, "include", "i", nil, "include a `pattern` (can be specified multiple times)") f.StringArrayVar(&opts.InsensitiveIncludes, "iinclude", nil, "same as --include `pattern` but ignores the casing of filenames") f.StringArrayVar(&opts.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)") From f1585af0f263e517f1cfbb468fabb8a63dc9bc76 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 14:03:36 +0200 Subject: [PATCH 647/893] move include/exclude options to filter package --- cmd/restic/cmd_backup.go | 13 +- cmd/restic/cmd_restore.go | 13 +- cmd/restic/cmd_rewrite.go | 9 +- 
cmd/restic/cmd_rewrite_integration_test.go | 3 +- cmd/restic/exclude.go | 160 +---------------- cmd/restic/integration_filter_pattern_test.go | 25 +-- internal/archiver/exclude.go | 15 +- internal/filter/exclude.go | 162 ++++++++++++++++++ .../filter}/exclude_test.go | 6 +- {cmd/restic => internal/filter}/include.go | 37 ++-- .../filter}/include_test.go | 6 +- 11 files changed, 233 insertions(+), 216 deletions(-) create mode 100644 internal/filter/exclude.go rename {cmd/restic => internal/filter}/exclude_test.go (92%) rename {cmd/restic => internal/filter}/include.go (65%) rename {cmd/restic => internal/filter}/include_test.go (92%) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 8d72a27b090..43ef29ba255 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -20,6 +20,7 @@ import ( "github.com/restic/restic/internal/archiver" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -66,7 +67,7 @@ Exit status is 12 if the password is incorrect. // BackupOptions bundles all options for the backup command. type BackupOptions struct { - excludePatternOptions + filter.ExcludePatternOptions Parent string GroupBy restic.SnapshotGroupByOptions @@ -108,7 +109,7 @@ func init() { f.VarP(&backupOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')") f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the source files/directories (overrides the "parent" flag)`) - backupOptions.excludePatternOptions.Add(f) + backupOptions.ExcludePatternOptions.Add(f) f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems, don't cross filesystem boundaries and subvolumes") f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes `filename[:header]`, exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)") @@ -297,7 +298,7 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error { // collectRejectByNameFuncs returns a list of all functions which may reject data // from being saved in a snapshot based on path only -func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []RejectByNameFunc, err error) { +func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []archiver.RejectByNameFunc, err error) { // exclude restic cache if repo.Cache != nil { f, err := rejectResticCache(repo) @@ -308,11 +309,13 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) ( fs = append(fs, f) } - fsPatterns, err := opts.excludePatternOptions.CollectPatterns() + fsPatterns, err := opts.ExcludePatternOptions.CollectPatterns(Warnf) if err != nil { return nil, err } - fs = append(fs, fsPatterns...) 
+ for _, pat := range fsPatterns { + fs = append(fs, archiver.RejectByNameFunc(pat)) + } return fs, nil } diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index f20359dc0a2..82dd408a854 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -7,6 +7,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/restorer" "github.com/restic/restic/internal/ui" @@ -49,8 +50,8 @@ Exit status is 12 if the password is incorrect. // RestoreOptions collects all options for the restore command. type RestoreOptions struct { - excludePatternOptions - includePatternOptions + filter.ExcludePatternOptions + filter.IncludePatternOptions Target string restic.SnapshotFilter DryRun bool @@ -68,8 +69,8 @@ func init() { flags := cmdRestore.Flags() flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to") - restoreOptions.excludePatternOptions.Add(flags) - restoreOptions.includePatternOptions.Add(flags) + restoreOptions.ExcludePatternOptions.Add(flags) + restoreOptions.IncludePatternOptions.Add(flags) initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter) flags.BoolVar(&restoreOptions.DryRun, "dry-run", false, "do not write any data, just show what would be done") @@ -82,12 +83,12 @@ func init() { func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - excludePatternFns, err := opts.excludePatternOptions.CollectPatterns() + excludePatternFns, err := opts.ExcludePatternOptions.CollectPatterns(Warnf) if err != nil { return err } - includePatternFns, err := opts.includePatternOptions.CollectPatterns() + includePatternFns, err := opts.IncludePatternOptions.CollectPatterns(Warnf) if err != nil { return err } diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index fc9da5b6092..a9f664110da 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -10,6 +10,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/walker" @@ -88,7 +89,7 @@ type RewriteOptions struct { Metadata snapshotMetadataArgs restic.SnapshotFilter - excludePatternOptions + filter.ExcludePatternOptions } var rewriteOptions RewriteOptions @@ -103,7 +104,7 @@ func init() { f.StringVar(&rewriteOptions.Metadata.Time, "new-time", "", "replace time of the backup") initMultiSnapshotFilter(f, &rewriteOptions.SnapshotFilter, true) - rewriteOptions.excludePatternOptions.Add(f) + rewriteOptions.ExcludePatternOptions.Add(f) } type rewriteFilterFunc func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) @@ -113,7 +114,7 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti return false, errors.Errorf("snapshot %v has nil tree", sn.ID().Str()) } - rejectByNameFuncs, err := opts.excludePatternOptions.CollectPatterns() + rejectByNameFuncs, err := opts.ExcludePatternOptions.CollectPatterns(Warnf) if err != nil { return false, err } @@ -263,7 +264,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r } func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, args []string) error { - if opts.excludePatternOptions.Empty() && 
opts.Metadata.empty() { + if opts.ExcludePatternOptions.Empty() && opts.Metadata.empty() { return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided") } diff --git a/cmd/restic/cmd_rewrite_integration_test.go b/cmd/restic/cmd_rewrite_integration_test.go index 781266184aa..6471d49ba21 100644 --- a/cmd/restic/cmd_rewrite_integration_test.go +++ b/cmd/restic/cmd_rewrite_integration_test.go @@ -5,6 +5,7 @@ import ( "path/filepath" "testing" + "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui" @@ -12,7 +13,7 @@ import ( func testRunRewriteExclude(t testing.TB, gopts GlobalOptions, excludes []string, forget bool, metadata snapshotMetadataArgs) { opts := RewriteOptions{ - excludePatternOptions: excludePatternOptions{ + ExcludePatternOptions: filter.ExcludePatternOptions{ Excludes: excludes, }, Forget: forget, diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go index a37f9c68ef7..99d1128a9e1 100644 --- a/cmd/restic/exclude.go +++ b/cmd/restic/exclude.go @@ -1,60 +1,16 @@ package main import ( - "bufio" - "bytes" - "fmt" - "os" - "strings" - + "github.com/restic/restic/internal/archiver" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/textfile" - "github.com/spf13/pflag" ) -// RejectByNameFunc is a function that takes a filename of a -// file that would be included in the backup. The function returns true if it -// should be excluded (rejected) from the backup. -type RejectByNameFunc func(path string) bool - -// rejectByPattern returns a RejectByNameFunc which rejects files that match -// one of the patterns. -func rejectByPattern(patterns []string) RejectByNameFunc { - parsedPatterns := filter.ParsePatterns(patterns) - return func(item string) bool { - matched, err := filter.List(parsedPatterns, item) - if err != nil { - Warnf("error for exclude pattern: %v", err) - } - - if matched { - debug.Log("path %q excluded by an exclude pattern", item) - return true - } - - return false - } -} - -// Same as `rejectByPattern` but case insensitive. -func rejectByInsensitivePattern(patterns []string) RejectByNameFunc { - for index, path := range patterns { - patterns[index] = strings.ToLower(path) - } - - rejFunc := rejectByPattern(patterns) - return func(item string) bool { - return rejFunc(strings.ToLower(item)) - } -} - // rejectResticCache returns a RejectByNameFunc that rejects the restic cache // directory (if set). -func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) { +func rejectResticCache(repo *repository.Repository) (archiver.RejectByNameFunc, error) { if repo.Cache == nil { return func(string) bool { return false @@ -75,115 +31,3 @@ func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) { return false }, nil } - -// readPatternsFromFiles reads all files and returns the list of -// patterns. For each line, leading and trailing white space is removed -// and comment lines are ignored. For each remaining pattern, environment -// variables are resolved. For adding a literal dollar sign ($), write $$ to -// the file. 
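
The pattern-file format removed here (and re-added under internal/filter further down) is small enough to sketch on its own: blank lines and '#' comments are dropped, environment variables are expanded, and "$$" escapes a literal dollar sign. A standalone illustration using only the standard library, not restic's actual implementation:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// expandLine applies the rules described above to one pattern-file line:
// blank lines and comments are skipped, "$NAME" is replaced from the
// environment, and "$$" yields a literal "$".
func expandLine(line string) (string, bool) {
	line = strings.TrimSpace(line)
	if line == "" || strings.HasPrefix(line, "#") {
		return "", false
	}
	line = os.Expand(line, func(name string) string {
		if name == "$" {
			return "$"
		}
		return os.Getenv(name)
	})
	return line, true
}

func main() {
	input := "# cache directories\n$HOME/.cache\n\nprices-$$100.txt\n"
	scanner := bufio.NewScanner(strings.NewReader(input))
	var patterns []string
	for scanner.Scan() {
		if p, ok := expandLine(scanner.Text()); ok {
			patterns = append(patterns, p)
		}
	}
	fmt.Println(patterns)
}
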
-func readPatternsFromFiles(files []string) ([]string, error) { - getenvOrDollar := func(s string) string { - if s == "$" { - return "$" - } - return os.Getenv(s) - } - - var patterns []string - for _, filename := range files { - err := func() (err error) { - data, err := textfile.Read(filename) - if err != nil { - return err - } - - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - - // ignore empty lines - if line == "" { - continue - } - - // strip comments - if strings.HasPrefix(line, "#") { - continue - } - - line = os.Expand(line, getenvOrDollar) - patterns = append(patterns, line) - } - return scanner.Err() - }() - if err != nil { - return nil, fmt.Errorf("failed to read patterns from file %q: %w", filename, err) - } - } - return patterns, nil -} - -type excludePatternOptions struct { - Excludes []string - InsensitiveExcludes []string - ExcludeFiles []string - InsensitiveExcludeFiles []string -} - -func (opts *excludePatternOptions) Add(f *pflag.FlagSet) { - f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") - f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames") - f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)") - f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns") -} - -func (opts *excludePatternOptions) Empty() bool { - return len(opts.Excludes) == 0 && len(opts.InsensitiveExcludes) == 0 && len(opts.ExcludeFiles) == 0 && len(opts.InsensitiveExcludeFiles) == 0 -} - -func (opts excludePatternOptions) CollectPatterns() ([]RejectByNameFunc, error) { - var fs []RejectByNameFunc - // add patterns from file - if len(opts.ExcludeFiles) > 0 { - excludePatterns, err := readPatternsFromFiles(opts.ExcludeFiles) - if err != nil { - return nil, err - } - - if err := filter.ValidatePatterns(excludePatterns); err != nil { - return nil, errors.Fatalf("--exclude-file: %s", err) - } - - opts.Excludes = append(opts.Excludes, excludePatterns...) - } - - if len(opts.InsensitiveExcludeFiles) > 0 { - excludes, err := readPatternsFromFiles(opts.InsensitiveExcludeFiles) - if err != nil { - return nil, err - } - - if err := filter.ValidatePatterns(excludes); err != nil { - return nil, errors.Fatalf("--iexclude-file: %s", err) - } - - opts.InsensitiveExcludes = append(opts.InsensitiveExcludes, excludes...) 
- } - - if len(opts.InsensitiveExcludes) > 0 { - if err := filter.ValidatePatterns(opts.InsensitiveExcludes); err != nil { - return nil, errors.Fatalf("--iexclude: %s", err) - } - - fs = append(fs, rejectByInsensitivePattern(opts.InsensitiveExcludes)) - } - - if len(opts.Excludes) > 0 { - if err := filter.ValidatePatterns(opts.Excludes); err != nil { - return nil, errors.Fatalf("--exclude: %s", err) - } - - fs = append(fs, rejectByPattern(opts.Excludes)) - } - return fs, nil -} diff --git a/cmd/restic/integration_filter_pattern_test.go b/cmd/restic/integration_filter_pattern_test.go index dccbcc0a0b5..46badbe4fa2 100644 --- a/cmd/restic/integration_filter_pattern_test.go +++ b/cmd/restic/integration_filter_pattern_test.go @@ -5,6 +5,7 @@ import ( "path/filepath" "testing" + "github.com/restic/restic/internal/filter" rtest "github.com/restic/restic/internal/test" ) @@ -17,14 +18,14 @@ func TestBackupFailsWhenUsingInvalidPatterns(t *testing.T) { var err error // Test --exclude - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) + err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --iexclude - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) + err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided: *[._]log[.-][0-9] @@ -47,14 +48,14 @@ func TestBackupFailsWhenUsingInvalidPatternsFromFile(t *testing.T) { var err error // Test --exclude-file: - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{ExcludeFiles: []string{excludeFile}}}, env.gopts) + err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{ExcludeFiles: []string{excludeFile}}}, env.gopts) rtest.Equals(t, `Fatal: --exclude-file: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --iexclude-file - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludeFiles: []string{excludeFile}}}, env.gopts) + err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludeFiles: []string{excludeFile}}}, env.gopts) rtest.Equals(t, `Fatal: --iexclude-file: invalid pattern(s) provided: *[._]log[.-][0-9] @@ -70,28 +71,28 @@ func TestRestoreFailsWhenUsingInvalidPatterns(t *testing.T) { var err error // Test --exclude - err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{Excludes: 
[]string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --iexclude - err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --include - err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{Includes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{Includes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --include: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) // Test --iinclude - err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{InsensitiveIncludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{InsensitiveIncludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts) rtest.Equals(t, `Fatal: --iinclude: invalid pattern(s) provided: *[._]log[.-][0-9] @@ -111,22 +112,22 @@ func TestRestoreFailsWhenUsingInvalidPatternsFromFile(t *testing.T) { t.Fatalf("Could not write include file: %v", fileErr) } - err := testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{IncludeFiles: []string{patternsFile}}}, env.gopts) + err := testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{IncludeFiles: []string{patternsFile}}}, env.gopts) rtest.Equals(t, `Fatal: --include-file: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) - err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{ExcludeFiles: []string{patternsFile}}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{ExcludeFiles: []string{patternsFile}}}, env.gopts) rtest.Equals(t, `Fatal: --exclude-file: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) - err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{InsensitiveIncludeFiles: []string{patternsFile}}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{InsensitiveIncludeFiles: []string{patternsFile}}}, env.gopts) rtest.Equals(t, `Fatal: --iinclude-file: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) - err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludeFiles: 
[]string{patternsFile}}}, env.gopts) + err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludeFiles: []string{patternsFile}}}, env.gopts) rtest.Equals(t, `Fatal: --iexclude-file: invalid pattern(s) provided: *[._]log[.-][0-9] !*[._]log[.-][0-9]`, err.Error()) diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go index f4444812c75..62e4ea17e77 100644 --- a/internal/archiver/exclude.go +++ b/internal/archiver/exclude.go @@ -14,6 +14,16 @@ import ( "github.com/restic/restic/internal/ui" ) +// RejectByNameFunc is a function that takes a filename of a +// file that would be included in the backup. The function returns true if it +// should be excluded (rejected) from the backup. +type RejectByNameFunc func(path string) bool + +// RejectFunc is a function that takes a filename and os.FileInfo of a +// file that would be included in the backup. The function returns true if it +// should be excluded (rejected) from the backup. +type RejectFunc func(path string, fi os.FileInfo, fs fs.FS) bool + type rejectionCache struct { m map[string]bool mtx sync.Mutex @@ -49,11 +59,6 @@ func (rc *rejectionCache) Store(dir string, rejected bool) { rc.m[dir] = rejected } -// RejectFunc is a function that takes a filename and os.FileInfo of a -// file that would be included in the backup. The function returns true if it -// should be excluded (rejected) from the backup. -type RejectFunc func(path string, fi os.FileInfo, fs fs.FS) bool - // RejectIfPresent returns a RejectByNameFunc which itself returns whether a path // should be excluded. The RejectByNameFunc considers a file to be excluded when // it resides in a directory with an exclusion file, that is specified by diff --git a/internal/filter/exclude.go b/internal/filter/exclude.go new file mode 100644 index 00000000000..48ecdfddf77 --- /dev/null +++ b/internal/filter/exclude.go @@ -0,0 +1,162 @@ +package filter + +import ( + "bufio" + "bytes" + "fmt" + "os" + "strings" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/textfile" + "github.com/spf13/pflag" +) + +// RejectByNameFunc is a function that takes a filename of a +// file that would be included in the backup. The function returns true if it +// should be excluded (rejected) from the backup. +type RejectByNameFunc func(path string) bool + +// RejectByPattern returns a RejectByNameFunc which rejects files that match +// one of the patterns. +func RejectByPattern(patterns []string, warnf func(msg string, args ...interface{})) RejectByNameFunc { + parsedPatterns := ParsePatterns(patterns) + return func(item string) bool { + matched, err := List(parsedPatterns, item) + if err != nil { + warnf("error for exclude pattern: %v", err) + } + + if matched { + debug.Log("path %q excluded by an exclude pattern", item) + return true + } + + return false + } +} + +// RejectByInsensitivePattern is like RejectByPattern but case insensitive. +func RejectByInsensitivePattern(patterns []string, warnf func(msg string, args ...interface{})) RejectByNameFunc { + for index, path := range patterns { + patterns[index] = strings.ToLower(path) + } + + rejFunc := RejectByPattern(patterns, warnf) + return func(item string) bool { + return rejFunc(strings.ToLower(item)) + } +} + +// readPatternsFromFiles reads all files and returns the list of +// patterns. For each line, leading and trailing white space is removed +// and comment lines are ignored. 
For each remaining pattern, environment +// variables are resolved. For adding a literal dollar sign ($), write $$ to +// the file. +func readPatternsFromFiles(files []string) ([]string, error) { + getenvOrDollar := func(s string) string { + if s == "$" { + return "$" + } + return os.Getenv(s) + } + + var patterns []string + for _, filename := range files { + err := func() (err error) { + data, err := textfile.Read(filename) + if err != nil { + return err + } + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + // ignore empty lines + if line == "" { + continue + } + + // strip comments + if strings.HasPrefix(line, "#") { + continue + } + + line = os.Expand(line, getenvOrDollar) + patterns = append(patterns, line) + } + return scanner.Err() + }() + if err != nil { + return nil, fmt.Errorf("failed to read patterns from file %q: %w", filename, err) + } + } + return patterns, nil +} + +type ExcludePatternOptions struct { + Excludes []string + InsensitiveExcludes []string + ExcludeFiles []string + InsensitiveExcludeFiles []string +} + +func (opts *ExcludePatternOptions) Add(f *pflag.FlagSet) { + f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") + f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames") + f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)") + f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns") +} + +func (opts *ExcludePatternOptions) Empty() bool { + return len(opts.Excludes) == 0 && len(opts.InsensitiveExcludes) == 0 && len(opts.ExcludeFiles) == 0 && len(opts.InsensitiveExcludeFiles) == 0 +} + +func (opts ExcludePatternOptions) CollectPatterns(warnf func(msg string, args ...interface{})) ([]RejectByNameFunc, error) { + var fs []RejectByNameFunc + // add patterns from file + if len(opts.ExcludeFiles) > 0 { + excludePatterns, err := readPatternsFromFiles(opts.ExcludeFiles) + if err != nil { + return nil, err + } + + if err := ValidatePatterns(excludePatterns); err != nil { + return nil, errors.Fatalf("--exclude-file: %s", err) + } + + opts.Excludes = append(opts.Excludes, excludePatterns...) + } + + if len(opts.InsensitiveExcludeFiles) > 0 { + excludes, err := readPatternsFromFiles(opts.InsensitiveExcludeFiles) + if err != nil { + return nil, err + } + + if err := ValidatePatterns(excludes); err != nil { + return nil, errors.Fatalf("--iexclude-file: %s", err) + } + + opts.InsensitiveExcludes = append(opts.InsensitiveExcludes, excludes...) 
+ } + + if len(opts.InsensitiveExcludes) > 0 { + if err := ValidatePatterns(opts.InsensitiveExcludes); err != nil { + return nil, errors.Fatalf("--iexclude: %s", err) + } + + fs = append(fs, RejectByInsensitivePattern(opts.InsensitiveExcludes, warnf)) + } + + if len(opts.Excludes) > 0 { + if err := ValidatePatterns(opts.Excludes); err != nil { + return nil, errors.Fatalf("--exclude: %s", err) + } + + fs = append(fs, RejectByPattern(opts.Excludes, warnf)) + } + return fs, nil +} diff --git a/cmd/restic/exclude_test.go b/internal/filter/exclude_test.go similarity index 92% rename from cmd/restic/exclude_test.go rename to internal/filter/exclude_test.go index 177a81df22a..738fb216d18 100644 --- a/cmd/restic/exclude_test.go +++ b/internal/filter/exclude_test.go @@ -1,4 +1,4 @@ -package main +package filter import ( "testing" @@ -21,7 +21,7 @@ func TestRejectByPattern(t *testing.T) { for _, tc := range tests { t.Run("", func(t *testing.T) { - reject := rejectByPattern(patterns) + reject := RejectByPattern(patterns, nil) res := reject(tc.filename) if res != tc.reject { t.Fatalf("wrong result for filename %v: want %v, got %v", @@ -48,7 +48,7 @@ func TestRejectByInsensitivePattern(t *testing.T) { for _, tc := range tests { t.Run("", func(t *testing.T) { - reject := rejectByInsensitivePattern(patterns) + reject := RejectByInsensitivePattern(patterns, nil) res := reject(tc.filename) if res != tc.reject { t.Fatalf("wrong result for filename %v: want %v, got %v", diff --git a/cmd/restic/include.go b/internal/filter/include.go similarity index 65% rename from cmd/restic/include.go rename to internal/filter/include.go index 514a2401641..87d5f12079b 100644 --- a/cmd/restic/include.go +++ b/internal/filter/include.go @@ -1,10 +1,9 @@ -package main +package filter import ( "strings" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/filter" "github.com/spf13/pflag" ) @@ -12,21 +11,21 @@ import ( // in the restore process and returns whether it should be included. 
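
The net effect of the move is that the filter package no longer calls the CLI's Warnf directly; the caller injects a warning callback when the reject functions are built. Roughly how a caller outside cmd/restic would use an API of this shape (a simplified stand-in built on path/filepath.Match, not restic's real pattern syntax):

package main

import (
	"fmt"
	"log"
	"path/filepath"
)

// rejectByPattern mimics the injected-warnf shape shown above: pattern errors
// are reported through the callback, matches reject the path.
func rejectByPattern(patterns []string, warnf func(msg string, args ...interface{})) func(string) bool {
	return func(item string) bool {
		for _, pat := range patterns {
			matched, err := filepath.Match(pat, filepath.Base(item))
			if err != nil {
				warnf("error for exclude pattern %q: %v", pat, err)
				continue
			}
			if matched {
				return true
			}
		}
		return false
	}
}

func main() {
	warnf := func(msg string, args ...interface{}) { log.Printf("warning: "+msg, args...) }
	reject := rejectByPattern([]string{"*.tmp", "*.bak"}, warnf)

	for _, path := range []string{"/data/report.pdf", "/data/scratch.tmp"} {
		fmt.Printf("%s excluded: %v\n", path, reject(path))
	}
}
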
type IncludeByNameFunc func(item string) (matched bool, childMayMatch bool) -type includePatternOptions struct { +type IncludePatternOptions struct { Includes []string InsensitiveIncludes []string IncludeFiles []string InsensitiveIncludeFiles []string } -func (opts *includePatternOptions) Add(f *pflag.FlagSet) { +func (opts *IncludePatternOptions) Add(f *pflag.FlagSet) { f.StringArrayVarP(&opts.Includes, "include", "i", nil, "include a `pattern` (can be specified multiple times)") f.StringArrayVar(&opts.InsensitiveIncludes, "iinclude", nil, "same as --include `pattern` but ignores the casing of filenames") f.StringArrayVar(&opts.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)") f.StringArrayVar(&opts.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of `file`names in patterns") } -func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error) { +func (opts IncludePatternOptions) CollectPatterns(warnf func(msg string, args ...interface{})) ([]IncludeByNameFunc, error) { var fs []IncludeByNameFunc if len(opts.IncludeFiles) > 0 { includePatterns, err := readPatternsFromFiles(opts.IncludeFiles) @@ -34,7 +33,7 @@ func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error) return nil, err } - if err := filter.ValidatePatterns(includePatterns); err != nil { + if err := ValidatePatterns(includePatterns); err != nil { return nil, errors.Fatalf("--include-file: %s", err) } @@ -47,7 +46,7 @@ func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error) return nil, err } - if err := filter.ValidatePatterns(includePatterns); err != nil { + if err := ValidatePatterns(includePatterns); err != nil { return nil, errors.Fatalf("--iinclude-file: %s", err) } @@ -55,45 +54,45 @@ func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error) } if len(opts.InsensitiveIncludes) > 0 { - if err := filter.ValidatePatterns(opts.InsensitiveIncludes); err != nil { + if err := ValidatePatterns(opts.InsensitiveIncludes); err != nil { return nil, errors.Fatalf("--iinclude: %s", err) } - fs = append(fs, includeByInsensitivePattern(opts.InsensitiveIncludes)) + fs = append(fs, IncludeByInsensitivePattern(opts.InsensitiveIncludes, warnf)) } if len(opts.Includes) > 0 { - if err := filter.ValidatePatterns(opts.Includes); err != nil { + if err := ValidatePatterns(opts.Includes); err != nil { return nil, errors.Fatalf("--include: %s", err) } - fs = append(fs, includeByPattern(opts.Includes)) + fs = append(fs, IncludeByPattern(opts.Includes, warnf)) } return fs, nil } -// includeByPattern returns a IncludeByNameFunc which includes files that match +// IncludeByPattern returns a IncludeByNameFunc which includes files that match // one of the patterns. 
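
The (matched, childMayMatch) pair is what lets a tree walk prune whole directories: a directory is skipped only when neither it nor any possible descendant can match an include pattern. A much-simplified sketch of that contract, using literal path prefixes instead of restic's pattern matching:

package main

import (
	"fmt"
	"strings"
)

// includeByPrefix returns a function with the same shape as IncludeByNameFunc.
// Patterns here are plain path prefixes only; this illustrates the
// matched/childMayMatch contract, not restic's filter syntax.
func includeByPrefix(patterns []string) func(item string) (matched, childMayMatch bool) {
	return func(item string) (bool, bool) {
		for _, p := range patterns {
			if item == p || strings.HasPrefix(item, p+"/") {
				return true, true // item lies inside an included subtree
			}
			if strings.HasPrefix(p, item+"/") {
				return false, true // a descendant of item could still match
			}
		}
		return false, false // safe to skip this subtree entirely
	}
}

func main() {
	include := includeByPrefix([]string{"home/alice/docs"})
	for _, item := range []string{"home", "home/alice/docs/cv.pdf", "var/log"} {
		m, c := include(item)
		fmt.Printf("%-24s matched=%-5v childMayMatch=%v\n", item, m, c)
	}
}
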
-func includeByPattern(patterns []string) IncludeByNameFunc { - parsedPatterns := filter.ParsePatterns(patterns) +func IncludeByPattern(patterns []string, warnf func(msg string, args ...interface{})) IncludeByNameFunc { + parsedPatterns := ParsePatterns(patterns) return func(item string) (matched bool, childMayMatch bool) { - matched, childMayMatch, err := filter.ListWithChild(parsedPatterns, item) + matched, childMayMatch, err := ListWithChild(parsedPatterns, item) if err != nil { - Warnf("error for include pattern: %v", err) + warnf("error for include pattern: %v", err) } return matched, childMayMatch } } -// includeByInsensitivePattern returns a IncludeByNameFunc which includes files that match +// IncludeByInsensitivePattern returns a IncludeByNameFunc which includes files that match // one of the patterns, ignoring the casing of the filenames. -func includeByInsensitivePattern(patterns []string) IncludeByNameFunc { +func IncludeByInsensitivePattern(patterns []string, warnf func(msg string, args ...interface{})) IncludeByNameFunc { for index, path := range patterns { patterns[index] = strings.ToLower(path) } - includeFunc := includeByPattern(patterns) + includeFunc := IncludeByPattern(patterns, warnf) return func(item string) (matched bool, childMayMatch bool) { return includeFunc(strings.ToLower(item)) } diff --git a/cmd/restic/include_test.go b/internal/filter/include_test.go similarity index 92% rename from cmd/restic/include_test.go rename to internal/filter/include_test.go index 751bfbb7685..2f474622cdb 100644 --- a/cmd/restic/include_test.go +++ b/internal/filter/include_test.go @@ -1,4 +1,4 @@ -package main +package filter import ( "testing" @@ -21,7 +21,7 @@ func TestIncludeByPattern(t *testing.T) { for _, tc := range tests { t.Run(tc.filename, func(t *testing.T) { - includeFunc := includeByPattern(patterns) + includeFunc := IncludeByPattern(patterns, nil) matched, _ := includeFunc(tc.filename) if matched != tc.include { t.Fatalf("wrong result for filename %v: want %v, got %v", @@ -48,7 +48,7 @@ func TestIncludeByInsensitivePattern(t *testing.T) { for _, tc := range tests { t.Run(tc.filename, func(t *testing.T) { - includeFunc := includeByInsensitivePattern(patterns) + includeFunc := IncludeByInsensitivePattern(patterns, nil) matched, _ := includeFunc(tc.filename) if matched != tc.include { t.Fatalf("wrong result for filename %v: want %v, got %v", From 6fd5d5f2d5f142a0a36a21d8e6ca6421b54f632b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 14:10:57 +0200 Subject: [PATCH 648/893] archiver: move helper functions to combine rejects --- cmd/restic/cmd_backup.go | 20 ++------------------ internal/archiver/exclude.go | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 43ef29ba255..eaca150d9f6 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -529,21 +529,11 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) - err = repo.LoadIndex(ctx, bar) if err != nil { return err } - selectByNameFilter := func(item string) bool { - for _, reject := range rejectByNameFuncs { - if reject(item) { - return false - } - } - return true - } - var targetFS fs.FS = fs.Local{} if runtime.GOOS == "windows" && opts.UseFsSnapshot { if err = fs.HasSufficientPrivilegesForVSS(); err != nil { @@ -592,14 +582,8 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts 
GlobalOptions, ter return err } - selectFilter := func(item string, fi os.FileInfo, fs fs.FS) bool { - for _, reject := range rejectFuncs { - if reject(item, fi, fs) { - return false - } - } - return true - } + selectByNameFilter := archiver.CombineRejectByNames(rejectByNameFuncs) + selectFilter := archiver.CombineRejects(rejectFuncs) wg, wgCtx := errgroup.WithContext(ctx) cancelCtx, cancel := context.WithCancel(wgCtx) diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go index 62e4ea17e77..280322f3c1b 100644 --- a/internal/archiver/exclude.go +++ b/internal/archiver/exclude.go @@ -24,6 +24,28 @@ type RejectByNameFunc func(path string) bool // should be excluded (rejected) from the backup. type RejectFunc func(path string, fi os.FileInfo, fs fs.FS) bool +func CombineRejectByNames(funcs []RejectByNameFunc) SelectByNameFunc { + return func(item string) bool { + for _, reject := range funcs { + if reject(item) { + return false + } + } + return true + } +} + +func CombineRejects(funcs []RejectFunc) SelectFunc { + return func(item string, fi os.FileInfo, fs fs.FS) bool { + for _, reject := range funcs { + if reject(item, fi, fs) { + return false + } + } + return true + } +} + type rejectionCache struct { m map[string]bool mtx sync.Mutex From 70fbad662391334c285bf7b8623610face9df7ee Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 14:25:35 +0200 Subject: [PATCH 649/893] archiver: minimize imports --- cmd/restic/cmd_backup.go | 8 +++++++- internal/archiver/exclude.go | 10 ++-------- internal/archiver/exclude_test.go | 5 +---- internal/archiver/scanner.go | 3 +-- 4 files changed, 11 insertions(+), 15 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index eaca150d9f6..107e8bbe0a2 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -25,6 +25,7 @@ import ( "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/textfile" + "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/backup" "github.com/restic/restic/internal/ui/termstatus" ) @@ -333,7 +334,12 @@ func collectRejectFuncs(opts BackupOptions, targets []string, fs fs.FS) (funcs [ } if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin && !opts.StdinCommand { - f, err := archiver.RejectBySize(opts.ExcludeLargerThan) + maxSize, err := ui.ParseBytes(opts.ExcludeLargerThan) + if err != nil { + return nil, err + } + + f, err := archiver.RejectBySize(maxSize) if err != nil { return nil, err } diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go index 280322f3c1b..1e855fc3a37 100644 --- a/internal/archiver/exclude.go +++ b/internal/archiver/exclude.go @@ -11,7 +11,6 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" - "github.com/restic/restic/internal/ui" ) // RejectByNameFunc is a function that takes a filename of a @@ -139,7 +138,7 @@ func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache, func isDirExcludedByFile(dir, tagFilename, header string, fs fs.FS, warnf func(msg string, args ...interface{})) bool { tf := fs.Join(dir, tagFilename) _, err := fs.Lstat(tf) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return false } if err != nil { @@ -315,12 +314,7 @@ func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { }, nil } -func RejectBySize(maxSizeStr string) (RejectFunc, error) { - maxSize, err := 
ui.ParseBytes(maxSizeStr) - if err != nil { - return nil, err - } - +func RejectBySize(maxSize int64) (RejectFunc, error) { return func(item string, fi os.FileInfo, _ fs.FS) bool { // directory will be ignored if fi.IsDir() { diff --git a/internal/archiver/exclude_test.go b/internal/archiver/exclude_test.go index b9f1f8cdd5c..7eb24b08b6e 100644 --- a/internal/archiver/exclude_test.go +++ b/internal/archiver/exclude_test.go @@ -139,9 +139,6 @@ func TestMultipleIsExcludedByFile(t *testing.T) { func TestIsExcludedByFileSize(t *testing.T) { tempDir := test.TempDir(t) - // Max size of file is set to be 1k - maxSizeStr := "1k" - // Create some files in a temporary directory. // Files in UPPERCASE will be used as exclusion triggers later on. // We will test the inclusion later, so we add the expected value as @@ -185,7 +182,7 @@ func TestIsExcludedByFileSize(t *testing.T) { test.OKs(t, errs) // see if anything went wrong during the creation // create rejection function - sizeExclude, _ := RejectBySize(maxSizeStr) + sizeExclude, _ := RejectBySize(1024) // To mock the archiver scanning walk, we create filepath.WalkFn // that tests against the two rejection functions and stores diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go index cb74a31d6bf..debd09aa32c 100644 --- a/internal/archiver/scanner.go +++ b/internal/archiver/scanner.go @@ -3,7 +3,6 @@ package archiver import ( "context" "os" - "path/filepath" "sort" "github.com/restic/restic/internal/debug" @@ -131,7 +130,7 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (Sca sort.Strings(names) for _, name := range names { - stats, err = s.scan(ctx, stats, filepath.Join(target, name)) + stats, err = s.scan(ctx, stats, s.FS.Join(target, name)) if err != nil { return stats, err } From e79dca644e03b0be946a0b2e2b78b64a303ef22e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 14:35:10 +0200 Subject: [PATCH 650/893] fs: unexport DeviceID --- internal/fs/deviceid_unix.go | 4 ++-- internal/fs/deviceid_windows.go | 4 ++-- internal/fs/fs_local.go | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/fs/deviceid_unix.go b/internal/fs/deviceid_unix.go index c366221ab29..4d5593335ee 100644 --- a/internal/fs/deviceid_unix.go +++ b/internal/fs/deviceid_unix.go @@ -10,9 +10,9 @@ import ( "github.com/restic/restic/internal/errors" ) -// DeviceID extracts the device ID from an os.FileInfo object by casting it +// deviceID extracts the device ID from an os.FileInfo object by casting it // to syscall.Stat_t -func DeviceID(fi os.FileInfo) (deviceID uint64, err error) { +func deviceID(fi os.FileInfo) (deviceID uint64, err error) { if fi == nil { return 0, errors.New("unable to determine device: fi is nil") } diff --git a/internal/fs/deviceid_windows.go b/internal/fs/deviceid_windows.go index 42355817d5f..bfb22dc9a08 100644 --- a/internal/fs/deviceid_windows.go +++ b/internal/fs/deviceid_windows.go @@ -9,8 +9,8 @@ import ( "github.com/restic/restic/internal/errors" ) -// DeviceID extracts the device ID from an os.FileInfo object by casting it +// deviceID extracts the device ID from an os.FileInfo object by casting it // to syscall.Stat_t -func DeviceID(fi os.FileInfo) (deviceID uint64, err error) { +func deviceID(_ os.FileInfo) (deviceID uint64, err error) { return 0, errors.New("Device IDs are not supported on Windows") } diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 33d83bf6344..019069a55b6 100644 --- a/internal/fs/fs_local.go +++ 
b/internal/fs/fs_local.go @@ -48,8 +48,8 @@ func (fs Local) Lstat(name string) (os.FileInfo, error) { // DeviceID extracts the DeviceID from the given FileInfo. If the fs does // not support a DeviceID, it returns an error instead -func (fs Local) DeviceID(fi os.FileInfo) (deviceID uint64, err error) { - return DeviceID(fi) +func (fs Local) DeviceID(fi os.FileInfo) (id uint64, err error) { + return deviceID(fi) } // Join joins any number of path elements into a single path, adding a From 7bb92dc7bda46201fe84f4b5a67b670ee5544083 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 14:35:40 +0200 Subject: [PATCH 651/893] archiver: use ExtendedStat from FS interface With this change, NodeFromFileInfo is the last function that bypasses the FS interface in the archiver. --- internal/archiver/archiver.go | 4 ++-- internal/archiver/archiver_test.go | 11 ++++++----- internal/fs/fs_local.go | 5 +++++ internal/fs/fs_reader.go | 6 ++++++ internal/fs/interface.go | 1 + 5 files changed, 20 insertions(+), 7 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index eab65bb5fee..a783f6c7f7d 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -459,7 +459,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous // check if the file has not changed before performing a fopen operation (more expensive, specially // in network filesystems) - if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) { + if previous != nil && !fileChanged(arch.FS, fi, previous, arch.ChangeIgnoreFlags) { if arch.allBlobsPresent(previous) { debug.Log("%v hasn't changed, using old list of blobs", target) arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start)) @@ -579,7 +579,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous // fileChanged tries to detect whether a file's content has changed compared // to the contents of node, which describes the same path in the parent backup. // It should only be run for regular files. 
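
fileChanged (diffed just below) is the cheap pre-check that decides whether a file may reuse the blob list from the parent snapshot without being reopened. The gist of that decision, reduced here to size and modification time against cached metadata; this is a sketch only, the real check also considers type, ctime and inode, and its flags parameter lets callers relax it:

package main

import (
	"fmt"
	"os"
	"time"
)

// parentMeta stands in for the metadata a previous snapshot recorded for a file.
type parentMeta struct {
	Size    int64
	ModTime time.Time
}

// changed reports whether the file looks different from the parent snapshot.
// Only meant for regular files, mirroring the comment above.
func changed(fi os.FileInfo, prev *parentMeta) bool {
	switch {
	case prev == nil:
		return true
	case fi.Size() != prev.Size:
		return true
	case !fi.ModTime().Equal(prev.ModTime):
		return true
	}
	return false
}

func main() {
	f, err := os.CreateTemp("", "changed-demo-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	fi, _ := os.Lstat(f.Name())
	prev := &parentMeta{Size: fi.Size(), ModTime: fi.ModTime()}
	fmt.Println("unchanged file detected as changed:", changed(fi, prev))

	_, _ = f.WriteString("new content")
	_ = f.Close()
	fi, _ = os.Lstat(f.Name())
	fmt.Println("modified file detected as changed:", changed(fi, prev))
}
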
-func fileChanged(fi os.FileInfo, node *restic.Node, ignoreFlags uint) bool { +func fileChanged(fs fs.FS, fi os.FileInfo, node *restic.Node, ignoreFlags uint) bool { switch { case node == nil: return true diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index b5645218284..962fd548108 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -686,10 +686,11 @@ func TestFileChanged(t *testing.T) { } save(t, filename, content) + fs := &fs.Local{} fiBefore := lstat(t, filename) node := nodeFromFI(t, filename, fiBefore) - if fileChanged(fiBefore, node, 0) { + if fileChanged(fs, fiBefore, node, 0) { t.Fatalf("unchanged file detected as changed") } @@ -699,12 +700,12 @@ func TestFileChanged(t *testing.T) { if test.SameFile { // file should be detected as unchanged - if fileChanged(fiAfter, node, test.ChangeIgnore) { + if fileChanged(fs, fiAfter, node, test.ChangeIgnore) { t.Fatalf("unmodified file detected as changed") } } else { // file should be detected as changed - if !fileChanged(fiAfter, node, test.ChangeIgnore) && !test.SameFile { + if !fileChanged(fs, fiAfter, node, test.ChangeIgnore) && !test.SameFile { t.Fatalf("modified file detected as unchanged") } } @@ -721,7 +722,7 @@ func TestFilChangedSpecialCases(t *testing.T) { t.Run("nil-node", func(t *testing.T) { fi := lstat(t, filename) - if !fileChanged(fi, nil, 0) { + if !fileChanged(&fs.Local{}, fi, nil, 0) { t.Fatal("nil node detected as unchanged") } }) @@ -730,7 +731,7 @@ func TestFilChangedSpecialCases(t *testing.T) { fi := lstat(t, filename) node := nodeFromFI(t, filename, fi) node.Type = "symlink" - if !fileChanged(fi, node, 0) { + if !fileChanged(&fs.Local{}, fi, node, 0) { t.Fatal("node with changed type detected as unchanged") } }) diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 019069a55b6..034d1aa2477 100644 --- a/internal/fs/fs_local.go +++ b/internal/fs/fs_local.go @@ -52,6 +52,11 @@ func (fs Local) DeviceID(fi os.FileInfo) (id uint64, err error) { return deviceID(fi) } +// ExtendedStat converts the give FileInfo into ExtendedFileInfo. +func (fs Local) ExtendedStat(fi os.FileInfo) ExtendedFileInfo { + return ExtendedStat(fi) +} + // Join joins any number of path elements into a single path, adding a // Separator if necessary. Join calls Clean on the result; in particular, all // empty strings are ignored. On Windows, the result is a UNC path if and only diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index b3371a8c967..84a79168e60 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -126,6 +126,12 @@ func (fs *Reader) DeviceID(_ os.FileInfo) (deviceID uint64, err error) { return 0, errors.New("Device IDs are not supported") } +func (fs *Reader) ExtendedStat(fi os.FileInfo) ExtendedFileInfo { + return ExtendedFileInfo{ + FileInfo: fi, + } +} + // Join joins any number of path elements into a single path, adding a // Separator if necessary. Join calls Clean on the result; in particular, all // empty strings are ignored. 
On Windows, the result is a UNC path if and only diff --git a/internal/fs/interface.go b/internal/fs/interface.go index 1c27c1c13f3..bc6aab44a12 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -11,6 +11,7 @@ type FS interface { Stat(name string) (os.FileInfo, error) Lstat(name string) (os.FileInfo, error) DeviceID(fi os.FileInfo) (deviceID uint64, err error) + ExtendedStat(fi os.FileInfo) ExtendedFileInfo Join(elem ...string) string Separator() string From ca1e5e10b64c0ee56037b450a3d300345b8fb679 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 9 Jul 2024 19:51:44 +0200 Subject: [PATCH 652/893] add proper constants for node type --- cmd/restic/cmd_diff.go | 30 ++++++++-------- cmd/restic/cmd_dump.go | 8 ++--- cmd/restic/cmd_find.go | 6 ++-- cmd/restic/cmd_ls.go | 10 +++--- cmd/restic/cmd_ls_test.go | 16 ++++----- cmd/restic/cmd_recover.go | 4 +-- cmd/restic/cmd_repair_snapshots.go | 2 +- cmd/restic/cmd_stats.go | 2 +- cmd/restic/format.go | 14 ++++---- cmd/restic/format_test.go | 2 +- internal/archiver/archiver.go | 10 +++--- internal/archiver/archiver_test.go | 2 +- internal/archiver/file_saver.go | 2 +- internal/archiver/testing.go | 8 ++--- internal/checker/checker.go | 6 ++-- internal/checker/checker_test.go | 6 ++-- internal/dump/common.go | 19 ++-------- internal/dump/tar.go | 6 ++-- internal/dump/tar_test.go | 2 +- internal/dump/zip.go | 4 +-- internal/fs/file_windows.go | 7 ++-- internal/fs/node.go | 54 ++++++++++++++--------------- internal/fs/node_test.go | 28 +++++++-------- internal/fs/node_unix_test.go | 6 ++-- internal/fs/node_windows.go | 4 +-- internal/fs/node_windows_test.go | 32 ++++++++--------- internal/fs/node_xattr_all_test.go | 4 +-- internal/fuse/dir.go | 18 +++++----- internal/fuse/fuse_test.go | 14 ++++---- internal/fuse/inode.go | 2 +- internal/restic/find.go | 2 +- internal/restic/node.go | 30 +++++++++++----- internal/restic/testing.go | 4 +-- internal/restic/tree.go | 4 +-- internal/restorer/restorer.go | 14 ++++---- internal/restorer/restorer_test.go | 6 ++-- internal/ui/backup/progress.go | 4 +-- internal/ui/backup/progress_test.go | 4 +-- internal/walker/rewriter.go | 4 +-- internal/walker/rewriter_test.go | 4 +-- internal/walker/walker.go | 4 +-- internal/walker/walker_test.go | 4 +-- 42 files changed, 206 insertions(+), 206 deletions(-) diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 594e387e895..d1067b5ecd1 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -108,9 +108,9 @@ func (s *DiffStat) Add(node *restic.Node) { } switch node.Type { - case "file": + case restic.NodeTypeFile: s.Files++ - case "dir": + case restic.NodeTypeDir: s.Dirs++ default: s.Others++ @@ -124,7 +124,7 @@ func addBlobs(bs restic.BlobSet, node *restic.Node) { } switch node.Type { - case "file": + case restic.NodeTypeFile: for _, blob := range node.Content { h := restic.BlobHandle{ ID: blob, @@ -132,7 +132,7 @@ func addBlobs(bs restic.BlobSet, node *restic.Node) { } bs.Insert(h) } - case "dir": + case restic.NodeTypeDir: h := restic.BlobHandle{ ID: *node.Subtree, Type: restic.TreeBlob, @@ -184,14 +184,14 @@ func (c *Comparer) printDir(ctx context.Context, mode string, stats *DiffStat, b } name := path.Join(prefix, node.Name) - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { name += "/" } c.printChange(NewChange(name, mode)) stats.Add(node) addBlobs(blobs, node) - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { err := c.printDir(ctx, mode, stats, blobs, name, *node.Subtree) if err != nil 
&& err != context.Canceled { Warnf("error: %v\n", err) @@ -216,7 +216,7 @@ func (c *Comparer) collectDir(ctx context.Context, blobs restic.BlobSet, id rest addBlobs(blobs, node) - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { err := c.collectDir(ctx, blobs, *node.Subtree) if err != nil && err != context.Canceled { Warnf("error: %v\n", err) @@ -284,12 +284,12 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref mod += "T" } - if node2.Type == "dir" { + if node2.Type == restic.NodeTypeDir { name += "/" } - if node1.Type == "file" && - node2.Type == "file" && + if node1.Type == restic.NodeTypeFile && + node2.Type == restic.NodeTypeFile && !reflect.DeepEqual(node1.Content, node2.Content) { mod += "M" stats.ChangedFiles++ @@ -311,7 +311,7 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref c.printChange(NewChange(name, mod)) } - if node1.Type == "dir" && node2.Type == "dir" { + if node1.Type == restic.NodeTypeDir && node2.Type == restic.NodeTypeDir { var err error if (*node1.Subtree).Equal(*node2.Subtree) { err = c.collectDir(ctx, stats.BlobsCommon, *node1.Subtree) @@ -324,13 +324,13 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref } case t1 && !t2: prefix := path.Join(prefix, name) - if node1.Type == "dir" { + if node1.Type == restic.NodeTypeDir { prefix += "/" } c.printChange(NewChange(prefix, "-")) stats.Removed.Add(node1) - if node1.Type == "dir" { + if node1.Type == restic.NodeTypeDir { err := c.printDir(ctx, "-", &stats.Removed, stats.BlobsBefore, prefix, *node1.Subtree) if err != nil && err != context.Canceled { Warnf("error: %v\n", err) @@ -338,13 +338,13 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref } case !t1 && t2: prefix := path.Join(prefix, name) - if node2.Type == "dir" { + if node2.Type == restic.NodeTypeDir { prefix += "/" } c.printChange(NewChange(prefix, "+")) stats.Added.Add(node2) - if node2.Type == "dir" { + if node2.Type == restic.NodeTypeDir { err := c.printDir(ctx, "+", &stats.Added, stats.BlobsAfter, prefix, *node2.Subtree) if err != nil && err != context.Canceled { Warnf("error: %v\n", err) diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 7d6652e1744..6b7f8d01285 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -95,15 +95,15 @@ func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.BlobLoade // first item it finds and dump that according to the switch case below. 
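
The constants introduced by this patch are a typed string rather than a new enum (the ls hunk below converts with string(node.Type)), so comparison sites change shape while the serialized values stay the same "file"/"dir" strings. A minimal model of the pattern:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// NodeType models the idea behind the patch: a named string type plus
// constants instead of bare "file"/"dir" literals scattered through the code.
type NodeType string

const (
	NodeTypeFile    NodeType = "file"
	NodeTypeDir     NodeType = "dir"
	NodeTypeSymlink NodeType = "symlink"
)

type Node struct {
	Name string   `json:"name"`
	Type NodeType `json:"type"`
}

func describe(t NodeType) string {
	switch t {
	case NodeTypeFile:
		return "regular file"
	case NodeTypeDir:
		return "directory"
	case NodeTypeSymlink:
		return "symbolic link"
	default:
		return fmt.Sprintf("other (%s)", t)
	}
}

func main() {
	n := Node{Name: "docs", Type: NodeTypeDir}
	fmt.Println(describe(n.Type))
	// The wire format is unchanged: the constant still serializes as "dir".
	_ = json.NewEncoder(os.Stdout).Encode(n)
}
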
if node.Name == pathComponents[0] { switch { - case l == 1 && dump.IsFile(node): + case l == 1 && node.Type == restic.NodeTypeFile: return d.WriteNode(ctx, node) - case l > 1 && dump.IsDir(node): + case l > 1 && node.Type == restic.NodeTypeDir: subtree, err := restic.LoadTree(ctx, repo, *node.Subtree) if err != nil { return errors.Wrapf(err, "cannot load subtree for %q", item) } return printFromTree(ctx, subtree, repo, item, pathComponents[1:], d, canWriteArchiveFunc) - case dump.IsDir(node): + case node.Type == restic.NodeTypeDir: if err := canWriteArchiveFunc(); err != nil { return err } @@ -114,7 +114,7 @@ func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.BlobLoade return d.DumpTree(ctx, subtree, item) case l > 1: return fmt.Errorf("%q should be a dir, but is a %q", item, node.Type) - case !dump.IsFile(node): + case node.Type != restic.NodeTypeFile: return fmt.Errorf("%q should be a file, but is a %q", item, node.Type) } } diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index cb5c0e5e080..92e88203fbb 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -298,7 +298,7 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error } var errIfNoMatch error - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { var childMayMatch bool for _, pat := range f.pat.pattern { mayMatch, err := filter.ChildMatch(pat, normalizedNodepath) @@ -357,7 +357,7 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error { return nil } - if node.Type == "dir" && f.treeIDs != nil { + if node.Type == restic.NodeTypeDir && f.treeIDs != nil { treeID := node.Subtree found := false if _, ok := f.treeIDs[treeID.Str()]; ok { @@ -377,7 +377,7 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error { } } - if node.Type == "file" && f.blobIDs != nil { + if node.Type == restic.NodeTypeFile && f.blobIDs != nil { for _, id := range node.Content { if ctx.Err() != nil { return ctx.Err() diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 69e2781037e..2213d8e7ae5 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -137,7 +137,7 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { size uint64 // Target for Size pointer. }{ Name: node.Name, - Type: node.Type, + Type: string(node.Type), Path: path, UID: node.UID, GID: node.GID, @@ -153,7 +153,7 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { } // Always print size for regular files, even when empty, // but never for other types. 
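
The size handling referenced in the ls hunk above relies on a pointer field with omitempty: pointing Size at a backing value forces the field to appear even when it is zero, while leaving it nil drops it for non-files. The trick in isolation:

package main

import (
	"encoding/json"
	"os"
)

// entry demonstrates the pointer trick: size is emitted whenever the pointer
// is set (including for zero-byte files) and omitted when it stays nil.
type entry struct {
	Name string  `json:"name"`
	Type string  `json:"type"`
	Size *uint64 `json:"size,omitempty"`

	size uint64 // target for the Size pointer, never serialized itself
}

func main() {
	enc := json.NewEncoder(os.Stdout)

	file := entry{Name: "empty", Type: "file", size: 0}
	file.Size = &file.size // prints "size":0
	_ = enc.Encode(file)

	dir := entry{Name: "docs", Type: "dir"} // Size stays nil, "size" is omitted
	_ = enc.Encode(dir)
}
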
- if node.Type == "file" { + if node.Type == restic.NodeTypeFile { n.Size = &n.size } @@ -208,7 +208,7 @@ func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { Dev: node.DeviceID, Ino: node.Inode, NLink: node.Links, - NotReg: node.Type != "dir" && node.Type != "file", + NotReg: node.Type != restic.NodeTypeDir && node.Type != restic.NodeTypeFile, UID: node.UID, GID: node.GID, Mode: uint16(node.Mode & os.ModePerm), @@ -238,7 +238,7 @@ func (p *ncduLsPrinter) Node(path string, node *restic.Node, _ bool) { Warnf("JSON encode failed: %v\n", err) } - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { fmt.Fprintf(p.out, ",\n%s[\n%s%s", strings.Repeat(" ", p.depth), strings.Repeat(" ", p.depth+1), string(out)) p.depth++ } else { @@ -409,7 +409,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri // otherwise, signal the walker to not walk recursively into any // subdirs - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { // immediately generate leaveDir if the directory is skipped if printedDir { printer.LeaveDir(nodepath) diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index a1fcd479bdd..b8b07424245 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -23,7 +23,7 @@ var lsTestNodes = []lsTestNode{ path: "/bar/baz", Node: restic.Node{ Name: "baz", - Type: "file", + Type: restic.NodeTypeFile, Size: 12345, UID: 10000000, GID: 20000000, @@ -39,7 +39,7 @@ var lsTestNodes = []lsTestNode{ path: "/foo/empty", Node: restic.Node{ Name: "empty", - Type: "file", + Type: restic.NodeTypeFile, Size: 0, UID: 1001, GID: 1001, @@ -56,7 +56,7 @@ var lsTestNodes = []lsTestNode{ path: "/foo/link", Node: restic.Node{ Name: "link", - Type: "symlink", + Type: restic.NodeTypeSymlink, Mode: os.ModeSymlink | 0777, LinkTarget: "not printed", }, @@ -66,7 +66,7 @@ var lsTestNodes = []lsTestNode{ path: "/some/directory", Node: restic.Node{ Name: "directory", - Type: "dir", + Type: restic.NodeTypeDir, Mode: os.ModeDir | 0755, ModTime: time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC), AccessTime: time.Date(2021, 2, 3, 4, 5, 6, 7, time.UTC), @@ -79,7 +79,7 @@ var lsTestNodes = []lsTestNode{ path: "/some/sticky", Node: restic.Node{ Name: "sticky", - Type: "dir", + Type: restic.NodeTypeDir, Mode: os.ModeDir | 0755 | os.ModeSetuid | os.ModeSetgid | os.ModeSticky, }, }, @@ -139,19 +139,19 @@ func TestLsNcdu(t *testing.T) { Paths: []string{"/example"}, }) printer.Node("/directory", &restic.Node{ - Type: "dir", + Type: restic.NodeTypeDir, Name: "directory", ModTime: modTime, }, false) printer.Node("/directory/data", &restic.Node{ - Type: "file", + Type: restic.NodeTypeFile, Name: "data", Size: 42, ModTime: modTime, }, false) printer.LeaveDir("/directory") printer.Node("/file", &restic.Node{ - Type: "file", + Type: restic.NodeTypeFile, Name: "file", Size: 12345, ModTime: modTime, diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index a6ef59cc205..133f779783a 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -88,7 +88,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { } for _, node := range tree.Nodes { - if node.Type == "dir" && node.Subtree != nil { + if node.Type == restic.NodeTypeDir && node.Subtree != nil { trees[*node.Subtree] = true } } @@ -128,7 +128,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { for id := range roots { var subtreeID = id node := restic.Node{ - Type: "dir", + Type: restic.NodeTypeDir, Name: id.Str(), Mode: 0755, Subtree: 
&subtreeID, diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index 385854312ad..46ba52dd415 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -92,7 +92,7 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt // - files whose contents are not fully available (-> file will be modified) rewriter := walker.NewTreeRewriter(walker.RewriteOpts{ RewriteNode: func(node *restic.Node, path string) *restic.Node { - if node.Type != "file" { + if node.Type != restic.NodeTypeFile { return node } diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index dd51175d310..e0b60a29ebf 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -276,7 +276,7 @@ func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer, // will still be restored stats.TotalFileCount++ - if node.Links == 1 || node.Type == "dir" { + if node.Links == 1 || node.Type == restic.NodeTypeDir { stats.TotalSize += node.Size } else { // if hardlinks are present only count each deviceID+inode once diff --git a/cmd/restic/format.go b/cmd/restic/format.go index 063cd4e7125..e7d178b4ef5 100644 --- a/cmd/restic/format.go +++ b/cmd/restic/format.go @@ -24,20 +24,20 @@ func formatNode(path string, n *restic.Node, long bool, human bool) string { } switch n.Type { - case "file": + case restic.NodeTypeFile: mode = 0 - case "dir": + case restic.NodeTypeDir: mode = os.ModeDir - case "symlink": + case restic.NodeTypeSymlink: mode = os.ModeSymlink target = fmt.Sprintf(" -> %v", n.LinkTarget) - case "dev": + case restic.NodeTypeDev: mode = os.ModeDevice - case "chardev": + case restic.NodeTypeCharDev: mode = os.ModeDevice | os.ModeCharDevice - case "fifo": + case restic.NodeTypeFifo: mode = os.ModeNamedPipe - case "socket": + case restic.NodeTypeSocket: mode = os.ModeSocket } diff --git a/cmd/restic/format_test.go b/cmd/restic/format_test.go index 689bd27a56a..e232a200b79 100644 --- a/cmd/restic/format_test.go +++ b/cmd/restic/format_test.go @@ -19,7 +19,7 @@ func TestFormatNode(t *testing.T) { testPath := "/test/path" node := restic.Node{ Name: "baz", - Type: "file", + Type: restic.NodeTypeFile, Size: 14680064, UID: 1000, GID: 2000, diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index a783f6c7f7d..0a0c18d2891 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -232,7 +232,7 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I } switch current.Type { - case "dir": + case restic.NodeTypeDir: switch { case previous == nil: arch.summary.Dirs.New++ @@ -242,7 +242,7 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I arch.summary.Dirs.Changed++ } - case "file": + case restic.NodeTypeFile: switch { case previous == nil: arch.summary.Files.New++ @@ -261,7 +261,7 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, node.AccessTime = node.ModTime } if feature.Flag.Enabled(feature.DeviceIDForHardlinks) { - if node.Links == 1 || node.Type == "dir" { + if node.Links == 1 || node.Type == restic.NodeTypeDir { // the DeviceID is only necessary for hardlinked files // when using subvolumes or snapshots their deviceIDs tend to change which causes // restic to upload new tree blobs @@ -280,7 +280,7 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, // loadSubtree tries to load the subtree referenced by node. In case of an error, nil is returned. 
// If there is no node to load, then nil is returned without an error. func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) (*restic.Tree, error) { - if node == nil || node.Type != "dir" || node.Subtree == nil { + if node == nil || node.Type != restic.NodeTypeDir || node.Subtree == nil { return nil, nil } @@ -583,7 +583,7 @@ func fileChanged(fs fs.FS, fi os.FileInfo, node *restic.Node, ignoreFlags uint) switch { case node == nil: return true - case node.Type != "file": + case node.Type != restic.NodeTypeFile: // We're only called for regular files, so this is a type change. return true case uint64(fi.Size()) != node.Size: diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 962fd548108..04d0b876b6d 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -730,7 +730,7 @@ func TestFilChangedSpecialCases(t *testing.T) { t.Run("type-change", func(t *testing.T) { fi := lstat(t, filename) node := nodeFromFI(t, filename, fi) - node.Type = "symlink" + node.Type = "restic.NodeTypeSymlink" if !fileChanged(&fs.Local{}, fi, node, 0) { t.Fatal("node with changed type detected as unchanged") } diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index fa19cab861a..b9d07434a35 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -163,7 +163,7 @@ func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat return } - if node.Type != "file" { + if node.Type != restic.NodeTypeFile { _ = f.Close() completeError(errors.Errorf("node type %q is wrong", node.Type)) return diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index 8bd854904f2..97bada64433 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -289,7 +289,7 @@ func TestEnsureTree(ctx context.Context, t testing.TB, prefix string, repo resti switch e := entry.(type) { case TestDir: - if node.Type != "dir" { + if node.Type != restic.NodeTypeDir { t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "dir") return } @@ -301,13 +301,13 @@ func TestEnsureTree(ctx context.Context, t testing.TB, prefix string, repo resti TestEnsureTree(ctx, t, path.Join(prefix, node.Name), repo, *node.Subtree, e) case TestFile: - if node.Type != "file" { + if node.Type != restic.NodeTypeFile { t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file") } TestEnsureFileContent(ctx, t, repo, nodePrefix, node, e) case TestSymlink: - if node.Type != "symlink" { - t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file") + if node.Type != restic.NodeTypeSymlink { + t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "symlink") } if e.Target != node.LinkTarget { diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 76bb15f637d..12020891a13 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -344,7 +344,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { for _, node := range tree.Nodes { switch node.Type { - case "file": + case restic.NodeTypeFile: if node.Content == nil { errs = append(errs, &Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)}) } @@ -380,7 +380,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { c.blobRefs.Unlock() } - case "dir": + case restic.NodeTypeDir: if node.Subtree == nil { errs = append(errs, &Error{TreeID: id, 
Err: errors.Errorf("dir node %q has no subtree", node.Name)}) continue @@ -391,7 +391,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { continue } - case "symlink", "socket", "chardev", "dev", "fifo": + case restic.NodeTypeSymlink, restic.NodeTypeSocket, restic.NodeTypeCharDev, restic.NodeTypeDev, restic.NodeTypeFifo: // nothing to check default: diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index 5eaf550ba31..0e2125bba43 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -482,7 +482,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) { damagedNode := &restic.Node{ Name: "damaged", - Type: "file", + Type: restic.NodeTypeFile, Mode: 0644, Size: 42, Content: restic.IDs{restic.TestParseID("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")}, @@ -507,14 +507,14 @@ func TestCheckerBlobTypeConfusion(t *testing.T) { malNode := &restic.Node{ Name: "aaaaa", - Type: "file", + Type: restic.NodeTypeFile, Mode: 0644, Size: uint64(len(buf)), Content: restic.IDs{id}, } dirNode := &restic.Node{ Name: "bbbbb", - Type: "dir", + Type: restic.NodeTypeDir, Mode: 0755, Subtree: &id, } diff --git a/internal/dump/common.go b/internal/dump/common.go index 62145ba9cd8..4bc404fe0e5 100644 --- a/internal/dump/common.go +++ b/internal/dump/common.go @@ -67,7 +67,7 @@ func sendNodes(ctx context.Context, repo restic.BlobLoader, root *restic.Node, c } // If this is no directory we are finished - if !IsDir(root) { + if root.Type != restic.NodeTypeDir { return nil } @@ -81,7 +81,7 @@ func sendNodes(ctx context.Context, repo restic.BlobLoader, root *restic.Node, c node.Path = path.Join(root.Path, nodepath) - if !IsFile(node) && !IsDir(node) && !IsLink(node) { + if node.Type != restic.NodeTypeFile && node.Type != restic.NodeTypeDir && node.Type != restic.NodeTypeSymlink { return nil } @@ -176,18 +176,3 @@ func (d *Dumper) writeNode(ctx context.Context, w io.Writer, node *restic.Node) return wg.Wait() } - -// IsDir checks if the given node is a directory. -func IsDir(node *restic.Node) bool { - return node.Type == "dir" -} - -// IsLink checks if the given node as a link. -func IsLink(node *restic.Node) bool { - return node.Type == "symlink" -} - -// IsFile checks if the given node is a file. 
-func IsFile(node *restic.Node) bool { - return node.Type == "file" -} diff --git a/internal/dump/tar.go b/internal/dump/tar.go index e8f34deb15a..c5933d4f8ac 100644 --- a/internal/dump/tar.go +++ b/internal/dump/tar.go @@ -79,16 +79,16 @@ func (d *Dumper) dumpNodeTar(ctx context.Context, node *restic.Node, w *tar.Writ header.Mode |= cISVTX } - if IsFile(node) { + if node.Type == restic.NodeTypeFile { header.Typeflag = tar.TypeReg } - if IsLink(node) { + if node.Type == restic.NodeTypeSymlink { header.Typeflag = tar.TypeSymlink header.Linkname = node.LinkTarget } - if IsDir(node) { + if node.Type == restic.NodeTypeDir { header.Typeflag = tar.TypeDir header.Name += "/" } diff --git a/internal/dump/tar_test.go b/internal/dump/tar_test.go index 3556e6aeb30..0c97eae35b4 100644 --- a/internal/dump/tar_test.go +++ b/internal/dump/tar_test.go @@ -124,7 +124,7 @@ func TestFieldTooLong(t *testing.T) { node := restic.Node{ Name: "file_with_xattr", Path: "/file_with_xattr", - Type: "file", + Type: restic.NodeTypeFile, Mode: 0644, ExtendedAttributes: []restic.ExtendedAttribute{ { diff --git a/internal/dump/zip.go b/internal/dump/zip.go index e5ef5c95b78..d32475770f1 100644 --- a/internal/dump/zip.go +++ b/internal/dump/zip.go @@ -40,7 +40,7 @@ func (d *Dumper) dumpNodeZip(ctx context.Context, node *restic.Node, zw *zip.Wri } header.SetMode(node.Mode) - if IsDir(node) { + if node.Type == restic.NodeTypeDir { header.Name += "/" } @@ -49,7 +49,7 @@ func (d *Dumper) dumpNodeZip(ctx context.Context, node *restic.Node, zw *zip.Wri return errors.Wrap(err, "ZipHeader") } - if IsLink(node) { + if node.Type == restic.NodeTypeSymlink { if _, err = w.Write([]byte(node.LinkTarget)); err != nil { return errors.Wrap(err, "Write") } diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go index 50c7e9938f6..7a6d79f2521 100644 --- a/internal/fs/file_windows.go +++ b/internal/fs/file_windows.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/restic/restic/internal/restic" "golang.org/x/sys/windows" ) @@ -105,7 +106,7 @@ func ClearAttribute(path string, attribute uint32) error { } // OpenHandleForEA return a file handle for file or dir for setting/getting EAs -func OpenHandleForEA(nodeType, path string, writeAccess bool) (handle windows.Handle, err error) { +func OpenHandleForEA(nodeType restic.NodeType, path string, writeAccess bool) (handle windows.Handle, err error) { path = fixpath(path) fileAccess := windows.FILE_READ_EA if writeAccess { @@ -113,10 +114,10 @@ func OpenHandleForEA(nodeType, path string, writeAccess bool) (handle windows.Ha } switch nodeType { - case "file": + case restic.NodeTypeFile: utf16Path := windows.StringToUTF16Ptr(path) handle, err = windows.CreateFile(utf16Path, uint32(fileAccess), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) - case "dir": + case restic.NodeTypeDir: utf16Path := windows.StringToUTF16Ptr(path) handle, err = windows.CreateFile(utf16Path, uint32(fileAccess), 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) default: diff --git a/internal/fs/node.go b/internal/fs/node.go index 9bd507ba564..e29011cc265 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -25,7 +25,7 @@ func NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (* } node.Type = nodeTypeFromFileInfo(fi) - if node.Type == "file" { + if node.Type == restic.NodeTypeFile { node.Size = uint64(fi.Size()) } @@ -33,27 +33,27 @@ func NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) 
(* return node, err } -func nodeTypeFromFileInfo(fi os.FileInfo) string { +func nodeTypeFromFileInfo(fi os.FileInfo) restic.NodeType { switch fi.Mode() & os.ModeType { case 0: - return "file" + return restic.NodeTypeFile case os.ModeDir: - return "dir" + return restic.NodeTypeDir case os.ModeSymlink: - return "symlink" + return restic.NodeTypeSymlink case os.ModeDevice | os.ModeCharDevice: - return "chardev" + return restic.NodeTypeCharDev case os.ModeDevice: - return "dev" + return restic.NodeTypeDev case os.ModeNamedPipe: - return "fifo" + return restic.NodeTypeFifo case os.ModeSocket: - return "socket" + return restic.NodeTypeSocket case os.ModeIrregular: - return "irregular" + return restic.NodeTypeIrregular } - return "" + return restic.NodeTypeInvalid } func nodeFillExtra(node *restic.Node, path string, fi os.FileInfo, ignoreXattrListError bool) error { @@ -74,25 +74,25 @@ func nodeFillExtra(node *restic.Node, path string, fi os.FileInfo, ignoreXattrLi nodeFillUser(node, stat) switch node.Type { - case "file": + case restic.NodeTypeFile: node.Size = uint64(stat.size()) node.Links = uint64(stat.nlink()) - case "dir": - case "symlink": + case restic.NodeTypeDir: + case restic.NodeTypeSymlink: var err error node.LinkTarget, err = Readlink(path) node.Links = uint64(stat.nlink()) if err != nil { return errors.WithStack(err) } - case "dev": + case restic.NodeTypeDev: node.Device = uint64(stat.rdev()) node.Links = uint64(stat.nlink()) - case "chardev": + case restic.NodeTypeCharDev: node.Device = uint64(stat.rdev()) node.Links = uint64(stat.nlink()) - case "fifo": - case "socket": + case restic.NodeTypeFifo: + case restic.NodeTypeSocket: default: return errors.Errorf("unsupported file type %q", node.Type) } @@ -178,31 +178,31 @@ func NodeCreateAt(node *restic.Node, path string) error { debug.Log("create node %v at %v", node.Name, path) switch node.Type { - case "dir": + case restic.NodeTypeDir: if err := nodeCreateDirAt(node, path); err != nil { return err } - case "file": + case restic.NodeTypeFile: if err := nodeCreateFileAt(path); err != nil { return err } - case "symlink": + case restic.NodeTypeSymlink: if err := nodeCreateSymlinkAt(node, path); err != nil { return err } - case "dev": + case restic.NodeTypeDev: if err := nodeCreateDevAt(node, path); err != nil { return err } - case "chardev": + case restic.NodeTypeCharDev: if err := nodeCreateCharDevAt(node, path); err != nil { return err } - case "fifo": + case restic.NodeTypeFifo: if err := nodeCreateFifoAt(path); err != nil { return err } - case "socket": + case restic.NodeTypeSocket: return nil default: return errors.Errorf("filetype %q not implemented", node.Type) @@ -305,7 +305,7 @@ func nodeRestoreMetadata(node *restic.Node, path string, warn func(msg string)) // Moving RestoreTimestamps and restoreExtendedAttributes calls above as for readonly files in windows // calling Chmod below will no longer allow any modifications to be made on the file and the // calls above would fail. 
- if node.Type != "symlink" { + if node.Type != restic.NodeTypeSymlink { if err := Chmod(path, node.Mode); err != nil { if firsterr == nil { firsterr = errors.WithStack(err) @@ -322,7 +322,7 @@ func NodeRestoreTimestamps(node *restic.Node, path string) error { syscall.NsecToTimespec(node.ModTime.UnixNano()), } - if node.Type == "symlink" { + if node.Type == restic.NodeTypeSymlink { return nodeRestoreSymlinkTimestamps(path, utimes) } diff --git a/internal/fs/node_test.go b/internal/fs/node_test.go index e7f608352c2..2623513a802 100644 --- a/internal/fs/node_test.go +++ b/internal/fs/node_test.go @@ -79,7 +79,7 @@ func parseTime(s string) time.Time { var nodeTests = []restic.Node{ { Name: "testFile", - Type: "file", + Type: restic.NodeTypeFile, Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -90,7 +90,7 @@ var nodeTests = []restic.Node{ }, { Name: "testSuidFile", - Type: "file", + Type: restic.NodeTypeFile, Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -101,7 +101,7 @@ var nodeTests = []restic.Node{ }, { Name: "testSuidFile2", - Type: "file", + Type: restic.NodeTypeFile, Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -112,7 +112,7 @@ var nodeTests = []restic.Node{ }, { Name: "testSticky", - Type: "file", + Type: restic.NodeTypeFile, Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -123,7 +123,7 @@ var nodeTests = []restic.Node{ }, { Name: "testDir", - Type: "dir", + Type: restic.NodeTypeDir, Subtree: nil, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -134,7 +134,7 @@ var nodeTests = []restic.Node{ }, { Name: "testSymlink", - Type: "symlink", + Type: restic.NodeTypeSymlink, LinkTarget: "invalid", UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -148,7 +148,7 @@ var nodeTests = []restic.Node{ // metadata, so we can test if CreateAt works with pre-existing files. { Name: "testFile", - Type: "file", + Type: restic.NodeTypeFile, Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -159,7 +159,7 @@ var nodeTests = []restic.Node{ }, { Name: "testDir", - Type: "dir", + Type: restic.NodeTypeDir, Subtree: nil, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -170,7 +170,7 @@ var nodeTests = []restic.Node{ }, { Name: "testXattrFile", - Type: "file", + Type: restic.NodeTypeFile, Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -184,7 +184,7 @@ var nodeTests = []restic.Node{ }, { Name: "testXattrDir", - Type: "dir", + Type: restic.NodeTypeDir, Subtree: nil, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -198,7 +198,7 @@ var nodeTests = []restic.Node{ }, { Name: "testXattrFileMacOSResourceFork", - Type: "file", + Type: restic.NodeTypeFile, Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -268,7 +268,7 @@ func TestNodeRestoreAt(t *testing.T) { "%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID) rtest.Assert(t, test.GID == n2.GID, "%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID) - if test.Type != "symlink" { + if test.Type != restic.NodeTypeSymlink { // On OpenBSD only root can set sticky bit (see sticky(8)). 
if runtime.GOOS != "openbsd" && runtime.GOOS != "netbsd" && runtime.GOOS != "solaris" && test.Name == "testSticky" { rtest.Assert(t, test.Mode == n2.Mode, @@ -288,11 +288,11 @@ func TestNodeRestoreAt(t *testing.T) { } } -func AssertFsTimeEqual(t *testing.T, label string, nodeType string, t1 time.Time, t2 time.Time) { +func AssertFsTimeEqual(t *testing.T, label string, nodeType restic.NodeType, t1 time.Time, t2 time.Time) { var equal bool // Go currently doesn't support setting timestamps of symbolic links on darwin and bsd - if nodeType == "symlink" { + if nodeType == restic.NodeTypeSymlink { switch runtime.GOOS { case "darwin", "freebsd", "openbsd", "netbsd", "solaris": return diff --git a/internal/fs/node_unix_test.go b/internal/fs/node_unix_test.go index b505357f2fc..3658c835662 100644 --- a/internal/fs/node_unix_test.go +++ b/internal/fs/node_unix_test.go @@ -42,7 +42,7 @@ func checkFile(t testing.TB, stat *syscall.Stat_t, node *restic.Node) { t.Errorf("Dev does not match, want %v, got %v", stat.Dev, node.DeviceID) } - if node.Size != uint64(stat.Size) && node.Type != "symlink" { + if node.Size != uint64(stat.Size) && node.Type != restic.NodeTypeSymlink { t.Errorf("Size does not match, want %v, got %v", stat.Size, node.Size) } @@ -135,9 +135,9 @@ func TestNodeFromFileInfo(t *testing.T) { } switch node.Type { - case "file", "symlink": + case restic.NodeTypeFile, restic.NodeTypeSymlink: checkFile(t, s, node) - case "dev", "chardev": + case restic.NodeTypeDev, restic.NodeTypeCharDev: checkFile(t, s, node) checkDevice(t, s, node) default: diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index 90fa3462c3a..f2a2001e968 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -139,7 +139,7 @@ func closeFileHandle(fileHandle windows.Handle, path string) { // restoreExtendedAttributes handles restore of the Windows Extended Attributes to the specified path. // The Windows API requires setting of all the Extended Attributes in one call. 
-func restoreExtendedAttributes(nodeType, path string, eas []ExtendedAttribute) (err error) { +func restoreExtendedAttributes(nodeType restic.NodeType, path string, eas []ExtendedAttribute) (err error) { var fileHandle windows.Handle if fileHandle, err = OpenHandleForEA(nodeType, path, true); fileHandle == 0 { return nil @@ -386,7 +386,7 @@ func nodeFillGenericAttributes(node *restic.Node, path string, fi os.FileInfo, s } var sd *[]byte - if node.Type == "file" || node.Type == "dir" { + if node.Type == restic.NodeTypeFile || node.Type == restic.NodeTypeDir { // Check EA support and get security descriptor for file/dir only allowExtended, err = checkAndStoreEASupport(path) if err != nil { diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index 046c1984c81..729ce2adb86 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -24,14 +24,14 @@ func TestRestoreSecurityDescriptors(t *testing.T) { t.Parallel() tempDir := t.TempDir() for i, sd := range TestFileSDs { - testRestoreSecurityDescriptor(t, sd, tempDir, "file", fmt.Sprintf("testfile%d", i)) + testRestoreSecurityDescriptor(t, sd, tempDir, restic.NodeTypeFile, fmt.Sprintf("testfile%d", i)) } for i, sd := range TestDirSDs { - testRestoreSecurityDescriptor(t, sd, tempDir, "dir", fmt.Sprintf("testdir%d", i)) + testRestoreSecurityDescriptor(t, sd, tempDir, restic.NodeTypeDir, fmt.Sprintf("testdir%d", i)) } } -func testRestoreSecurityDescriptor(t *testing.T, sd string, tempDir, fileType, fileName string) { +func testRestoreSecurityDescriptor(t *testing.T, sd string, tempDir string, fileType restic.NodeType, fileName string) { // Decode the encoded string SD to get the security descriptor input in bytes. sdInputBytes, err := base64.StdEncoding.DecodeString(sd) test.OK(t, errors.Wrapf(err, "Error decoding SD for: %s", fileName)) @@ -56,7 +56,7 @@ func testRestoreSecurityDescriptor(t *testing.T, sd string, tempDir, fileType, f CompareSecurityDescriptors(t, testPath, *sdByteFromRestoredNode, *sdBytesFromRestoredPath) } -func getNode(name string, fileType string, genericAttributes map[restic.GenericAttributeType]json.RawMessage) restic.Node { +func getNode(name string, fileType restic.NodeType, genericAttributes map[restic.GenericAttributeType]json.RawMessage) restic.Node { return restic.Node{ Name: name, Type: fileType, @@ -113,7 +113,7 @@ func TestRestoreFileAttributes(t *testing.T) { expectedNodes := []restic.Node{ { Name: fmt.Sprintf("testfile%d", i), - Type: "file", + Type: restic.NodeTypeFile, Mode: 0655, ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), @@ -146,7 +146,7 @@ func TestRestoreFileAttributes(t *testing.T) { expectedNodes := []restic.Node{ { Name: fmt.Sprintf("testdirectory%d", i), - Type: "dir", + Type: restic.NodeTypeDir, Mode: 0755, ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), @@ -164,7 +164,7 @@ func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName expectedNodes := []restic.Node{ { Name: "testfile", - Type: "file", + Type: restic.NodeTypeFile, Mode: 0644, ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), @@ -173,7 +173,7 @@ func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName }, { Name: "testdirectory", - Type: "dir", + Type: restic.NodeTypeDir, Mode: 0755, ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), @@ -200,12 
+200,12 @@ func restoreAndGetNode(t *testing.T, tempDir string, testNode *restic.Node, warn err := os.MkdirAll(filepath.Dir(testPath), testNode.Mode) test.OK(t, errors.Wrapf(err, "Failed to create parent directories for: %s", testPath)) - if testNode.Type == "file" { + if testNode.Type == restic.NodeTypeFile { testFile, err := os.Create(testPath) test.OK(t, errors.Wrapf(err, "Failed to create test file: %s", testPath)) testFile.Close() - } else if testNode.Type == "dir" { + } else if testNode.Type == restic.NodeTypeDir { err := os.Mkdir(testPath, testNode.Mode) test.OK(t, errors.Wrapf(err, "Failed to create test directory: %s", testPath)) @@ -242,7 +242,7 @@ func TestNewGenericAttributeType(t *testing.T) { expectedNodes := []restic.Node{ { Name: "testfile", - Type: "file", + Type: restic.NodeTypeFile, Mode: 0644, ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), @@ -251,7 +251,7 @@ func TestNewGenericAttributeType(t *testing.T) { }, { Name: "testdirectory", - Type: "dir", + Type: restic.NodeTypeDir, Mode: 0755, ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), @@ -274,7 +274,7 @@ func TestRestoreExtendedAttributes(t *testing.T) { expectedNodes := []restic.Node{ { Name: "testfile", - Type: "file", + Type: restic.NodeTypeFile, Mode: 0644, ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), @@ -285,7 +285,7 @@ func TestRestoreExtendedAttributes(t *testing.T) { }, { Name: "testdirectory", - Type: "dir", + Type: restic.NodeTypeDir, Mode: 0755, ModTime: parseTime("2005-05-14 21:07:03.111"), AccessTime: parseTime("2005-05-14 21:07:04.222"), @@ -301,9 +301,9 @@ func TestRestoreExtendedAttributes(t *testing.T) { var handle windows.Handle var err error utf16Path := windows.StringToUTF16Ptr(testPath) - if node.Type == "file" { + if node.Type == restic.NodeTypeFile { handle, err = windows.CreateFile(utf16Path, windows.FILE_READ_EA, 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0) - } else if node.Type == "dir" { + } else if node.Type == restic.NodeTypeDir { handle, err = windows.CreateFile(utf16Path, windows.FILE_READ_EA, 0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL|windows.FILE_FLAG_BACKUP_SEMANTICS, 0) } test.OK(t, errors.Wrapf(err, "Error opening file/directory for: %s", testPath)) diff --git a/internal/fs/node_xattr_all_test.go b/internal/fs/node_xattr_all_test.go index 39670d6e1e0..690302f707c 100644 --- a/internal/fs/node_xattr_all_test.go +++ b/internal/fs/node_xattr_all_test.go @@ -23,13 +23,13 @@ func setAndVerifyXattr(t *testing.T, file string, attrs []restic.ExtendedAttribu } node := &restic.Node{ - Type: "file", + Type: restic.NodeTypeFile, ExtendedAttributes: attrs, } rtest.OK(t, nodeRestoreExtendedAttributes(node, file)) nodeActual := &restic.Node{ - Type: "file", + Type: restic.NodeTypeFile, } rtest.OK(t, nodeFillExtendedAttributes(nodeActual, file, false)) diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go index fd030295b6d..62298cf249d 100644 --- a/internal/fuse/dir.go +++ b/internal/fuse/dir.go @@ -59,7 +59,7 @@ func unwrapCtxCanceled(err error) error { // replaceSpecialNodes replaces nodes with name "." and "/" by their contents. // Otherwise, the node is returned. 
func replaceSpecialNodes(ctx context.Context, repo restic.BlobLoader, node *restic.Node) ([]*restic.Node, error) { - if node.Type != "dir" || node.Subtree == nil { + if node.Type != restic.NodeTypeDir || node.Subtree == nil { return []*restic.Node{node}, nil } @@ -147,7 +147,7 @@ func (d *dir) calcNumberOfLinks() uint32 { // of directories contained by d count := uint32(2) for _, node := range d.items { - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { count++ } } @@ -182,11 +182,11 @@ func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { name := cleanupNodeName(node.Name) var typ fuse.DirentType switch node.Type { - case "dir": + case restic.NodeTypeDir: typ = fuse.DT_Dir - case "file": + case restic.NodeTypeFile: typ = fuse.DT_File - case "symlink": + case restic.NodeTypeSymlink: typ = fuse.DT_Link } @@ -215,13 +215,13 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { } inode := inodeFromNode(d.inode, node) switch node.Type { - case "dir": + case restic.NodeTypeDir: return newDir(d.root, inode, d.inode, node) - case "file": + case restic.NodeTypeFile: return newFile(d.root, inode, node) - case "symlink": + case restic.NodeTypeSymlink: return newLink(d.root, inode, node) - case "dev", "chardev", "fifo", "socket": + case restic.NodeTypeDev, restic.NodeTypeCharDev, restic.NodeTypeFifo, restic.NodeTypeSocket: return newOther(d.root, inode, node) default: debug.Log(" node %v has unknown type %v", name, node.Type) diff --git a/internal/fuse/fuse_test.go b/internal/fuse/fuse_test.go index aebcb127282..bbdfe6b3ff6 100644 --- a/internal/fuse/fuse_test.go +++ b/internal/fuse/fuse_test.go @@ -249,7 +249,7 @@ func TestBlocks(t *testing.T) { } func TestInodeFromNode(t *testing.T) { - node := &restic.Node{Name: "foo.txt", Type: "chardev", Links: 2} + node := &restic.Node{Name: "foo.txt", Type: restic.NodeTypeCharDev, Links: 2} ino1 := inodeFromNode(1, node) ino2 := inodeFromNode(2, node) rtest.Assert(t, ino1 == ino2, "inodes %d, %d of hard links differ", ino1, ino2) @@ -261,9 +261,9 @@ func TestInodeFromNode(t *testing.T) { // Regression test: in a path a/b/b, the grandchild should not get the // same inode as the grandparent. 
- a := &restic.Node{Name: "a", Type: "dir", Links: 2} - ab := &restic.Node{Name: "b", Type: "dir", Links: 2} - abb := &restic.Node{Name: "b", Type: "dir", Links: 2} + a := &restic.Node{Name: "a", Type: restic.NodeTypeDir, Links: 2} + ab := &restic.Node{Name: "b", Type: restic.NodeTypeDir, Links: 2} + abb := &restic.Node{Name: "b", Type: restic.NodeTypeDir, Links: 2} inoA := inodeFromNode(1, a) inoAb := inodeFromNode(inoA, ab) inoAbb := inodeFromNode(inoAb, abb) @@ -272,7 +272,7 @@ func TestInodeFromNode(t *testing.T) { } func TestLink(t *testing.T) { - node := &restic.Node{Name: "foo.txt", Type: "symlink", Links: 1, LinkTarget: "dst", ExtendedAttributes: []restic.ExtendedAttribute{ + node := &restic.Node{Name: "foo.txt", Type: restic.NodeTypeSymlink, Links: 1, LinkTarget: "dst", ExtendedAttributes: []restic.ExtendedAttribute{ {Name: "foo", Value: []byte("bar")}, }} @@ -305,11 +305,11 @@ func BenchmarkInode(b *testing.B) { }{ { name: "no_hard_links", - node: restic.Node{Name: "a somewhat long-ish filename.svg.bz2", Type: "fifo"}, + node: restic.Node{Name: "a somewhat long-ish filename.svg.bz2", Type: restic.NodeTypeFifo}, }, { name: "hard_link", - node: restic.Node{Name: "some other filename", Type: "file", Links: 2}, + node: restic.Node{Name: "some other filename", Type: restic.NodeTypeFile, Links: 2}, }, } { b.Run(sub.name, func(b *testing.B) { diff --git a/internal/fuse/inode.go b/internal/fuse/inode.go index 5e2ece4ac99..88d5b8bb812 100644 --- a/internal/fuse/inode.go +++ b/internal/fuse/inode.go @@ -25,7 +25,7 @@ func inodeFromName(parent uint64, name string) uint64 { // inodeFromNode generates an inode number for a file within a snapshot. func inodeFromNode(parent uint64, node *restic.Node) (inode uint64) { - if node.Links > 1 && node.Type != "dir" { + if node.Links > 1 && node.Type != restic.NodeTypeDir { // If node has hard links, give them all the same inode, // irrespective of the parent. var buf [16]byte diff --git a/internal/restic/find.go b/internal/restic/find.go index d7b032bf808..f9b4e8bdfda 100644 --- a/internal/restic/find.go +++ b/internal/restic/find.go @@ -46,7 +46,7 @@ func FindUsedBlobs(ctx context.Context, repo Loader, treeIDs IDs, blobs FindBlob lock.Lock() for _, node := range tree.Nodes { switch node.Type { - case "file": + case NodeTypeFile: for _, blob := range node.Content { blobs.Insert(BlobHandle{ID: blob, Type: DataBlob}) } diff --git a/internal/restic/node.go b/internal/restic/node.go index 8bf97e59c1d..c572996a5fe 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -67,10 +67,24 @@ func storeGenericAttributeType(attributeTypes ...GenericAttributeType) { } } +type NodeType string + +var ( + NodeTypeFile = NodeType("file") + NodeTypeDir = NodeType("dir") + NodeTypeSymlink = NodeType("symlink") + NodeTypeDev = NodeType("dev") + NodeTypeCharDev = NodeType("chardev") + NodeTypeFifo = NodeType("fifo") + NodeTypeSocket = NodeType("socket") + NodeTypeIrregular = NodeType("irregular") + NodeTypeInvalid = NodeType("") +) + // Node is a file, directory or other item in a backup. 
type Node struct { Name string `json:"name"` - Type string `json:"type"` + Type NodeType `json:"type"` Mode os.FileMode `json:"mode,omitempty"` ModTime time.Time `json:"mtime,omitempty"` AccessTime time.Time `json:"atime,omitempty"` @@ -110,19 +124,19 @@ func (n Nodes) Swap(i, j int) { n[i], n[j] = n[j], n[i] } func (node Node) String() string { var mode os.FileMode switch node.Type { - case "file": + case NodeTypeFile: mode = 0 - case "dir": + case NodeTypeDir: mode = os.ModeDir - case "symlink": + case NodeTypeSymlink: mode = os.ModeSymlink - case "dev": + case NodeTypeDev: mode = os.ModeDevice - case "chardev": + case NodeTypeCharDev: mode = os.ModeDevice | os.ModeCharDevice - case "fifo": + case NodeTypeFifo: mode = os.ModeNamedPipe - case "socket": + case NodeTypeSocket: mode = os.ModeSocket } diff --git a/internal/restic/testing.go b/internal/restic/testing.go index 8f86a7b2c42..3e056343035 100644 --- a/internal/restic/testing.go +++ b/internal/restic/testing.go @@ -81,7 +81,7 @@ func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) I node := &Node{ Name: fmt.Sprintf("dir-%v", treeSeed), - Type: "dir", + Type: NodeTypeDir, Mode: 0755, Subtree: &id, } @@ -95,7 +95,7 @@ func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) I node := &Node{ Name: fmt.Sprintf("file-%v", fileSeed), - Type: "file", + Type: NodeTypeFile, Mode: 0644, Size: uint64(fileSize), } diff --git a/internal/restic/tree.go b/internal/restic/tree.go index 3c3e3ab5660..c4125653b12 100644 --- a/internal/restic/tree.go +++ b/internal/restic/tree.go @@ -96,7 +96,7 @@ func (t *Tree) Sort() { // Subtrees returns a slice of all subtree IDs of the tree. func (t *Tree) Subtrees() (trees IDs) { for _, node := range t.Nodes { - if node.Type == "dir" && node.Subtree != nil { + if node.Type == NodeTypeDir && node.Subtree != nil { trees = append(trees, *node.Subtree) } } @@ -208,7 +208,7 @@ func FindTreeDirectory(ctx context.Context, repo BlobLoader, id *ID, dir string) if node == nil { return nil, fmt.Errorf("path %s: not found", subfolder) } - if node.Type != "dir" || node.Subtree == nil { + if node.Type != NodeTypeDir || node.Subtree == nil { return nil, fmt.Errorf("path %s: not a directory", subfolder) } id = node.Subtree diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 26b6f347498..f28cd0ba3f7 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -202,18 +202,18 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str } // sockets cannot be restored - if node.Type == "socket" { + if node.Type == restic.NodeTypeSocket { continue } - selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, node.Type == "dir") + selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, node.Type == restic.NodeTypeDir) debug.Log("SelectFilter returned %v %v for %q", selectedForRestore, childMayBeSelected, nodeLocation) if selectedForRestore { hasRestored = true } - if node.Type == "dir" { + if node.Type == restic.NodeTypeDir { if node.Subtree == nil { return nil, hasRestored, errors.Errorf("Dir without subtree in tree %v", treeID.Str()) } @@ -377,7 +377,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error) return err } - if node.Type != "file" { + if node.Type != restic.NodeTypeFile { res.opts.Progress.AddFile(0) return nil } @@ -433,7 +433,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error) err = res.traverseTree(ctx, 
dst, *res.sn.Tree, treeVisitor{ visitNode: func(node *restic.Node, target, location string) error { debug.Log("second pass, visitNode: restore node %q", location) - if node.Type != "file" { + if node.Type != restic.NodeTypeFile { _, err := res.withOverwriteCheck(ctx, node, target, location, false, nil, func(_ bool, _ *fileState) error { return res.restoreNodeTo(node, target, location) }) @@ -547,7 +547,7 @@ func (res *Restorer) withOverwriteCheck(ctx context.Context, node *restic.Node, var matches *fileState updateMetadataOnly := false - if node.Type == "file" && !isHardlink { + if node.Type == restic.NodeTypeFile && !isHardlink { // if a file fails to verify, then matches is nil which results in restoring from scratch matches, buf, _ = res.verifyFile(ctx, target, node, false, res.opts.Overwrite == OverwriteIfChanged, buf) // skip files that are already correct completely @@ -616,7 +616,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string, countRestoredF err := res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{ visitNode: func(node *restic.Node, target, location string) error { - if node.Type != "file" { + if node.Type != restic.NodeTypeFile { return nil } if metadataOnly, ok := res.hasRestoredFile(location); !ok || metadataOnly { diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 7d48950681c..8844435ca74 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -108,7 +108,7 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u mode = 0644 } err := tree.Insert(&restic.Node{ - Type: "file", + Type: restic.NodeTypeFile, Mode: mode, ModTime: node.ModTime, Name: name, @@ -123,7 +123,7 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u rtest.OK(t, err) case Symlink: err := tree.Insert(&restic.Node{ - Type: "symlink", + Type: restic.NodeTypeSymlink, Mode: os.ModeSymlink | 0o777, ModTime: node.ModTime, Name: name, @@ -143,7 +143,7 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u } err := tree.Insert(&restic.Node{ - Type: "dir", + Type: restic.NodeTypeDir, Mode: mode, ModTime: node.ModTime, Name: name, diff --git a/internal/ui/backup/progress.go b/internal/ui/backup/progress.go index 1d494bf142a..24640d71f06 100644 --- a/internal/ui/backup/progress.go +++ b/internal/ui/backup/progress.go @@ -124,7 +124,7 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a } switch current.Type { - case "dir": + case restic.NodeTypeDir: p.mu.Lock() p.addProcessed(Counter{Dirs: 1}) p.mu.Unlock() @@ -138,7 +138,7 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a p.printer.CompleteItem("dir modified", item, s, d) } - case "file": + case restic.NodeTypeFile: p.mu.Lock() p.addProcessed(Counter{Files: 1}) delete(p.currentFiles, item) diff --git a/internal/ui/backup/progress_test.go b/internal/ui/backup/progress_test.go index 6b242a0f333..512fbab26be 100644 --- a/internal/ui/backup/progress_test.go +++ b/internal/ui/backup/progress_test.go @@ -55,10 +55,10 @@ func TestProgress(t *testing.T) { prog.CompleteBlob(1024) // "dir unchanged" - node := restic.Node{Type: "dir"} + node := restic.Node{Type: restic.NodeTypeDir} prog.CompleteItem("foo", &node, &node, archiver.ItemStats{}, 0) // "file new" - node.Type = "file" + node.Type = restic.NodeTypeFile prog.CompleteItem("foo", nil, &node, archiver.ItemStats{}, 0) time.Sleep(10 * time.Millisecond) diff --git 
a/internal/walker/rewriter.go b/internal/walker/rewriter.go index 7e984ae2505..968ef44f352 100644 --- a/internal/walker/rewriter.go +++ b/internal/walker/rewriter.go @@ -65,7 +65,7 @@ func NewSnapshotSizeRewriter(rewriteNode NodeRewriteFunc) (*TreeRewriter, QueryR t := NewTreeRewriter(RewriteOpts{ RewriteNode: func(node *restic.Node, path string) *restic.Node { node = rewriteNode(node, path) - if node != nil && node.Type == "file" { + if node != nil && node.Type == restic.NodeTypeFile { count++ size += node.Size } @@ -126,7 +126,7 @@ func (t *TreeRewriter) RewriteTree(ctx context.Context, repo BlobLoadSaver, node continue } - if node.Type != "dir" { + if node.Type != restic.NodeTypeDir { err = tb.AddNode(node) if err != nil { return restic.ID{}, err diff --git a/internal/walker/rewriter_test.go b/internal/walker/rewriter_test.go index f05e50f9bf1..58dd25cd017 100644 --- a/internal/walker/rewriter_test.go +++ b/internal/walker/rewriter_test.go @@ -110,7 +110,7 @@ func checkIncreaseNodeSize(increase uint64) checkRewriteFunc { return func(t testing.TB) (rewriter *TreeRewriter, final func(testing.TB)) { rewriter = NewTreeRewriter(RewriteOpts{ RewriteNode: func(node *restic.Node, path string) *restic.Node { - if node.Type == "file" { + if node.Type == restic.NodeTypeFile { node.Size += increase } return node @@ -329,7 +329,7 @@ func TestSnapshotSizeQuery(t *testing.T) { if path == "/bar" { return nil } - if node.Type == "file" { + if node.Type == restic.NodeTypeFile { node.Size += 21 } return node diff --git a/internal/walker/walker.go b/internal/walker/walker.go index 788ece1cf1a..8acfed2f2b8 100644 --- a/internal/walker/walker.go +++ b/internal/walker/walker.go @@ -63,11 +63,11 @@ func walk(ctx context.Context, repo restic.BlobLoader, prefix string, parentTree p := path.Join(prefix, node.Name) - if node.Type == "" { + if node.Type == restic.NodeTypeInvalid { return errors.Errorf("node type is empty for node %q", node.Name) } - if node.Type != "dir" { + if node.Type != restic.NodeTypeDir { err := visitor.ProcessNode(parentTreeID, p, node, nil) if err != nil { if err == ErrSkipNode { diff --git a/internal/walker/walker_test.go b/internal/walker/walker_test.go index 75f80e57f37..fa377bb8fa6 100644 --- a/internal/walker/walker_test.go +++ b/internal/walker/walker_test.go @@ -38,7 +38,7 @@ func buildTreeMap(tree TestTree, m TreeMap) restic.ID { case TestFile: err := tb.AddNode(&restic.Node{ Name: name, - Type: "file", + Type: restic.NodeTypeFile, Size: elem.Size, }) if err != nil { @@ -49,7 +49,7 @@ func buildTreeMap(tree TestTree, m TreeMap) restic.ID { err := tb.AddNode(&restic.Node{ Name: name, Subtree: &id, - Type: "dir", + Type: restic.NodeTypeDir, }) if err != nil { panic(err) From 24f4e780f1019b49fd9f0c6644d3925a1e754056 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 21 Jul 2024 15:22:21 +0200 Subject: [PATCH 653/893] backend: consistently use os package for filesystem access The go std library should be good enough to manage the files in the backend and cache folders. 
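For illustration only (not part of the diff below): after this change the cache writes files through plain os package calls, but it keeps the temp-file-then-rename behaviour visible in internal/backend/cache/file.go. The sketch below shows that pattern with a hypothetical saveAtomically helper standing in for the real cache code; the directory layout and helper name are assumptions, only the os/io calls mirror the patch.

    package cachesketch

    import (
    	"io"
    	"os"
    	"path/filepath"
    )

    // saveAtomically writes rd to dir/name via a temporary file and renames it
    // into place, so readers never observe a partially written file.
    func saveAtomically(dir, name string, rd io.Reader) error {
    	if err := os.MkdirAll(dir, 0700); err != nil {
    		return err
    	}
    	f, err := os.CreateTemp(dir, "tmp-")
    	if err != nil {
    		return err
    	}
    	if _, err := io.Copy(f, rd); err != nil {
    		_ = f.Close()
    		_ = os.Remove(f.Name())
    		return err
    	}
    	// Close, then rename; the reverse order does not work on Windows.
    	if err := f.Close(); err != nil {
    		_ = os.Remove(f.Name())
    		return err
    	}
    	return os.Rename(f.Name(), filepath.Join(dir, name))
    }

The rename happens only after the file has been closed, matching the ordering the cache code already relies on for Windows.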
--- cmd/restic/cmd_cache.go | 3 +-- cmd/restic/cmd_check.go | 3 +-- cmd/restic/global.go | 3 +-- internal/backend/cache/cache.go | 13 ++++++------- internal/backend/cache/file.go | 21 ++++++++++----------- internal/backend/cache/file_test.go | 3 +-- internal/backend/local/local.go | 24 ++++++++++++------------ internal/backend/local/local_unix.go | 4 +--- internal/fs/file.go | 9 --------- 9 files changed, 33 insertions(+), 50 deletions(-) diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go index e54c734518e..cd970b699e6 100644 --- a/cmd/restic/cmd_cache.go +++ b/cmd/restic/cmd_cache.go @@ -10,7 +10,6 @@ import ( "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/table" "github.com/spf13/cobra" @@ -89,7 +88,7 @@ func runCache(opts CacheOptions, gopts GlobalOptions, args []string) error { for _, item := range oldDirs { dir := filepath.Join(cachedir, item.Name()) - err = fs.RemoveAll(dir) + err = os.RemoveAll(dir) if err != nil { Warnf("unable to remove %v: %v\n", dir, err) } diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index fc460e39e38..8788b0caf33 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -14,7 +14,6 @@ import ( "github.com/restic/restic/internal/backend/cache" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" @@ -202,7 +201,7 @@ func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions, printer progress printer.P("using temporary cache in %v\n", tempdir) cleanup = func() { - err := fs.RemoveAll(tempdir) + err := os.RemoveAll(tempdir) if err != nil { printer.E("error removing temporary cache directory: %v\n", err) } diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 375b57f98bd..5fb37f9f2cb 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -29,7 +29,6 @@ import ( "github.com/restic/restic/internal/backend/sftp" "github.com/restic/restic/internal/backend/swift" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/options" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -548,7 +547,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } for _, item := range oldCacheDirs { dir := filepath.Join(c.Base, item.Name()) - err = fs.RemoveAll(dir) + err = os.RemoveAll(dir) if err != nil { Warnf("unable to remove %v: %v\n", dir, err) } diff --git a/internal/backend/cache/cache.go b/internal/backend/cache/cache.go index a55b51c700f..2893df5014a 100644 --- a/internal/backend/cache/cache.go +++ b/internal/backend/cache/cache.go @@ -12,7 +12,6 @@ import ( "github.com/pkg/errors" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" ) @@ -54,7 +53,7 @@ const cachedirTagSignature = "Signature: 8a477f597d28d172789f06886806bc55\n" func writeCachedirTag(dir string) error { tagfile := filepath.Join(dir, "CACHEDIR.TAG") - f, err := fs.OpenFile(tagfile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, fileMode) + f, err := os.OpenFile(tagfile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, fileMode) if err != nil { if errors.Is(err, 
os.ErrExist) { return nil @@ -85,7 +84,7 @@ func New(id string, basedir string) (c *Cache, err error) { } } - err = fs.MkdirAll(basedir, dirMode) + err = os.MkdirAll(basedir, dirMode) if err != nil { return nil, errors.WithStack(err) } @@ -113,7 +112,7 @@ func New(id string, basedir string) (c *Cache, err error) { case errors.Is(err, os.ErrNotExist): // Create the repo cache dir. The parent exists, so Mkdir suffices. - err := fs.Mkdir(cachedir, dirMode) + err := os.Mkdir(cachedir, dirMode) switch { case err == nil: created = true @@ -134,7 +133,7 @@ func New(id string, basedir string) (c *Cache, err error) { } for _, p := range cacheLayoutPaths { - if err = fs.MkdirAll(filepath.Join(cachedir, p), dirMode); err != nil { + if err = os.MkdirAll(filepath.Join(cachedir, p), dirMode); err != nil { return nil, errors.WithStack(err) } } @@ -152,7 +151,7 @@ func New(id string, basedir string) (c *Cache, err error) { // directory d to the current time. func updateTimestamp(d string) error { t := time.Now() - return fs.Chtimes(d, t, t) + return os.Chtimes(d, t, t) } // MaxCacheAge is the default age (30 days) after which cache directories are considered old. @@ -165,7 +164,7 @@ func validCacheDirName(s string) bool { // listCacheDirs returns the list of cache directories. func listCacheDirs(basedir string) ([]os.FileInfo, error) { - f, err := fs.Open(basedir) + f, err := os.Open(basedir) if err != nil { if errors.Is(err, os.ErrNotExist) { err = nil diff --git a/internal/backend/cache/file.go b/internal/backend/cache/file.go index 12f5f23c571..41fd0b49be5 100644 --- a/internal/backend/cache/file.go +++ b/internal/backend/cache/file.go @@ -12,7 +12,6 @@ import ( "github.com/restic/restic/internal/backend/util" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" ) @@ -44,7 +43,7 @@ func (c *Cache) load(h backend.Handle, length int, offset int64) (io.ReadCloser, return nil, false, errors.New("cannot be cached") } - f, err := fs.Open(c.filename(h)) + f, err := os.Open(c.filename(h)) if err != nil { return nil, false, errors.WithStack(err) } @@ -91,7 +90,7 @@ func (c *Cache) save(h backend.Handle, rd io.Reader) error { finalname := c.filename(h) dir := filepath.Dir(finalname) - err := fs.Mkdir(dir, 0700) + err := os.Mkdir(dir, 0700) if err != nil && !errors.Is(err, os.ErrExist) { return err } @@ -106,26 +105,26 @@ func (c *Cache) save(h backend.Handle, rd io.Reader) error { n, err := io.Copy(f, rd) if err != nil { _ = f.Close() - _ = fs.Remove(f.Name()) + _ = os.Remove(f.Name()) return errors.Wrap(err, "Copy") } if n <= int64(crypto.CiphertextLength(0)) { _ = f.Close() - _ = fs.Remove(f.Name()) + _ = os.Remove(f.Name()) debug.Log("trying to cache truncated file %v, removing", h) return nil } // Close, then rename. Windows doesn't like the reverse order. 
if err = f.Close(); err != nil { - _ = fs.Remove(f.Name()) + _ = os.Remove(f.Name()) return errors.WithStack(err) } - err = fs.Rename(f.Name(), finalname) + err = os.Rename(f.Name(), finalname) if err != nil { - _ = fs.Remove(f.Name()) + _ = os.Remove(f.Name()) } if runtime.GOOS == "windows" && errors.Is(err, os.ErrPermission) { // On Windows, renaming over an existing file is ok @@ -162,7 +161,7 @@ func (c *Cache) remove(h backend.Handle) (bool, error) { return false, nil } - err := fs.Remove(c.filename(h)) + err := os.Remove(c.filename(h)) removed := err == nil if errors.Is(err, os.ErrNotExist) { err = nil @@ -189,7 +188,7 @@ func (c *Cache) Clear(t restic.FileType, valid restic.IDSet) error { } // ignore ErrNotExist to gracefully handle multiple processes running Clear() concurrently - if err = fs.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil && !errors.Is(err, os.ErrNotExist) { + if err = os.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil && !errors.Is(err, os.ErrNotExist) { return err } } @@ -236,6 +235,6 @@ func (c *Cache) Has(h backend.Handle) bool { return false } - _, err := fs.Stat(c.filename(h)) + _, err := os.Stat(c.filename(h)) return err == nil } diff --git a/internal/backend/cache/file_test.go b/internal/backend/cache/file_test.go index ed2cd295abc..942f71f91b8 100644 --- a/internal/backend/cache/file_test.go +++ b/internal/backend/cache/file_test.go @@ -12,7 +12,6 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -278,7 +277,7 @@ func TestFileSaveConcurrent(t *testing.T) { func TestFileSaveAfterDamage(t *testing.T) { c := TestNewCache(t) - rtest.OK(t, fs.RemoveAll(c.path)) + rtest.OK(t, os.RemoveAll(c.path)) // save a few bytes of data in the cache data := rtest.Random(123456789, 42) diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go index 8985ef4c488..ee87ae5d6e6 100644 --- a/internal/backend/local/local.go +++ b/internal/backend/local/local.go @@ -40,7 +40,7 @@ func NewFactory() location.Factory { func open(cfg Config) (*Local, error) { l := layout.NewDefaultLayout(cfg.Path, filepath.Join) - fi, err := fs.Stat(l.Filename(backend.Handle{Type: backend.ConfigFile})) + fi, err := os.Stat(l.Filename(backend.Handle{Type: backend.ConfigFile})) m := util.DeriveModesFromFileInfo(fi, err) debug.Log("using (%03O file, %03O dir) permissions", m.File, m.Dir) @@ -68,14 +68,14 @@ func Create(_ context.Context, cfg Config) (*Local, error) { } // test if config file already exists - _, err = fs.Lstat(be.Filename(backend.Handle{Type: backend.ConfigFile})) + _, err = os.Lstat(be.Filename(backend.Handle{Type: backend.ConfigFile})) if err == nil { return nil, errors.New("config file already exists") } // create paths for data and refs for _, d := range be.Paths() { - err := fs.MkdirAll(d, be.Modes.Dir) + err := os.MkdirAll(d, be.Modes.Dir) if err != nil { return nil, errors.WithStack(err) } @@ -127,7 +127,7 @@ func (b *Local) Save(_ context.Context, h backend.Handle, rd backend.RewindReade debug.Log("error %v: creating dir", err) // error is caused by a missing directory, try to create it - mkdirErr := fs.MkdirAll(dir, b.Modes.Dir) + mkdirErr := os.MkdirAll(dir, b.Modes.Dir) if mkdirErr != nil { debug.Log("error creating dir %v: %v", dir, mkdirErr) } else { @@ -147,7 +147,7 @@ func (b *Local) Save(_ context.Context, h 
backend.Handle, rd backend.RewindReade // temporary's name and no other goroutine will get the same data to // Save, so the temporary name should never be reused by another // goroutine. - _ = fs.Remove(f.Name()) + _ = os.Remove(f.Name()) } }(f) @@ -211,7 +211,7 @@ func (b *Local) Load(ctx context.Context, h backend.Handle, length int, offset i } func (b *Local) openReader(_ context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) { - f, err := fs.Open(b.Filename(h)) + f, err := os.Open(b.Filename(h)) if err != nil { return nil, err } @@ -245,7 +245,7 @@ func (b *Local) openReader(_ context.Context, h backend.Handle, length int, offs // Stat returns information about a blob. func (b *Local) Stat(_ context.Context, h backend.Handle) (backend.FileInfo, error) { - fi, err := fs.Stat(b.Filename(h)) + fi, err := os.Stat(b.Filename(h)) if err != nil { return backend.FileInfo{}, errors.WithStack(err) } @@ -258,12 +258,12 @@ func (b *Local) Remove(_ context.Context, h backend.Handle) error { fn := b.Filename(h) // reset read-only flag - err := fs.Chmod(fn, 0666) + err := os.Chmod(fn, 0666) if err != nil && !os.IsPermission(err) { return errors.WithStack(err) } - return fs.Remove(fn) + return os.Remove(fn) } // List runs fn for each file in the backend which has the type t. When an @@ -289,7 +289,7 @@ func (b *Local) List(ctx context.Context, t backend.FileType, fn func(backend.Fi // Also, visitDirs assumes it sees a directory full of directories, while // visitFiles wants a directory full or regular files. func visitDirs(ctx context.Context, dir string, fn func(backend.FileInfo) error) error { - d, err := fs.Open(dir) + d, err := os.Open(dir) if err != nil { return err } @@ -316,7 +316,7 @@ func visitDirs(ctx context.Context, dir string, fn func(backend.FileInfo) error) } func visitFiles(ctx context.Context, dir string, fn func(backend.FileInfo) error, ignoreNotADirectory bool) error { - d, err := fs.Open(dir) + d, err := os.Open(dir) if err != nil { return err } @@ -362,7 +362,7 @@ func visitFiles(ctx context.Context, dir string, fn func(backend.FileInfo) error // Delete removes the repository and all files. func (b *Local) Delete(_ context.Context) error { - return fs.RemoveAll(b.Path) + return os.RemoveAll(b.Path) } // Close closes all open files. diff --git a/internal/backend/local/local_unix.go b/internal/backend/local/local_unix.go index e3256ed7a6b..e525874564d 100644 --- a/internal/backend/local/local_unix.go +++ b/internal/backend/local/local_unix.go @@ -8,8 +8,6 @@ import ( "os" "runtime" "syscall" - - "github.com/restic/restic/internal/fs" ) // fsyncDir flushes changes to the directory dir. @@ -45,5 +43,5 @@ func isMacENOTTY(err error) bool { // set file to readonly func setFileReadonly(f string, mode os.FileMode) error { - return fs.Chmod(f, mode&^0222) + return os.Chmod(f, mode&^0222) } diff --git a/internal/fs/file.go b/internal/fs/file.go index 356b466c3d0..b727df79c17 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -88,15 +88,6 @@ func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(fixpath(name), flag, perm) } -// Chtimes changes the access and modification times of the named file, -// similar to the Unix utime() or utimes() functions. -// -// The underlying filesystem may truncate or round the values to a less -// precise time unit. If there is an error, it will be of type *PathError. 
-func Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(fixpath(name), atime, mtime) -} - // IsAccessDenied checks if the error is due to permission error. func IsAccessDenied(err error) bool { return os.IsPermission(err) From 65a71573836a2f1677472fca514beeaf76fe7cba Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 21 Jul 2024 15:56:03 +0200 Subject: [PATCH 654/893] mount: use os instead of fs package --- cmd/restic/cmd_mount.go | 3 +-- internal/fs/file.go | 6 ------ 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 2f57a6d1f7d..b8a66dc908f 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -15,7 +15,6 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" - resticfs "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/fuse" systemFuse "github.com/anacrolix/fuse" @@ -122,7 +121,7 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args // Check the existence of the mount point at the earliest stage to // prevent unnecessary computations while opening the repository. - if _, err := resticfs.Stat(mountpoint); errors.Is(err, os.ErrNotExist) { + if _, err := os.Stat(mountpoint); errors.Is(err, os.ErrNotExist) { Verbosef("Mountpoint %s doesn't exist\n", mountpoint) return err } diff --git a/internal/fs/file.go b/internal/fs/file.go index b727df79c17..c1ebae08192 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -60,12 +60,6 @@ func Link(oldname, newname string) error { return os.Link(fixpath(oldname), fixpath(newname)) } -// Stat returns a FileInfo structure describing the named file. -// If there is an error, it will be of type *PathError. -func Stat(name string) (os.FileInfo, error) { - return os.Stat(fixpath(name)) -} - // Lstat returns the FileInfo structure describing the named file. // If the file is a symbolic link, the returned FileInfo // describes the symbolic link. Lstat makes no attempt to follow the link. 
From cc7f99125a7cc25b0da073c29f787c8ba1be30c9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 21 Jul 2024 15:58:41 +0200 Subject: [PATCH 655/893] minimize usage of internal/fs in tests --- internal/archiver/archiver_test.go | 4 ++-- internal/archiver/testing.go | 10 +++++----- internal/archiver/testing_test.go | 12 +++++------ internal/dump/tar_test.go | 3 +-- internal/dump/zip_test.go | 4 +--- internal/fs/file.go | 32 ------------------------------ internal/fs/node.go | 6 +++--- internal/fs/node_linux.go | 3 ++- internal/fs/node_windows.go | 2 +- internal/fs/setflags_linux_test.go | 2 +- internal/restorer/restorer_test.go | 2 +- 11 files changed, 23 insertions(+), 57 deletions(-) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 04d0b876b6d..b95947a2e60 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -846,7 +846,7 @@ func TestArchiverSaveDir(t *testing.T) { back := rtest.Chdir(t, chdir) defer back() - fi, err := fs.Lstat(test.target) + fi, err := os.Lstat(test.target) if err != nil { t.Fatal(err) } @@ -920,7 +920,7 @@ func TestArchiverSaveDirIncremental(t *testing.T) { arch.runWorkers(ctx, wg) arch.summary = &Summary{} - fi, err := fs.Lstat(tempdir) + fi, err := os.Lstat(tempdir) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index 97bada64433..e555a70d627 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -95,17 +95,17 @@ func TestCreateFiles(t testing.TB, target string, dir TestDir) { t.Fatal(err) } case TestSymlink: - err := fs.Symlink(filepath.FromSlash(it.Target), targetPath) + err := os.Symlink(filepath.FromSlash(it.Target), targetPath) if err != nil { t.Fatal(err) } case TestHardlink: - err := fs.Link(filepath.Join(target, filepath.FromSlash(it.Target)), targetPath) + err := os.Link(filepath.Join(target, filepath.FromSlash(it.Target)), targetPath) if err != nil { t.Fatal(err) } case TestDir: - err := fs.Mkdir(targetPath, 0755) + err := os.Mkdir(targetPath, 0755) if err != nil { t.Fatal(err) } @@ -157,7 +157,7 @@ func TestEnsureFiles(t testing.TB, target string, dir TestDir) { // first, test that all items are there TestWalkFiles(t, target, dir, func(path string, item interface{}) error { - fi, err := fs.Lstat(path) + fi, err := os.Lstat(path) if err != nil { return err } @@ -188,7 +188,7 @@ func TestEnsureFiles(t testing.TB, target string, dir TestDir) { return nil } - target, err := fs.Readlink(path) + target, err := os.Readlink(path) if err != nil { return err } diff --git a/internal/archiver/testing_test.go b/internal/archiver/testing_test.go index bb4b63a827b..a217abe2531 100644 --- a/internal/archiver/testing_test.go +++ b/internal/archiver/testing_test.go @@ -54,7 +54,7 @@ func (t *MockT) Errorf(msg string, args ...interface{}) { func createFilesAt(t testing.TB, targetdir string, files map[string]interface{}) { for name, item := range files { target := filepath.Join(targetdir, filepath.FromSlash(name)) - err := fs.MkdirAll(filepath.Dir(target), 0700) + err := os.MkdirAll(filepath.Dir(target), 0700) if err != nil { t.Fatal(err) } @@ -66,7 +66,7 @@ func createFilesAt(t testing.TB, targetdir string, files map[string]interface{}) t.Fatal(err) } case TestSymlink: - err := fs.Symlink(filepath.FromSlash(it.Target), target) + err := os.Symlink(filepath.FromSlash(it.Target), target) if err != nil { t.Fatal(err) } @@ -105,7 +105,7 @@ func TestTestCreateFiles(t *testing.T) { t.Run("", func(t *testing.T) { 
tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i)) - err := fs.MkdirAll(tempdir, 0700) + err := os.MkdirAll(tempdir, 0700) if err != nil { t.Fatal(err) } @@ -114,7 +114,7 @@ func TestTestCreateFiles(t *testing.T) { for name, item := range test.files { targetPath := filepath.Join(tempdir, filepath.FromSlash(name)) - fi, err := fs.Lstat(targetPath) + fi, err := os.Lstat(targetPath) if err != nil { t.Error(err) continue @@ -142,7 +142,7 @@ func TestTestCreateFiles(t *testing.T) { continue } - target, err := fs.Readlink(targetPath) + target, err := os.Readlink(targetPath) if err != nil { t.Error(err) continue @@ -455,7 +455,7 @@ func TestTestEnsureSnapshot(t *testing.T) { tempdir := rtest.TempDir(t) targetDir := filepath.Join(tempdir, "target") - err := fs.Mkdir(targetDir, 0700) + err := os.Mkdir(targetDir, 0700) if err != nil { t.Fatal(err) } diff --git a/internal/dump/tar_test.go b/internal/dump/tar_test.go index 0c97eae35b4..cb3cb08c438 100644 --- a/internal/dump/tar_test.go +++ b/internal/dump/tar_test.go @@ -13,7 +13,6 @@ import ( "testing" "time" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -83,7 +82,7 @@ func checkTar(t *testing.T, testDir string, srcTar *bytes.Buffer) error { return fmt.Errorf("foldernames must end with separator got %v", hdr.Name) } case tar.TypeSymlink: - target, err := fs.Readlink(matchPath) + target, err := os.Readlink(matchPath) if err != nil { return err } diff --git a/internal/dump/zip_test.go b/internal/dump/zip_test.go index 0c304d3da56..6f5f60f54fa 100644 --- a/internal/dump/zip_test.go +++ b/internal/dump/zip_test.go @@ -9,8 +9,6 @@ import ( "strings" "testing" "time" - - "github.com/restic/restic/internal/fs" ) func TestWriteZip(t *testing.T) { @@ -91,7 +89,7 @@ func checkZip(t *testing.T, testDir string, srcZip *bytes.Buffer) error { return fmt.Errorf("foldernames must end with separator got %v", f.Name) } case f.Mode()&os.ModeSymlink != 0: - target, err := fs.Readlink(matchPath) + target, err := os.Readlink(matchPath) if err != nil { return err } diff --git a/internal/fs/file.go b/internal/fs/file.go index c1ebae08192..8d60ed15978 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -3,15 +3,8 @@ package fs import ( "fmt" "os" - "time" ) -// Mkdir creates a new directory with the specified name and permission bits. -// If there is an error, it will be of type *PathError. -func Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(fixpath(name), perm) -} - // MkdirAll creates a directory named path, along with any necessary parents, // and returns nil, or else returns an error. The permission bits perm are used // for all directories that MkdirAll creates. If path is already a directory, @@ -20,12 +13,6 @@ func MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(fixpath(path), perm) } -// Readlink returns the destination of the named symbolic link. -// If there is an error, it will be of type *PathError. -func Readlink(name string) (string, error) { - return os.Readlink(fixpath(name)) -} - // Remove removes the named file or directory. // If there is an error, it will be of type *PathError. func Remove(name string) error { @@ -40,20 +27,6 @@ func RemoveAll(path string) error { return os.RemoveAll(fixpath(path)) } -// Rename renames (moves) oldpath to newpath. -// If newpath already exists, Rename replaces it. -// OS-specific restrictions may apply when oldpath and newpath are in different directories. 
-// If there is an error, it will be of type *LinkError. -func Rename(oldpath, newpath string) error { - return os.Rename(fixpath(oldpath), fixpath(newpath)) -} - -// Symlink creates newname as a symbolic link to oldname. -// If there is an error, it will be of type *LinkError. -func Symlink(oldname, newname string) error { - return os.Symlink(oldname, fixpath(newname)) -} - // Link creates newname as a hard link to oldname. // If there is an error, it will be of type *LinkError. func Link(oldname, newname string) error { @@ -68,11 +41,6 @@ func Lstat(name string) (os.FileInfo, error) { return os.Lstat(fixpath(name)) } -// Open opens a file for reading. -func Open(name string) (File, error) { - return os.Open(fixpath(name)) -} - // OpenFile is the generalized open call; most users will use Open // or Create instead. It opens the named file with specified flag // (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, diff --git a/internal/fs/node.go b/internal/fs/node.go index e29011cc265..3945c426e2d 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -80,7 +80,7 @@ func nodeFillExtra(node *restic.Node, path string, fi os.FileInfo, ignoreXattrLi case restic.NodeTypeDir: case restic.NodeTypeSymlink: var err error - node.LinkTarget, err = Readlink(path) + node.LinkTarget, err = os.Readlink(fixpath(path)) node.Links = uint64(stat.nlink()) if err != nil { return errors.WithStack(err) @@ -212,7 +212,7 @@ func NodeCreateAt(node *restic.Node, path string) error { } func nodeCreateDirAt(node *restic.Node, path string) error { - err := Mkdir(path, node.Mode) + err := os.Mkdir(fixpath(path), node.Mode) if err != nil && !os.IsExist(err) { return errors.WithStack(err) } @@ -234,7 +234,7 @@ func nodeCreateFileAt(path string) error { } func nodeCreateSymlinkAt(node *restic.Node, path string) error { - if err := Symlink(node.LinkTarget, path); err != nil { + if err := os.Symlink(node.LinkTarget, fixpath(path)); err != nil { return errors.WithStack(err) } diff --git a/internal/fs/node_linux.go b/internal/fs/node_linux.go index 1cb4ee1ae15..4cb9bdeaa67 100644 --- a/internal/fs/node_linux.go +++ b/internal/fs/node_linux.go @@ -1,6 +1,7 @@ package fs import ( + "os" "path/filepath" "syscall" @@ -10,7 +11,7 @@ import ( ) func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - dir, err := Open(filepath.Dir(path)) + dir, err := os.Open(fixpath(filepath.Dir(path))) if err != nil { return errors.WithStack(err) } diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index f2a2001e968..d904c37feec 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -58,7 +58,7 @@ func lchown(_ string, _ int, _ int) (err error) { // restoreSymlinkTimestamps restores timestamps for symlinks func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { // tweaked version of UtimesNano from go/src/syscall/syscall_windows.go - pathp, e := syscall.UTF16PtrFromString(path) + pathp, e := syscall.UTF16PtrFromString(fixpath(path)) if e != nil { return e } diff --git a/internal/fs/setflags_linux_test.go b/internal/fs/setflags_linux_test.go index b561a1009d8..8fe14a5a61f 100644 --- a/internal/fs/setflags_linux_test.go +++ b/internal/fs/setflags_linux_test.go @@ -19,7 +19,7 @@ func TestNoatime(t *testing.T) { defer func() { _ = f.Close() - err = Remove(f.Name()) + err = os.Remove(f.Name()) if err != nil { t.Fatal(err) } diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 
8844435ca74..e0306ce01cc 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -1223,7 +1223,7 @@ func TestRestorerOverwriteSpecial(t *testing.T) { } } for filename, target := range links { - link, err := fs.Readlink(filepath.Join(tempdir, filepath.FromSlash(filename))) + link, err := os.Readlink(filepath.Join(tempdir, filepath.FromSlash(filename))) rtest.OK(t, err) rtest.Equals(t, link, target, "wrong symlink target") } From cf051e777a7c2bd443171a9e8c35ce55161fdcab Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 21 Jul 2024 16:47:13 +0200 Subject: [PATCH 656/893] fs: remove Readdir method from File interface --- internal/fs/fs_reader.go | 11 ---- internal/fs/fs_reader_test.go | 95 ----------------------------------- internal/fs/interface.go | 1 - 3 files changed, 107 deletions(-) diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 84a79168e60..9dd5ff6eb8f 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -237,10 +237,6 @@ func (f fakeFile) Readdirnames(_ int) ([]string, error) { return nil, pathError("readdirnames", f.name, os.ErrInvalid) } -func (f fakeFile) Readdir(_ int) ([]os.FileInfo, error) { - return nil, pathError("readdir", f.name, os.ErrInvalid) -} - func (f fakeFile) Seek(int64, int) (int64, error) { return 0, pathError("seek", f.name, os.ErrInvalid) } @@ -279,13 +275,6 @@ func (d fakeDir) Readdirnames(n int) ([]string, error) { return names, nil } -func (d fakeDir) Readdir(n int) ([]os.FileInfo, error) { - if n > 0 { - return nil, pathError("readdir", d.name, errors.New("not implemented")) - } - return d.entries, nil -} - // fakeFileInfo implements the bare minimum of os.FileInfo. type fakeFileInfo struct { name string diff --git a/internal/fs/fs_reader_test.go b/internal/fs/fs_reader_test.go index 9fa67b5ac31..442912fe3af 100644 --- a/internal/fs/fs_reader_test.go +++ b/internal/fs/fs_reader_test.go @@ -60,77 +60,6 @@ func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { } } -type fiSlice []os.FileInfo - -func (s fiSlice) Len() int { - return len(s) -} - -func (s fiSlice) Less(i, j int) bool { - return s[i].Name() < s[j].Name() -} - -func (s fiSlice) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func verifyDirectoryContentsFI(t testing.TB, fs FS, dir string, want []os.FileInfo) { - f, err := fs.OpenFile(dir, os.O_RDONLY, 0) - if err != nil { - t.Fatal(err) - } - - entries, err := f.Readdir(-1) - if err != nil { - t.Fatal(err) - } - - err = f.Close() - if err != nil { - t.Fatal(err) - } - - sort.Sort(fiSlice(want)) - sort.Sort(fiSlice(entries)) - - if len(want) != len(entries) { - t.Errorf("wrong number of entries returned, want %d, got %d", len(want), len(entries)) - } - max := len(want) - if len(entries) < max { - max = len(entries) - } - - for i := 0; i < max; i++ { - fi1 := want[i] - fi2 := entries[i] - - if fi1.Name() != fi2.Name() { - t.Errorf("entry %d: wrong value for Name: want %q, got %q", i, fi1.Name(), fi2.Name()) - } - - if fi1.IsDir() != fi2.IsDir() { - t.Errorf("entry %d: wrong value for IsDir: want %v, got %v", i, fi1.IsDir(), fi2.IsDir()) - } - - if fi1.Mode() != fi2.Mode() { - t.Errorf("entry %d: wrong value for Mode: want %v, got %v", i, fi1.Mode(), fi2.Mode()) - } - - if fi1.ModTime() != fi2.ModTime() { - t.Errorf("entry %d: wrong value for ModTime: want %v, got %v", i, fi1.ModTime(), fi2.ModTime()) - } - - if fi1.Size() != fi2.Size() { - t.Errorf("entry %d: wrong value for Size: want %v, got %v", i, fi1.Size(), fi2.Size()) - } - - if 
fi1.Sys() != fi2.Sys() { - t.Errorf("entry %d: wrong value for Sys: want %v, got %v", i, fi1.Sys(), fi2.Sys()) - } - } -} - func checkFileInfo(t testing.TB, fi os.FileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) { if fi.IsDir() != isdir { t.Errorf("IsDir returned %t, want %t", fi.IsDir(), isdir) @@ -174,30 +103,6 @@ func TestFSReader(t *testing.T) { verifyDirectoryContents(t, fs, ".", []string{filename}) }, }, - { - name: "Readdir-slash", - f: func(t *testing.T, fs FS) { - fi := fakeFileInfo{ - mode: 0644, - modtime: now, - name: filename, - size: int64(len(data)), - } - verifyDirectoryContentsFI(t, fs, "/", []os.FileInfo{fi}) - }, - }, - { - name: "Readdir-current", - f: func(t *testing.T, fs FS) { - fi := fakeFileInfo{ - mode: 0644, - modtime: now, - name: filename, - size: int64(len(data)), - } - verifyDirectoryContentsFI(t, fs, ".", []os.FileInfo{fi}) - }, - }, { name: "file/OpenFile", f: func(t *testing.T, fs FS) { diff --git a/internal/fs/interface.go b/internal/fs/interface.go index bc6aab44a12..7707abd1d73 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -31,7 +31,6 @@ type File interface { Fd() uintptr Readdirnames(n int) ([]string, error) - Readdir(int) ([]os.FileInfo, error) Seek(int64, int) (int64, error) Stat() (os.FileInfo, error) Name() string From 6d3a5260d358cd89e8727fd279a94dd8d553ff35 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 21 Jul 2024 16:00:47 +0200 Subject: [PATCH 657/893] fs: unexport a several windows functions --- internal/fs/ea_windows.go | 36 ++++++++++----------- internal/fs/ea_windows_test.go | 32 +++++++++---------- internal/fs/file_unix.go | 4 +-- internal/fs/file_windows.go | 16 +++++----- internal/fs/fs_local_vss.go | 4 +-- internal/fs/fs_local_vss_test.go | 7 ++-- internal/fs/node.go | 2 +- internal/fs/node_windows.go | 36 ++++++++++----------- internal/fs/node_windows_test.go | 18 +++++------ internal/fs/sd_windows.go | 44 +++++++++++++------------- internal/fs/sd_windows_test.go | 10 +++--- internal/fs/sd_windows_test_helpers.go | 18 +++++------ internal/fs/vss.go | 4 +-- internal/fs/vss_windows.go | 5 +-- 14 files changed, 119 insertions(+), 117 deletions(-) diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go index bf7b02fd407..6bfe2020906 100644 --- a/internal/fs/ea_windows.go +++ b/internal/fs/ea_windows.go @@ -56,14 +56,14 @@ var ( errEaValueTooLarge = errors.New("extended attribute value too large") ) -// ExtendedAttribute represents a single Windows EA. -type ExtendedAttribute struct { +// extendedAttribute represents a single Windows EA. +type extendedAttribute struct { Name string Value []byte Flags uint8 } -func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { +func parseEa(b []byte) (ea extendedAttribute, nb []byte, err error) { var info fileFullEaInformation err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) if err != nil { @@ -90,9 +90,9 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { return ea, nb, err } -// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// decodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION // buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
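// Illustrative sketch, not part of this patch: with Readdir dropped from the
// File interface above, callers list names via Readdirnames and stat each
// entry themselves when they need an os.FileInfo. Shown here with *os.File,
// which provides the same Readdirnames method; listEntries is a hypothetical
// helper, not code from the repository.
package example

import (
	"os"
	"path/filepath"
)

// listEntries returns FileInfo values for all entries of dir.
func listEntries(dir string) ([]os.FileInfo, error) {
	f, err := os.Open(dir)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	names, err := f.Readdirnames(-1)
	if err != nil {
		return nil, err
	}

	infos := make([]os.FileInfo, 0, len(names))
	for _, name := range names {
		fi, err := os.Lstat(filepath.Join(dir, name))
		if err != nil {
			return nil, err
		}
		infos = append(infos, fi)
	}
	return infos, nil
}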
-func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { +func decodeExtendedAttributes(b []byte) (eas []extendedAttribute, err error) { for len(b) != 0 { ea, nb, err := parseEa(b) if err != nil { @@ -105,7 +105,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { return eas, err } -func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { +func writeEa(buf *bytes.Buffer, ea *extendedAttribute, last bool) error { if int(uint8(len(ea.Name))) != len(ea.Name) { return errEaNameTooLarge } @@ -153,9 +153,9 @@ func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { return nil } -// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// encodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION // buffer for use with BackupWrite, ZwSetEaFile, etc. -func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { +func encodeExtendedAttributes(eas []extendedAttribute) ([]byte, error) { var buf bytes.Buffer for i := range eas { last := false @@ -217,11 +217,11 @@ const ( STATUS_NO_EAS_ON_FILE = -1073741742 ) -// GetFileEA retrieves the extended attributes for the file represented by `handle`. The +// fgetEA retrieves the extended attributes for the file represented by `handle`. The // `handle` must have been opened with file access flag FILE_READ_EA (0x8). // The extended file attribute names in windows are case-insensitive and when fetching // the attributes the names are generally returned in UPPER case. -func GetFileEA(handle windows.Handle) ([]ExtendedAttribute, error) { +func fgetEA(handle windows.Handle) ([]extendedAttribute, error) { // default buffer size to start with bufLen := 1024 buf := make([]byte, bufLen) @@ -246,13 +246,13 @@ func GetFileEA(handle windows.Handle) ([]ExtendedAttribute, error) { } break } - return DecodeExtendedAttributes(buf) + return decodeExtendedAttributes(buf) } -// SetFileEA sets the extended attributes for the file represented by `handle`. The +// fsetEA sets the extended attributes for the file represented by `handle`. The // handle must have been opened with the file access flag FILE_WRITE_EA(0x10). -func SetFileEA(handle windows.Handle, attrs []ExtendedAttribute) error { - encodedEA, err := EncodeExtendedAttributes(attrs) +func fsetEA(handle windows.Handle, attrs []extendedAttribute) error { + encodedEA, err := encodeExtendedAttributes(attrs) if err != nil { return fmt.Errorf("failed to encoded extended attributes: %w", err) } @@ -285,8 +285,8 @@ func setFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen ui return } -// PathSupportsExtendedAttributes returns true if the path supports extended attributes. -func PathSupportsExtendedAttributes(path string) (supported bool, err error) { +// pathSupportsExtendedAttributes returns true if the path supports extended attributes. +func pathSupportsExtendedAttributes(path string) (supported bool, err error) { var fileSystemFlags uint32 utf16Path, err := windows.UTF16PtrFromString(path) if err != nil { @@ -300,8 +300,8 @@ func PathSupportsExtendedAttributes(path string) (supported bool, err error) { return supported, nil } -// GetVolumePathName returns the volume path name for the given path. -func GetVolumePathName(path string) (volumeName string, err error) { +// getVolumePathName returns the volume path name for the given path. 
+func getVolumePathName(path string) (volumeName string, err error) { utf16Path, err := windows.UTF16PtrFromString(path) if err != nil { return "", err diff --git a/internal/fs/ea_windows_test.go b/internal/fs/ea_windows_test.go index e474a3735f6..64bc7f7b6aa 100644 --- a/internal/fs/ea_windows_test.go +++ b/internal/fs/ea_windows_test.go @@ -46,7 +46,7 @@ import ( // under MIT license. var ( - testEas = []ExtendedAttribute{ + testEas = []extendedAttribute{ {Name: "foo", Value: []byte("bar")}, {Name: "fizz", Value: []byte("buzz")}, } @@ -58,14 +58,14 @@ var ( ) func TestRoundTripEas(t *testing.T) { - b, err := EncodeExtendedAttributes(testEas) + b, err := encodeExtendedAttributes(testEas) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(testEasEncoded, b) { t.Fatalf("Encoded mismatch %v %v", testEasEncoded, b) } - eas, err := DecodeExtendedAttributes(b) + eas, err := decodeExtendedAttributes(b) if err != nil { t.Fatal(err) } @@ -75,7 +75,7 @@ func TestRoundTripEas(t *testing.T) { } func TestEasDontNeedPaddingAtEnd(t *testing.T) { - eas, err := DecodeExtendedAttributes(testEasNotPadded) + eas, err := decodeExtendedAttributes(testEasNotPadded) if err != nil { t.Fatal(err) } @@ -85,21 +85,21 @@ func TestEasDontNeedPaddingAtEnd(t *testing.T) { } func TestTruncatedEasFailCorrectly(t *testing.T) { - _, err := DecodeExtendedAttributes(testEasTruncated) + _, err := decodeExtendedAttributes(testEasTruncated) if err == nil { t.Fatal("expected error") } } func TestNilEasEncodeAndDecodeAsNil(t *testing.T) { - b, err := EncodeExtendedAttributes(nil) + b, err := encodeExtendedAttributes(nil) if err != nil { t.Fatal(err) } if len(b) != 0 { t.Fatal("expected empty") } - eas, err := DecodeExtendedAttributes(nil) + eas, err := decodeExtendedAttributes(nil) if err != nil { t.Fatal(err) } @@ -178,8 +178,8 @@ func setupTestFolder(t *testing.T) string { return testfolderPath } -func generateTestEAs(t *testing.T, nAttrs int, path string) []ExtendedAttribute { - testEAs := make([]ExtendedAttribute, nAttrs) +func generateTestEAs(t *testing.T, nAttrs int, path string) []extendedAttribute { + testEAs := make([]extendedAttribute, nAttrs) for i := 0; i < nAttrs; i++ { testEAs[i].Name = fmt.Sprintf("TESTEA%d", i+1) testEAs[i].Value = make([]byte, getRandomInt()) @@ -231,12 +231,12 @@ func cleanupTestFile(t *testing.T, path string) { } } -func testSetGetEA(t *testing.T, path string, handle windows.Handle, testEAs []ExtendedAttribute) { - if err := SetFileEA(handle, testEAs); err != nil { +func testSetGetEA(t *testing.T, path string, handle windows.Handle, testEAs []extendedAttribute) { + if err := fsetEA(handle, testEAs); err != nil { t.Fatalf("set EA for path %s failed: %s", path, err) } - readEAs, err := GetFileEA(handle) + readEAs, err := fgetEA(handle) if err != nil { t.Fatalf("get EA for path %s failed: %s", path, err) } @@ -262,7 +262,7 @@ func TestPathSupportsExtendedAttributes(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - supported, err := PathSupportsExtendedAttributes(tc.path) + supported, err := pathSupportsExtendedAttributes(tc.path) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -273,7 +273,7 @@ func TestPathSupportsExtendedAttributes(t *testing.T) { } // Test with an invalid path - _, err := PathSupportsExtendedAttributes("Z:\\NonExistentPath-UAS664da5s4dyu56das45f5as") + _, err := pathSupportsExtendedAttributes("Z:\\NonExistentPath-UAS664da5s4dyu56das45f5as") if err == nil { t.Error("Expected an error for non-existent path, but got nil") } @@ -305,7 
+305,7 @@ func TestGetVolumePathName(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - volumeName, err := GetVolumePathName(tc.path) + volumeName, err := getVolumePathName(tc.path) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -316,7 +316,7 @@ func TestGetVolumePathName(t *testing.T) { } // Test with an invalid path - _, err := GetVolumePathName("Z:\\NonExistentPath") + _, err := getVolumePathName("Z:\\NonExistentPath") if err == nil { t.Error("Expected an error for non-existent path, but got nil") } diff --git a/internal/fs/file_unix.go b/internal/fs/file_unix.go index b562d15b1db..4e7765c3035 100644 --- a/internal/fs/file_unix.go +++ b/internal/fs/file_unix.go @@ -37,8 +37,8 @@ func isNotSupported(err error) bool { return false } -// Chmod changes the mode of the named file to mode. -func Chmod(name string, mode os.FileMode) error { +// chmod changes the mode of the named file to mode. +func chmod(name string, mode os.FileMode) error { err := os.Chmod(fixpath(name), mode) // ignore the error if the FS does not support setting this mode (e.g. CIFS with gvfs on Linux) diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go index 7a6d79f2521..3d011f719cb 100644 --- a/internal/fs/file_windows.go +++ b/internal/fs/file_windows.go @@ -75,17 +75,17 @@ func TempFile(dir, prefix string) (f *os.File, err error) { } // Chmod changes the mode of the named file to mode. -func Chmod(name string, mode os.FileMode) error { +func chmod(name string, mode os.FileMode) error { return os.Chmod(fixpath(name), mode) } -// ClearSystem removes the system attribute from the file. -func ClearSystem(path string) error { - return ClearAttribute(path, windows.FILE_ATTRIBUTE_SYSTEM) +// clearSystem removes the system attribute from the file. +func clearSystem(path string) error { + return clearAttribute(path, windows.FILE_ATTRIBUTE_SYSTEM) } -// ClearAttribute removes the specified attribute from the file. -func ClearAttribute(path string, attribute uint32) error { +// clearAttribute removes the specified attribute from the file. 
+func clearAttribute(path string, attribute uint32) error { ptr, err := windows.UTF16PtrFromString(fixpath(path)) if err != nil { return err @@ -105,8 +105,8 @@ func ClearAttribute(path string, attribute uint32) error { return nil } -// OpenHandleForEA return a file handle for file or dir for setting/getting EAs -func OpenHandleForEA(nodeType restic.NodeType, path string, writeAccess bool) (handle windows.Handle, err error) { +// openHandleForEA return a file handle for file or dir for setting/getting EAs +func openHandleForEA(nodeType restic.NodeType, path string, writeAccess bool) (handle windows.Handle, err error) { path = fixpath(path) fileAccess := windows.FILE_READ_EA if writeAccess { diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 46b40d013e9..27abcf9eab4 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -79,7 +79,7 @@ func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]st return } for _, s := range strings.Split(list, ";") { - if v, err := GetVolumeNameForVolumeMountPoint(s); err != nil { + if v, err := getVolumeNameForVolumeMountPoint(s); err != nil { msgError(s, errors.Errorf("failed to parse vss.exclude-volumes [%s]: %s", s, err)) } else { if volumes == nil { @@ -146,7 +146,7 @@ func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool { return true } - volume, err := GetVolumeNameForVolumeMountPoint(mountPoint) + volume, err := getVolumeNameForVolumeMountPoint(mountPoint) if err != nil { fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err)) return true diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 60262c8735b..a59882381d7 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package fs @@ -120,10 +121,10 @@ func TestVSSConfig(t *testing.T) { func TestParseMountPoints(t *testing.T) { volumeMatch := regexp.MustCompile(`^\\\\\?\\Volume\{[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}\}\\$`) - // It's not a good idea to test functions based on GetVolumeNameForVolumeMountPoint by calling - // GetVolumeNameForVolumeMountPoint itself, but we have restricted test environment: + // It's not a good idea to test functions based on getVolumeNameForVolumeMountPoint by calling + // getVolumeNameForVolumeMountPoint itself, but we have restricted test environment: // cannot manage volumes and can only be sure that the mount point C:\ exists - sysVolume, err := GetVolumeNameForVolumeMountPoint("C:") + sysVolume, err := getVolumeNameForVolumeMountPoint("C:") if err != nil { t.Fatal(err) } diff --git a/internal/fs/node.go b/internal/fs/node.go index 3945c426e2d..2dfe5417241 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -306,7 +306,7 @@ func nodeRestoreMetadata(node *restic.Node, path string, warn func(msg string)) // calling Chmod below will no longer allow any modifications to be made on the file and the // calls above would fail. 
if node.Type != restic.NodeTypeSymlink { - if err := Chmod(path, node.Mode); err != nil { + if err := chmod(path, node.Mode); err != nil { if firsterr == nil { firsterr = errors.WithStack(err) } diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index d904c37feec..d529e35039f 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -85,9 +85,9 @@ func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error func nodeRestoreExtendedAttributes(node *restic.Node, path string) (err error) { count := len(node.ExtendedAttributes) if count > 0 { - eas := make([]ExtendedAttribute, count) + eas := make([]extendedAttribute, count) for i, attr := range node.ExtendedAttributes { - eas[i] = ExtendedAttribute{Name: attr.Name, Value: attr.Value} + eas[i] = extendedAttribute{Name: attr.Name, Value: attr.Value} } if errExt := restoreExtendedAttributes(node.Type, path, eas); errExt != nil { return errExt @@ -99,7 +99,7 @@ func nodeRestoreExtendedAttributes(node *restic.Node, path string) (err error) { // fill extended attributes in the node. This also includes the Generic attributes for windows. func nodeFillExtendedAttributes(node *restic.Node, path string, _ bool) (err error) { var fileHandle windows.Handle - if fileHandle, err = OpenHandleForEA(node.Type, path, false); fileHandle == 0 { + if fileHandle, err = openHandleForEA(node.Type, path, false); fileHandle == 0 { return nil } if err != nil { @@ -107,8 +107,8 @@ func nodeFillExtendedAttributes(node *restic.Node, path string, _ bool) (err err } defer closeFileHandle(fileHandle, path) // Replaced inline defer with named function call //Get the windows Extended Attributes using the file handle - var extAtts []ExtendedAttribute - extAtts, err = GetFileEA(fileHandle) + var extAtts []extendedAttribute + extAtts, err = fgetEA(fileHandle) debug.Log("fillExtendedAttributes(%v) %v", path, extAtts) if err != nil { return errors.Errorf("get EA failed for path %v, with: %v", path, err) @@ -139,9 +139,9 @@ func closeFileHandle(fileHandle windows.Handle, path string) { // restoreExtendedAttributes handles restore of the Windows Extended Attributes to the specified path. // The Windows API requires setting of all the Extended Attributes in one call. 
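// Illustrative sketch, not part of this patch: inside the fs package the
// now-unexported Windows helpers renamed above compose like this. The name
// addTestEA is hypothetical; openHandleForEA, fgetEA and fsetEA are the
// helpers shown in the diff, and the zero-handle check mirrors how the
// existing code treats node types without EA support.

//go:build windows

package fs

import (
	"github.com/restic/restic/internal/restic"
	"golang.org/x/sys/windows"
)

// addTestEA appends one extended attribute to the file at path.
func addTestEA(path, name string, value []byte) error {
	handle, err := openHandleForEA(restic.NodeTypeFile, path, true)
	if err != nil {
		return err
	}
	if handle == 0 {
		// node types without EA support yield a zero handle
		return nil
	}
	defer func() { _ = windows.CloseHandle(handle) }()

	eas, err := fgetEA(handle)
	if err != nil {
		return err
	}
	eas = append(eas, extendedAttribute{Name: name, Value: value})
	return fsetEA(handle, eas)
}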
-func restoreExtendedAttributes(nodeType restic.NodeType, path string, eas []ExtendedAttribute) (err error) { +func restoreExtendedAttributes(nodeType restic.NodeType, path string, eas []extendedAttribute) (err error) { var fileHandle windows.Handle - if fileHandle, err = OpenHandleForEA(nodeType, path, true); fileHandle == 0 { + if fileHandle, err = openHandleForEA(nodeType, path, true); fileHandle == 0 { return nil } if err != nil { @@ -150,7 +150,7 @@ func restoreExtendedAttributes(nodeType restic.NodeType, path string, eas []Exte defer closeFileHandle(fileHandle, path) // Replaced inline defer with named function call // clear old unexpected xattrs by setting them to an empty value - oldEAs, err := GetFileEA(fileHandle) + oldEAs, err := fgetEA(fileHandle) if err != nil { return err } @@ -165,11 +165,11 @@ func restoreExtendedAttributes(nodeType restic.NodeType, path string, eas []Exte } if !found { - eas = append(eas, ExtendedAttribute{Name: oldEA.Name, Value: nil}) + eas = append(eas, extendedAttribute{Name: oldEA.Name, Value: nil}) } } - if err = SetFileEA(fileHandle, eas); err != nil { + if err = fsetEA(fileHandle, eas); err != nil { return errors.Errorf("set EA failed for path %v, with: %v", path, err) } return nil @@ -230,7 +230,7 @@ func nodeRestoreGenericAttributes(node *restic.Node, path string, warn func(msg } } if windowsAttributes.SecurityDescriptor != nil { - if err := SetSecurityDescriptor(path, windowsAttributes.SecurityDescriptor); err != nil { + if err := setSecurityDescriptor(path, windowsAttributes.SecurityDescriptor); err != nil { errs = append(errs, fmt.Errorf("error restoring security descriptor for: %s : %v", path, err)) } } @@ -296,7 +296,7 @@ func fixEncryptionAttribute(path string, attrs *uint32, pathPointer *uint16) (er if err != nil { return fmt.Errorf("failed to encrypt file: failed to reset permissions: %s : %v", path, err) } - err = ClearSystem(path) + err = clearSystem(path) if err != nil { return fmt.Errorf("failed to encrypt file: failed to clear system flag: %s : %v", path, err) } @@ -324,7 +324,7 @@ func fixEncryptionAttribute(path string, attrs *uint32, pathPointer *uint16) (er if err != nil { return fmt.Errorf("failed to encrypt file: failed to reset permissions: %s : %v", path, err) } - err = ClearSystem(path) + err = clearSystem(path) if err != nil { return fmt.Errorf("failed to decrypt file: failed to clear system flag: %s : %v", path, err) } @@ -392,7 +392,7 @@ func nodeFillGenericAttributes(node *restic.Node, path string, fi os.FileInfo, s if err != nil { return false, err } - if sd, err = GetSecurityDescriptor(path); err != nil { + if sd, err = getSecurityDescriptor(path); err != nil { return allowExtended, err } } @@ -422,7 +422,7 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { return eaSupportedValue.(bool), nil } // If not found, check if EA is supported with manually prepared volume name - isEASupportedVolume, err = PathSupportsExtendedAttributes(volumeName + `\`) + isEASupportedVolume, err = pathSupportsExtendedAttributes(volumeName + `\`) // If the prepared volume name is not valid, we will fetch the actual volume name next. 
if err != nil && !errors.Is(err, windows.DNS_ERROR_INVALID_NAME) { debug.Log("Error checking if extended attributes are supported for prepared volume name %s: %v", volumeName, err) @@ -431,8 +431,8 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { return false, nil } } - // If an entry is not found, get the actual volume name using the GetVolumePathName function - volumeNameActual, err := GetVolumePathName(path) + // If an entry is not found, get the actual volume name + volumeNameActual, err := getVolumePathName(path) if err != nil { debug.Log("Error getting actual volume name %s for path %s: %v", volumeName, path, err) // There can be multiple errors like path does not exist, bad network path, etc. @@ -447,7 +447,7 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { return eaSupportedValue.(bool), nil } // If the actual volume name is different and is not in the map, again check if the new volume supports extended attributes with the actual volume name - isEASupportedVolume, err = PathSupportsExtendedAttributes(volumeNameActual + `\`) + isEASupportedVolume, err = pathSupportsExtendedAttributes(volumeNameActual + `\`) // Debug log for cases where the prepared volume name is not valid if err != nil { debug.Log("Error checking if extended attributes are supported for actual volume name %s: %v", volumeNameActual, err) diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index 729ce2adb86..61836487915 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -23,10 +23,10 @@ import ( func TestRestoreSecurityDescriptors(t *testing.T) { t.Parallel() tempDir := t.TempDir() - for i, sd := range TestFileSDs { + for i, sd := range testFileSDs { testRestoreSecurityDescriptor(t, sd, tempDir, restic.NodeTypeFile, fmt.Sprintf("testfile%d", i)) } - for i, sd := range TestDirSDs { + for i, sd := range testDirSDs { testRestoreSecurityDescriptor(t, sd, tempDir, restic.NodeTypeDir, fmt.Sprintf("testdir%d", i)) } } @@ -47,13 +47,13 @@ func testRestoreSecurityDescriptor(t *testing.T, sd string, tempDir string, file sdByteFromRestoredNode := getWindowsAttr(t, testPath, node).SecurityDescriptor // Get the security descriptor for the test path after the restore. - sdBytesFromRestoredPath, err := GetSecurityDescriptor(testPath) + sdBytesFromRestoredPath, err := getSecurityDescriptor(testPath) test.OK(t, errors.Wrapf(err, "Error while getting the security descriptor for: %s", testPath)) // Compare the input SD and the SD got from the restored file. - CompareSecurityDescriptors(t, testPath, sdInputBytes, *sdBytesFromRestoredPath) + compareSecurityDescriptors(t, testPath, sdInputBytes, *sdBytesFromRestoredPath) // Compare the SD got from node constructed from the restored file info and the SD got directly from the restored file. 
- CompareSecurityDescriptors(t, testPath, *sdByteFromRestoredNode, *sdBytesFromRestoredPath) + compareSecurityDescriptors(t, testPath, *sdByteFromRestoredNode, *sdBytesFromRestoredPath) } func getNode(name string, fileType restic.NodeType, genericAttributes map[restic.GenericAttributeType]json.RawMessage) restic.Node { @@ -312,12 +312,12 @@ func TestRestoreExtendedAttributes(t *testing.T) { test.OK(t, errors.Wrapf(err, "Error closing file for: %s", testPath)) }() - extAttr, err := GetFileEA(handle) + extAttr, err := fgetEA(handle) test.OK(t, errors.Wrapf(err, "Error getting extended attributes for: %s", testPath)) test.Equals(t, len(node.ExtendedAttributes), len(extAttr)) for _, expectedExtAttr := range node.ExtendedAttributes { - var foundExtAttr *ExtendedAttribute + var foundExtAttr *extendedAttribute for _, ea := range extAttr { if strings.EqualFold(ea.Name, expectedExtAttr.Name) { foundExtAttr = &ea @@ -491,13 +491,13 @@ func TestPrepareVolumeName(t *testing.T) { test.Equals(t, tc.expectedVolume, volume) if tc.isRealPath { - isEASupportedVolume, err := PathSupportsExtendedAttributes(volume + `\`) + isEASupportedVolume, err := pathSupportsExtendedAttributes(volume + `\`) // If the prepared volume name is not valid, we will next fetch the actual volume name. test.OK(t, err) test.Equals(t, tc.expectedEASupported, isEASupportedVolume) - actualVolume, err := GetVolumePathName(tc.path) + actualVolume, err := getVolumePathName(tc.path) test.OK(t, err) test.Equals(t, tc.expectedVolume, actualVolume) } diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 0004f1809f8..c68926f24e9 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -19,14 +19,14 @@ var ( onceBackup sync.Once onceRestore sync.Once - // SeBackupPrivilege allows the application to bypass file and directory ACLs to back up files and directories. - SeBackupPrivilege = "SeBackupPrivilege" - // SeRestorePrivilege allows the application to bypass file and directory ACLs to restore files and directories. - SeRestorePrivilege = "SeRestorePrivilege" - // SeSecurityPrivilege allows read and write access to all SACLs. - SeSecurityPrivilege = "SeSecurityPrivilege" - // SeTakeOwnershipPrivilege allows the application to take ownership of files and directories, regardless of the permissions set on them. - SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" + // seBackupPrivilege allows the application to bypass file and directory ACLs to back up files and directories. + seBackupPrivilege = "SeBackupPrivilege" + // seRestorePrivilege allows the application to bypass file and directory ACLs to restore files and directories. + seRestorePrivilege = "SeRestorePrivilege" + // seSecurityPrivilege allows read and write access to all SACLs. + seSecurityPrivilege = "SeSecurityPrivilege" + // seTakeOwnershipPrivilege allows the application to take ownership of files and directories, regardless of the permissions set on them. + seTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" lowerPrivileges atomic.Bool ) @@ -40,10 +40,10 @@ var lowBackupSecurityFlags windows.SECURITY_INFORMATION = windows.OWNER_SECURITY // Flags for restore without admin permissions. If there are no admin permissions, only the DACL from the SD can be restored and owner and group will be set based on the current user. 
var lowRestoreSecurityFlags windows.SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION | windows.ATTRIBUTE_SECURITY_INFORMATION | windows.PROTECTED_DACL_SECURITY_INFORMATION -// GetSecurityDescriptor takes the path of the file and returns the SecurityDescriptor for the file. +// getSecurityDescriptor takes the path of the file and returns the SecurityDescriptor for the file. // This needs admin permissions or SeBackupPrivilege for getting the full SD. // If there are no admin permissions, only the current user's owner, group and DACL will be got. -func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err error) { +func getSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err error) { onceBackup.Do(enableBackupPrivilege) var sd *windows.SECURITY_DESCRIPTOR @@ -59,7 +59,7 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. lowerPrivileges.Store(true) - return GetSecurityDescriptor(filePath) + return getSecurityDescriptor(filePath) } else if errors.Is(err, windows.ERROR_NOT_SUPPORTED) { return nil, nil } else { @@ -74,15 +74,15 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err return &sdBytes, nil } -// SetSecurityDescriptor sets the SecurityDescriptor for the file at the specified path. +// setSecurityDescriptor sets the SecurityDescriptor for the file at the specified path. // This needs admin permissions or SeRestorePrivilege, SeSecurityPrivilege and SeTakeOwnershipPrivilege // for setting the full SD. // If there are no admin permissions/required privileges, only the DACL from the SD can be set and // owner and group will be set based on the current user. -func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { +func setSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { onceRestore.Do(enableRestorePrivilege) // Set the security descriptor on the file - sd, err := SecurityDescriptorBytesToStruct(*securityDescriptor) + sd, err := securityDescriptorBytesToStruct(*securityDescriptor) if err != nil { return fmt.Errorf("error converting bytes to security descriptor: %w", err) } @@ -120,7 +120,7 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { // If ERROR_PRIVILEGE_NOT_HELD is encountered, fallback to backups/restores using lower non-admin privileges. 
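// Illustrative sketch, not part of this patch: the unexported descriptor
// helpers renamed above pair up for a copy operation. The name
// cloneSecurityDescriptor is hypothetical; getSecurityDescriptor may return
// a nil descriptor when the filesystem does not support security
// descriptors, which the sketch treats as "nothing to restore".

//go:build windows

package fs

// cloneSecurityDescriptor copies the security descriptor from src to dst.
func cloneSecurityDescriptor(src, dst string) error {
	sd, err := getSecurityDescriptor(src)
	if err != nil {
		return err
	}
	if sd == nil {
		// filesystem reported that security descriptors are not supported
		return nil
	}
	return setSecurityDescriptor(dst, sd)
}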
lowerPrivileges.Store(true) - return SetSecurityDescriptor(filePath, securityDescriptor) + return setSecurityDescriptor(filePath, securityDescriptor) } else { return fmt.Errorf("set named security info failed with: %w", err) } @@ -150,7 +150,7 @@ func setNamedSecurityInfoLow(filePath string, dacl *windows.ACL) error { // enableBackupPrivilege enables privilege for backing up security descriptors func enableBackupPrivilege() { - err := enableProcessPrivileges([]string{SeBackupPrivilege}) + err := enableProcessPrivileges([]string{seBackupPrivilege}) if err != nil { debug.Log("error enabling backup privilege: %v", err) } @@ -158,7 +158,7 @@ func enableBackupPrivilege() { // enableBackupPrivilege enables privilege for restoring security descriptors func enableRestorePrivilege() { - err := enableProcessPrivileges([]string{SeRestorePrivilege, SeSecurityPrivilege, SeTakeOwnershipPrivilege}) + err := enableProcessPrivileges([]string{seRestorePrivilege, seSecurityPrivilege, seTakeOwnershipPrivilege}) if err != nil { debug.Log("error enabling restore/security privilege: %v", err) } @@ -174,9 +174,9 @@ func isHandlePrivilegeNotHeldError(err error) bool { return false } -// SecurityDescriptorBytesToStruct converts the security descriptor bytes representation +// securityDescriptorBytesToStruct converts the security descriptor bytes representation // into a pointer to windows SECURITY_DESCRIPTOR. -func SecurityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) { +func securityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) { if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { return nil, fmt.Errorf("securityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) } @@ -245,13 +245,13 @@ var ( privNameMutex sync.Mutex ) -// PrivilegeError represents an error enabling privileges. -type PrivilegeError struct { +// privilegeError represents an error enabling privileges. +type privilegeError struct { privileges []uint64 } // Error returns the string message for the error. 
-func (e *PrivilegeError) Error() string { +func (e *privilegeError) Error() string { s := "Could not enable privilege " if len(e.privileges) > 1 { s = "Could not enable privileges " diff --git a/internal/fs/sd_windows_test.go b/internal/fs/sd_windows_test.go index e78241ed356..c31b19b8b23 100644 --- a/internal/fs/sd_windows_test.go +++ b/internal/fs/sd_windows_test.go @@ -28,7 +28,7 @@ func TestSetGetFileSecurityDescriptors(t *testing.T) { } }() - testSecurityDescriptors(t, TestFileSDs, testfilePath) + testSecurityDescriptors(t, testFileSDs, testfilePath) } func TestSetGetFolderSecurityDescriptors(t *testing.T) { @@ -40,7 +40,7 @@ func TestSetGetFolderSecurityDescriptors(t *testing.T) { t.Fatalf("failed to create temporary file: %s", err) } - testSecurityDescriptors(t, TestDirSDs, testfolderPath) + testSecurityDescriptors(t, testDirSDs, testfolderPath) } func testSecurityDescriptors(t *testing.T, testSDs []string, testPath string) { @@ -48,13 +48,13 @@ func testSecurityDescriptors(t *testing.T, testSDs []string, testPath string) { sdInputBytes, err := base64.StdEncoding.DecodeString(testSD) test.OK(t, errors.Wrapf(err, "Error decoding SD: %s", testPath)) - err = SetSecurityDescriptor(testPath, &sdInputBytes) + err = setSecurityDescriptor(testPath, &sdInputBytes) test.OK(t, errors.Wrapf(err, "Error setting file security descriptor for: %s", testPath)) var sdOutputBytes *[]byte - sdOutputBytes, err = GetSecurityDescriptor(testPath) + sdOutputBytes, err = getSecurityDescriptor(testPath) test.OK(t, errors.Wrapf(err, "Error getting file security descriptor for: %s", testPath)) - CompareSecurityDescriptors(t, testPath, sdInputBytes, *sdOutputBytes) + compareSecurityDescriptors(t, testPath, sdInputBytes, *sdOutputBytes) } } diff --git a/internal/fs/sd_windows_test_helpers.go b/internal/fs/sd_windows_test_helpers.go index 8b3be5fd7c8..0e888884ae0 100644 --- a/internal/fs/sd_windows_test_helpers.go +++ b/internal/fs/sd_windows_test_helpers.go @@ -13,18 +13,18 @@ import ( ) var ( - TestFileSDs = []string{"AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAfAAEAAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABAUAP8BHwABAQAAAAAABRIAAAAAEBgA/wEfAAECAAAAAAAFIAAAACACAAAAECQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=", + testFileSDs = []string{"AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAfAAEAAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABAUAP8BHwABAQAAAAAABRIAAAAAEBgA/wEfAAECAAAAAAAFIAAAACACAAAAECQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=", "AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAyAAHAAAAAAAUAKkAEgABAQAAAAAABQcAAAAAABQAiQASAAEBAAAAAAAFBwAAAAAAJACpABIAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar7QMAAAAAJAC/ARMAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar6gMAAAAAFAD/AR8AAQEAAAAAAAUSAAAAAAAYAP8BHwABAgAAAAAABSAAAAAgAgAAAAAkAP8BHwABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAA", "AQAUvBQAAAAwAAAA7AAAAEwAAAABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAoAAFAAAAAAAkAP8BHwABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABAUAP8BHwABAQAAAAAABRIAAAAAEBgA/wEfAAECAAAAAAAFIAAAACACAAAAECQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAACAHQAAwAAAAKAJAC/AQIAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtgQAAALAJAC/AQMAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDPgkAAAJAJAD/AQ8AAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtQQAAA==", } - TestDirSDs = 
[]string{"AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAfAAEAAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABMUAP8BHwABAQAAAAAABRIAAAAAExgA/wEfAAECAAAAAAAFIAAAACACAAAAEyQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=", + testDirSDs = []string{"AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAfAAEAAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABMUAP8BHwABAQAAAAAABRIAAAAAExgA/wEfAAECAAAAAAAFIAAAACACAAAAEyQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=", "AQAUvBQAAAAwAAAAAAAAAEwAAAABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvqAwAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIA3AAIAAAAAAIUAKkAEgABAQAAAAAABQcAAAAAAxQAiQASAAEBAAAAAAAFBwAAAAAAJACpABIAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar7QMAAAAAJAC/ARMAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSar6gMAAAALFAC/ARMAAQEAAAAAAAMAAAAAABMUAP8BHwABAQAAAAAABRIAAAAAExgA/wEfAAECAAAAAAAFIAAAACACAAAAEyQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAA=", "AQAUvBQAAAAwAAAA7AAAAEwAAAABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAQUAAAAAAAUVAAAAiJ9YrlaggurMvSarAQIAAAIAoAAFAAAAAAAkAP8BHwABBQAAAAAABRUAAAAvr7t03PyHGk2FokNHCAAAAAAkAKkAEgABBQAAAAAABRUAAACIn1iuVqCC6sy9JqvtAwAAABMUAP8BHwABAQAAAAAABRIAAAAAExgA/wEfAAECAAAAAAAFIAAAACACAAAAEyQA/wEfAAEFAAAAAAAFFQAAAIifWK5WoILqzL0mq+oDAAACAHQAAwAAAAKAJAC/AQIAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtgQAAALAJAC/AQMAAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDPgkAAAJAJAD/AQ8AAQUAAAAAAAUVAAAAL6+7dNz8hxpNhaJDtQQAAA==", } ) -// IsAdmin checks if current user is an administrator. -func IsAdmin() (isAdmin bool, err error) { +// isAdmin checks if current user is an administrator. +func isAdmin() (isAdmin bool, err error) { var sid *windows.SID err = windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, 2, windows.SECURITY_BUILTIN_DOMAIN_RID, windows.DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0, 0, &sid) @@ -40,15 +40,15 @@ func IsAdmin() (isAdmin bool, err error) { return member, nil } -// CompareSecurityDescriptors runs tests for comparing 2 security descriptors in []byte format. -func CompareSecurityDescriptors(t *testing.T, testPath string, sdInputBytes, sdOutputBytes []byte) { - sdInput, err := SecurityDescriptorBytesToStruct(sdInputBytes) +// compareSecurityDescriptors runs tests for comparing 2 security descriptors in []byte format. +func compareSecurityDescriptors(t *testing.T, testPath string, sdInputBytes, sdOutputBytes []byte) { + sdInput, err := securityDescriptorBytesToStruct(sdInputBytes) test.OK(t, errors.Wrapf(err, "Error converting SD to struct for: %s", testPath)) - sdOutput, err := SecurityDescriptorBytesToStruct(sdOutputBytes) + sdOutput, err := securityDescriptorBytesToStruct(sdOutputBytes) test.OK(t, errors.Wrapf(err, "Error converting SD to struct for: %s", testPath)) - isAdmin, err := IsAdmin() + isAdmin, err := isAdmin() test.OK(t, errors.Wrapf(err, "Error checking if user is admin: %s", testPath)) var ownerExpected *windows.SID diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 8bfffab71cf..3215c9aa3cd 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -33,9 +33,9 @@ func HasSufficientPrivilegesForVSS() error { return errors.New("VSS snapshots are only supported on windows") } -// GetVolumeNameForVolumeMountPoint add trailing backslash to input parameter +// getVolumeNameForVolumeMountPoint add trailing backslash to input parameter // and calls the equivalent windows api. 
-func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { +func getVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { return mountPoint, nil } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 0b51b00f398..bb168c55326 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -22,6 +22,7 @@ import ( type HRESULT uint // HRESULT constant values necessary for using VSS api. +// //nolint:golint const ( S_OK HRESULT = 0x00000000 @@ -830,9 +831,9 @@ func HasSufficientPrivilegesForVSS() error { return err } -// GetVolumeNameForVolumeMountPoint add trailing backslash to input parameter +// getVolumeNameForVolumeMountPoint add trailing backslash to input parameter // and calls the equivalent windows api. -func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { +func getVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator { mountPoint += string(filepath.Separator) } From f0329bb4e6f1cad84a09758f43151264071581e9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Aug 2024 23:43:45 +0200 Subject: [PATCH 658/893] fs: replace statT with ExtendedFileInfo --- internal/fs/node.go | 47 ++++++++++--------------- internal/fs/node_aix.go | 14 +------- internal/fs/node_darwin.go | 4 --- internal/fs/node_freebsd.go | 4 --- internal/fs/node_linux.go | 4 --- internal/fs/node_netbsd.go | 7 +--- internal/fs/node_openbsd.go | 7 +--- internal/fs/node_solaris.go | 4 --- internal/fs/node_unix.go | 19 ---------- internal/fs/node_unix_test.go | 46 +++++++++---------------- internal/fs/node_windows.go | 59 ++++---------------------------- internal/fs/node_windows_test.go | 6 ++-- internal/fs/node_xattr.go | 2 +- internal/fs/stat_windows.go | 3 +- 14 files changed, 50 insertions(+), 176 deletions(-) diff --git a/internal/fs/node.go b/internal/fs/node.go index 2dfe5417241..a8aa3c40895 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -6,7 +6,6 @@ import ( "strconv" "sync" "syscall" - "time" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" @@ -57,8 +56,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) restic.NodeType { } func nodeFillExtra(node *restic.Node, path string, fi os.FileInfo, ignoreXattrListError bool) error { - stat, ok := toStatT(fi.Sys()) - if !ok { + if fi.Sys() == nil { // fill minimal info with current values for uid, gid node.UID = uint32(os.Getuid()) node.GID = uint32(os.Getgid()) @@ -66,38 +64,43 @@ func nodeFillExtra(node *restic.Node, path string, fi os.FileInfo, ignoreXattrLi return nil } - node.Inode = uint64(stat.ino()) - node.DeviceID = uint64(stat.dev()) + stat := ExtendedStat(fi) - nodeFillTimes(node, stat) + node.Inode = stat.Inode + node.DeviceID = stat.DeviceID + node.ChangeTime = stat.ChangeTime + node.AccessTime = stat.AccessTime - nodeFillUser(node, stat) + node.UID = stat.UID + node.GID = stat.GID + node.User = lookupUsername(stat.UID) + node.Group = lookupGroup(stat.GID) switch node.Type { case restic.NodeTypeFile: - node.Size = uint64(stat.size()) - node.Links = uint64(stat.nlink()) + node.Size = uint64(stat.Size) + node.Links = stat.Links case restic.NodeTypeDir: case restic.NodeTypeSymlink: var err error node.LinkTarget, err = os.Readlink(fixpath(path)) - node.Links = uint64(stat.nlink()) + node.Links = stat.Links if err != nil { return errors.WithStack(err) } case restic.NodeTypeDev: - node.Device = uint64(stat.rdev()) - node.Links = 
uint64(stat.nlink()) + node.Device = stat.Device + node.Links = stat.Links case restic.NodeTypeCharDev: - node.Device = uint64(stat.rdev()) - node.Links = uint64(stat.nlink()) + node.Device = stat.Device + node.Links = stat.Links case restic.NodeTypeFifo: case restic.NodeTypeSocket: default: return errors.Errorf("unsupported file type %q", node.Type) } - allowExtended, err := nodeFillGenericAttributes(node, path, fi, stat) + allowExtended, err := nodeFillGenericAttributes(node, path, &stat) if allowExtended { // Skip processing ExtendedAttributes if allowExtended is false. err = errors.CombineErrors(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) @@ -105,20 +108,6 @@ func nodeFillExtra(node *restic.Node, path string, fi os.FileInfo, ignoreXattrLi return err } -func nodeFillTimes(node *restic.Node, stat *statT) { - ctim := stat.ctim() - atim := stat.atim() - node.ChangeTime = time.Unix(ctim.Unix()) - node.AccessTime = time.Unix(atim.Unix()) -} - -func nodeFillUser(node *restic.Node, stat *statT) { - uid, gid := stat.uid(), stat.gid() - node.UID, node.GID = uid, gid - node.User = lookupUsername(uid) - node.Group = lookupGroup(gid) -} - var ( uidLookupCache = make(map[uint32]string) uidLookupCacheMutex = sync.RWMutex{} diff --git a/internal/fs/node_aix.go b/internal/fs/node_aix.go index 123985c2d9b..2967c6db77a 100644 --- a/internal/fs/node_aix.go +++ b/internal/fs/node_aix.go @@ -4,7 +4,6 @@ package fs import ( - "os" "syscall" "github.com/restic/restic/internal/restic" @@ -14,17 +13,6 @@ func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { return nil } -// AIX has a funny timespec type in syscall, with 32-bit nanoseconds. -// golang.org/x/sys/unix handles this cleanly, but we're stuck with syscall -// because os.Stat returns a syscall type in its os.FileInfo.Sys(). -func toTimespec(t syscall.StTimespec_t) syscall.Timespec { - return syscall.Timespec{Sec: t.Sec, Nsec: int64(t.Nsec)} -} - -func (s statT) atim() syscall.Timespec { return toTimespec(s.Atim) } -func (s statT) mtim() syscall.Timespec { return toTimespec(s.Mtim) } -func (s statT) ctim() syscall.Timespec { return toTimespec(s.Ctim) } - // nodeRestoreExtendedAttributes is a no-op on AIX. func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { return nil @@ -46,6 +34,6 @@ func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg str } // nodeFillGenericAttributes is a no-op on AIX. 
-func nodeFillGenericAttributes(_ *restic.Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { return true, nil } diff --git a/internal/fs/node_darwin.go b/internal/fs/node_darwin.go index 1ca7ce4806c..f4c843498ce 100644 --- a/internal/fs/node_darwin.go +++ b/internal/fs/node_darwin.go @@ -5,7 +5,3 @@ import "syscall" func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { return nil } - -func (s statT) atim() syscall.Timespec { return s.Atimespec } -func (s statT) mtim() syscall.Timespec { return s.Mtimespec } -func (s statT) ctim() syscall.Timespec { return s.Ctimespec } diff --git a/internal/fs/node_freebsd.go b/internal/fs/node_freebsd.go index 8796358b022..1b2f2fc7e94 100644 --- a/internal/fs/node_freebsd.go +++ b/internal/fs/node_freebsd.go @@ -12,7 +12,3 @@ func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error func mknod(path string, mode uint32, dev uint64) (err error) { return syscall.Mknod(path, mode, dev) } - -func (s statT) atim() syscall.Timespec { return s.Atimespec } -func (s statT) mtim() syscall.Timespec { return s.Mtimespec } -func (s statT) ctim() syscall.Timespec { return s.Ctimespec } diff --git a/internal/fs/node_linux.go b/internal/fs/node_linux.go index 4cb9bdeaa67..91ef4f90747 100644 --- a/internal/fs/node_linux.go +++ b/internal/fs/node_linux.go @@ -31,7 +31,3 @@ func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error return dir.Close() } - -func (s statT) atim() syscall.Timespec { return s.Atim } -func (s statT) mtim() syscall.Timespec { return s.Mtim } -func (s statT) ctim() syscall.Timespec { return s.Ctim } diff --git a/internal/fs/node_netbsd.go b/internal/fs/node_netbsd.go index 9961258515d..00e6c92ca71 100644 --- a/internal/fs/node_netbsd.go +++ b/internal/fs/node_netbsd.go @@ -1,7 +1,6 @@ package fs import ( - "os" "syscall" "github.com/restic/restic/internal/restic" @@ -11,10 +10,6 @@ func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { return nil } -func (s statT) atim() syscall.Timespec { return s.Atimespec } -func (s statT) mtim() syscall.Timespec { return s.Mtimespec } -func (s statT) ctim() syscall.Timespec { return s.Ctimespec } - // nodeRestoreExtendedAttributes is a no-op on netbsd. func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { return nil @@ -36,6 +31,6 @@ func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg str } // nodeFillGenericAttributes is a no-op on netbsd. -func nodeFillGenericAttributes(_ *restic.Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { return true, nil } diff --git a/internal/fs/node_openbsd.go b/internal/fs/node_openbsd.go index 62eb7861815..590c603a39f 100644 --- a/internal/fs/node_openbsd.go +++ b/internal/fs/node_openbsd.go @@ -1,7 +1,6 @@ package fs import ( - "os" "syscall" "github.com/restic/restic/internal/restic" @@ -11,10 +10,6 @@ func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { return nil } -func (s statT) atim() syscall.Timespec { return s.Atim } -func (s statT) mtim() syscall.Timespec { return s.Mtim } -func (s statT) ctim() syscall.Timespec { return s.Ctim } - // nodeRestoreExtendedAttributes is a no-op on openbsd. 
func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { return nil @@ -36,6 +31,6 @@ func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg str } // fillGenericAttributes is a no-op on openbsd. -func nodeFillGenericAttributes(_ *restic.Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { return true, nil } diff --git a/internal/fs/node_solaris.go b/internal/fs/node_solaris.go index 3f025b334be..f4c843498ce 100644 --- a/internal/fs/node_solaris.go +++ b/internal/fs/node_solaris.go @@ -5,7 +5,3 @@ import "syscall" func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { return nil } - -func (s statT) atim() syscall.Timespec { return s.Atim } -func (s statT) mtim() syscall.Timespec { return s.Mtim } -func (s statT) ctim() syscall.Timespec { return s.Ctim } diff --git a/internal/fs/node_unix.go b/internal/fs/node_unix.go index fb247ac99f4..5f08f362316 100644 --- a/internal/fs/node_unix.go +++ b/internal/fs/node_unix.go @@ -5,27 +5,8 @@ package fs import ( "os" - "syscall" ) func lchown(name string, uid, gid int) error { return os.Lchown(name, uid, gid) } - -type statT syscall.Stat_t - -func toStatT(i interface{}) (*statT, bool) { - s, ok := i.(*syscall.Stat_t) - if ok && s != nil { - return (*statT)(s), true - } - return nil, false -} - -func (s statT) dev() uint64 { return uint64(s.Dev) } -func (s statT) ino() uint64 { return uint64(s.Ino) } -func (s statT) nlink() uint64 { return uint64(s.Nlink) } -func (s statT) uid() uint32 { return uint32(s.Uid) } -func (s statT) gid() uint32 { return uint32(s.Gid) } -func (s statT) rdev() uint64 { return uint64(s.Rdev) } -func (s statT) size() int64 { return int64(s.Size) } diff --git a/internal/fs/node_unix_test.go b/internal/fs/node_unix_test.go index 3658c835662..4d01b6cc571 100644 --- a/internal/fs/node_unix_test.go +++ b/internal/fs/node_unix_test.go @@ -4,12 +4,12 @@ package fs import ( + "io/fs" "os" "path/filepath" "runtime" "syscall" "testing" - "time" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -28,8 +28,11 @@ func stat(t testing.TB, filename string) (fi os.FileInfo, ok bool) { return fi, true } -func checkFile(t testing.TB, stat *syscall.Stat_t, node *restic.Node) { +func checkFile(t testing.TB, fi fs.FileInfo, node *restic.Node) { t.Helper() + + stat := fi.Sys().(*syscall.Stat_t) + if uint32(node.Mode.Perm()) != uint32(stat.Mode&0777) { t.Errorf("Mode does not match, want %v, got %v", stat.Mode&0777, node.Mode) } @@ -59,29 +62,20 @@ func checkFile(t testing.TB, stat *syscall.Stat_t, node *restic.Node) { } // use the os dependent function to compare the timestamps - s, ok := toStatT(stat) - if !ok { - return + s := ExtendedStat(fi) + if node.ModTime != s.ModTime { + t.Errorf("ModTime does not match, want %v, got %v", s.ModTime, node.ModTime) } - - mtime := s.mtim() - if node.ModTime != time.Unix(mtime.Unix()) { - t.Errorf("ModTime does not match, want %v, got %v", time.Unix(mtime.Unix()), node.ModTime) - } - - ctime := s.ctim() - if node.ChangeTime != time.Unix(ctime.Unix()) { - t.Errorf("ChangeTime does not match, want %v, got %v", time.Unix(ctime.Unix()), node.ChangeTime) + if node.ChangeTime != s.ChangeTime { + t.Errorf("ChangeTime does not match, want %v, got %v", s.ChangeTime, node.ChangeTime) } - - atime := s.atim() - if node.AccessTime != time.Unix(atime.Unix()) { - t.Errorf("AccessTime does not 
match, want %v, got %v", time.Unix(atime.Unix()), node.AccessTime) + if node.AccessTime != s.AccessTime { + t.Errorf("AccessTime does not match, want %v, got %v", s.AccessTime, node.AccessTime) } - } -func checkDevice(t testing.TB, stat *syscall.Stat_t, node *restic.Node) { +func checkDevice(t testing.TB, fi fs.FileInfo, node *restic.Node) { + stat := fi.Sys().(*syscall.Stat_t) if node.Device != uint64(stat.Rdev) { t.Errorf("Rdev does not match, want %v, got %v", stat.Rdev, node.Device) } @@ -123,12 +117,6 @@ func TestNodeFromFileInfo(t *testing.T) { return } - s, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - t.Skipf("fi type is %T, not stat_t", fi.Sys()) - return - } - node, err := NodeFromFileInfo(test.filename, fi, false) if err != nil { t.Fatal(err) @@ -136,10 +124,10 @@ func TestNodeFromFileInfo(t *testing.T) { switch node.Type { case restic.NodeTypeFile, restic.NodeTypeSymlink: - checkFile(t, s, node) + checkFile(t, fi, node) case restic.NodeTypeDev, restic.NodeTypeCharDev: - checkFile(t, s, node) - checkDevice(t, s, node) + checkFile(t, fi, node) + checkDevice(t, fi, node) default: t.Fatalf("invalid node type %q", node.Type) } diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index d529e35039f..0a9064fc5d2 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -3,7 +3,6 @@ package fs import ( "encoding/json" "fmt" - "os" "path/filepath" "reflect" "runtime" @@ -175,40 +174,6 @@ func restoreExtendedAttributes(nodeType restic.NodeType, path string, eas []exte return nil } -type statT syscall.Win32FileAttributeData - -func toStatT(i interface{}) (*statT, bool) { - s, ok := i.(*syscall.Win32FileAttributeData) - if ok && s != nil { - return (*statT)(s), true - } - return nil, false -} - -func (s statT) dev() uint64 { return 0 } -func (s statT) ino() uint64 { return 0 } -func (s statT) nlink() uint64 { return 0 } -func (s statT) uid() uint32 { return 0 } -func (s statT) gid() uint32 { return 0 } -func (s statT) rdev() uint64 { return 0 } - -func (s statT) size() int64 { - return int64(s.FileSizeLow) | (int64(s.FileSizeHigh) << 32) -} - -func (s statT) atim() syscall.Timespec { - return syscall.NsecToTimespec(s.LastAccessTime.Nanoseconds()) -} - -func (s statT) mtim() syscall.Timespec { - return syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds()) -} - -func (s statT) ctim() syscall.Timespec { - // Windows does not have the concept of a "change time" in the sense Unix uses it, so we're using the LastWriteTime here. - return s.mtim() -} - // restoreGenericAttributes restores generic attributes for Windows func nodeRestoreGenericAttributes(node *restic.Node, path string, warn func(msg string)) (err error) { if len(node.GenericAttributes) == 0 { @@ -365,7 +330,7 @@ func decryptFile(pathPointer *uint16) error { // Created time and Security Descriptors. // It also checks if the volume supports extended attributes and stores the result in a map // so that it does not have to be checked again for subsequent calls for paths in the same volume. -func nodeFillGenericAttributes(node *restic.Node, path string, fi os.FileInfo, stat *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFileInfo) (allowExtended bool, err error) { if strings.Contains(filepath.Base(path), ":") { // Do not process for Alternate Data Streams in Windows // Also do not allow processing of extended attributes for ADS. 
@@ -396,10 +361,13 @@ func nodeFillGenericAttributes(node *restic.Node, path string, fi os.FileInfo, s return allowExtended, err } } + + winFI := stat.Sys().(*syscall.Win32FileAttributeData) + // Add Windows attributes node.GenericAttributes, err = WindowsAttrsToGenericAttributes(WindowsAttributes{ - CreationTime: getCreationTime(fi, path), - FileAttributes: &stat.FileAttributes, + CreationTime: &winFI.CreationTime, + FileAttributes: &winFI.FileAttributes, SecurityDescriptor: sd, }) return allowExtended, err @@ -501,18 +469,3 @@ func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs windowsAttributesValue := reflect.ValueOf(windowsAttributes) return restic.OSAttrsToGenericAttributes(reflect.TypeOf(windowsAttributes), &windowsAttributesValue, runtime.GOOS) } - -// getCreationTime gets the value for the WindowsAttribute CreationTime in a windows specific time format. -// The value is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601 (UTC) -// split into two 32-bit parts: the low-order DWORD and the high-order DWORD for efficiency and interoperability. -// The low-order DWORD represents the number of 100-nanosecond intervals elapsed since January 1, 1601, modulo -// 2^32. The high-order DWORD represents the number of times the low-order DWORD has overflowed. -func getCreationTime(fi os.FileInfo, path string) (creationTimeAttribute *syscall.Filetime) { - attrib, success := fi.Sys().(*syscall.Win32FileAttributeData) - if success && attrib != nil { - return &attrib.CreationTime - } else { - debug.Log("Could not get create time for path: %s", path) - return nil - } -} diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index 61836487915..218e729cec6 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -80,10 +80,10 @@ func TestRestoreCreationTime(t *testing.T) { path := t.TempDir() fi, err := os.Lstat(path) test.OK(t, errors.Wrapf(err, "Could not Lstat for path: %s", path)) - creationTimeAttribute := getCreationTime(fi, path) - test.OK(t, errors.Wrapf(err, "Could not get creation time for path: %s", path)) + attr := fi.Sys().(*syscall.Win32FileAttributeData) + creationTimeAttribute := attr.CreationTime //Using the temp dir creation time as the test creation time for the test file and folder - runGenericAttributesTest(t, path, restic.TypeCreationTime, WindowsAttributes{CreationTime: creationTimeAttribute}, false) + runGenericAttributesTest(t, path, restic.TypeCreationTime, WindowsAttributes{CreationTime: &creationTimeAttribute}, false) } func TestRestoreFileAttributes(t *testing.T) { diff --git a/internal/fs/node_xattr.go b/internal/fs/node_xattr.go index 55376ba58eb..1781452f700 100644 --- a/internal/fs/node_xattr.go +++ b/internal/fs/node_xattr.go @@ -71,7 +71,7 @@ func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg str } // nodeFillGenericAttributes is a no-op. 
-func nodeFillGenericAttributes(_ *restic.Node, _ string, _ os.FileInfo, _ *statT) (allowExtended bool, err error) { +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { return true, nil } diff --git a/internal/fs/stat_windows.go b/internal/fs/stat_windows.go index ee678d92ac6..57f330fb54d 100644 --- a/internal/fs/stat_windows.go +++ b/internal/fs/stat_windows.go @@ -19,7 +19,7 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo { extFI := ExtendedFileInfo{ FileInfo: fi, - Size: int64(s.FileSizeLow) + int64(s.FileSizeHigh)<<32, + Size: int64(s.FileSizeLow) | (int64(s.FileSizeHigh) << 32), } atime := syscall.NsecToTimespec(s.LastAccessTime.Nanoseconds()) @@ -28,6 +28,7 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo { mtime := syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds()) extFI.ModTime = time.Unix(mtime.Unix()) + // Windows does not have the concept of a "change time" in the sense Unix uses it, so we're using the LastWriteTime here. extFI.ChangeTime = extFI.ModTime return extFI From 6c16733dfd54a821948866983ec5e2503d832afc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 15:15:27 +0200 Subject: [PATCH 659/893] fs: remove unused methods from File interface --- internal/fs/fs_reader.go | 8 -------- internal/fs/interface.go | 2 -- 2 files changed, 10 deletions(-) diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 9dd5ff6eb8f..490a6b68d1d 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -229,18 +229,10 @@ type fakeFile struct { // ensure that fakeFile implements File var _ File = fakeFile{} -func (f fakeFile) Fd() uintptr { - return 0 -} - func (f fakeFile) Readdirnames(_ int) ([]string, error) { return nil, pathError("readdirnames", f.name, os.ErrInvalid) } -func (f fakeFile) Seek(int64, int) (int64, error) { - return 0, pathError("seek", f.name, os.ErrInvalid) -} - func (f fakeFile) Read(_ []byte) (int, error) { return 0, pathError("read", f.name, os.ErrInvalid) } diff --git a/internal/fs/interface.go b/internal/fs/interface.go index 7707abd1d73..dcd16a0b34d 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -29,9 +29,7 @@ type File interface { io.Reader io.Closer - Fd() uintptr Readdirnames(n int) ([]string, error) - Seek(int64, int) (int64, error) Stat() (os.FileInfo, error) Name() string } From 2aa1e2615b7c2f201ca30812f17b883a00b00725 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 15:34:39 +0200 Subject: [PATCH 660/893] fs: fix comments --- internal/fs/fs_local_vss.go | 4 ++-- internal/fs/fs_reader_command.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 27abcf9eab4..908e744eee2 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -130,12 +130,12 @@ func (fs *LocalVss) OpenFile(name string, flag int, perm os.FileMode) (File, err return os.OpenFile(fs.snapshotPath(name), flag, perm) } -// Stat wraps the Open method of the underlying file system. +// Stat wraps the Stat method of the underlying file system. func (fs *LocalVss) Stat(name string) (os.FileInfo, error) { return os.Stat(fs.snapshotPath(name)) } -// Lstat wraps the Open method of the underlying file system. +// Lstat wraps the Lstat method of the underlying file system. 
func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fs.snapshotPath(name)) } diff --git a/internal/fs/fs_reader_command.go b/internal/fs/fs_reader_command.go index 6d061f64123..2fa4375dd2a 100644 --- a/internal/fs/fs_reader_command.go +++ b/internal/fs/fs_reader_command.go @@ -10,7 +10,7 @@ import ( "github.com/restic/restic/internal/errors" ) -// CommandReader wrap a command such that its standard output can be read using +// CommandReader wraps a command such that its standard output can be read using // a io.ReadCloser. Close() waits for the command to terminate, reporting // any error back to the caller. type CommandReader struct { From ebdd946ac152c53977d5157714d832eb56146d63 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 15:34:53 +0200 Subject: [PATCH 661/893] fs: unexport nodeRestoreTimestamps --- internal/fs/node.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/fs/node.go b/internal/fs/node.go index a8aa3c40895..280e290c256 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -284,7 +284,7 @@ func nodeRestoreMetadata(node *restic.Node, path string, warn func(msg string)) } } - if err := NodeRestoreTimestamps(node, path); err != nil { + if err := nodeRestoreTimestamps(node, path); err != nil { debug.Log("error restoring timestamps for %v: %v", path, err) if firsterr == nil { firsterr = err @@ -305,7 +305,7 @@ func nodeRestoreMetadata(node *restic.Node, path string, warn func(msg string)) return firsterr } -func NodeRestoreTimestamps(node *restic.Node, path string) error { +func nodeRestoreTimestamps(node *restic.Node, path string) error { var utimes = [...]syscall.Timespec{ syscall.NsecToTimespec(node.AccessTime.UnixNano()), syscall.NsecToTimespec(node.ModTime.UnixNano()), From e2bce1b9eefd1ed9cdc77e727c5c245045ba49bb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 27 Aug 2024 16:00:29 +0200 Subject: [PATCH 662/893] fs: move WindowsAttributes definition back to restic package --- internal/fs/node_windows.go | 23 ++----------------- internal/fs/node_windows_test.go | 22 +++++++++--------- internal/restic/node_windows.go | 26 ++++++++++++++++++++++ internal/restorer/restorer_windows_test.go | 3 +-- 4 files changed, 40 insertions(+), 34 deletions(-) create mode 100644 internal/restic/node_windows.go diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index 0a9064fc5d2..9d46143cc82 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -5,7 +5,6 @@ import ( "fmt" "path/filepath" "reflect" - "runtime" "strings" "sync" "syscall" @@ -17,17 +16,6 @@ import ( "golang.org/x/sys/windows" ) -// WindowsAttributes are the genericAttributes for Windows OS -type WindowsAttributes struct { - // CreationTime is used for storing creation time for windows files. - CreationTime *syscall.Filetime `generic:"creation_time"` - // FileAttributes is used for storing file attributes for windows files. 
- FileAttributes *uint32 `generic:"file_attributes"` - // SecurityDescriptor is used for storing security descriptors which includes - // owner, group, discretionary access control list (DACL), system access control list (SACL) - SecurityDescriptor *[]byte `generic:"security_descriptor"` -} - var ( modAdvapi32 = syscall.NewLazyDLL("advapi32.dll") procEncryptFile = modAdvapi32.NewProc("EncryptFileW") @@ -205,7 +193,7 @@ func nodeRestoreGenericAttributes(node *restic.Node, path string, warn func(msg } // genericAttributesToWindowsAttrs converts the generic attributes map to a WindowsAttributes and also returns a string of unknown attributes that it could not convert. -func genericAttributesToWindowsAttrs(attrs map[restic.GenericAttributeType]json.RawMessage) (windowsAttributes WindowsAttributes, unknownAttribs []restic.GenericAttributeType, err error) { +func genericAttributesToWindowsAttrs(attrs map[restic.GenericAttributeType]json.RawMessage) (windowsAttributes restic.WindowsAttributes, unknownAttribs []restic.GenericAttributeType, err error) { waValue := reflect.ValueOf(&windowsAttributes).Elem() unknownAttribs, err = restic.GenericAttributesToOSAttrs(attrs, reflect.TypeOf(windowsAttributes), &waValue, "windows") return windowsAttributes, unknownAttribs, err @@ -365,7 +353,7 @@ func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFil winFI := stat.Sys().(*syscall.Win32FileAttributeData) // Add Windows attributes - node.GenericAttributes, err = WindowsAttrsToGenericAttributes(WindowsAttributes{ + node.GenericAttributes, err = restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{ CreationTime: &winFI.CreationTime, FileAttributes: &winFI.FileAttributes, SecurityDescriptor: sd, @@ -462,10 +450,3 @@ func prepareVolumeName(path string) (volumeName string, err error) { } return volumeName, nil } - -// windowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection -func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs map[restic.GenericAttributeType]json.RawMessage, err error) { - // Get the value of the WindowsAttributes - windowsAttributesValue := reflect.ValueOf(windowsAttributes) - return restic.OSAttrsToGenericAttributes(reflect.TypeOf(windowsAttributes), &windowsAttributesValue, runtime.GOOS) -} diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index 218e729cec6..83ad72d5309 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -36,7 +36,7 @@ func testRestoreSecurityDescriptor(t *testing.T, sd string, tempDir string, file sdInputBytes, err := base64.StdEncoding.DecodeString(sd) test.OK(t, errors.Wrapf(err, "Error decoding SD for: %s", fileName)) // Wrap the security descriptor bytes in windows attributes and convert to generic attributes. - genericAttributes, err := WindowsAttrsToGenericAttributes(WindowsAttributes{CreationTime: nil, FileAttributes: nil, SecurityDescriptor: &sdInputBytes}) + genericAttributes, err := restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{CreationTime: nil, FileAttributes: nil, SecurityDescriptor: &sdInputBytes}) test.OK(t, errors.Wrapf(err, "Error constructing windows attributes for: %s", fileName)) // Construct a Node with the generic attributes. 
expectedNode := getNode(fileName, fileType, genericAttributes) @@ -68,7 +68,7 @@ func getNode(name string, fileType restic.NodeType, genericAttributes map[restic } } -func getWindowsAttr(t *testing.T, testPath string, node *restic.Node) WindowsAttributes { +func getWindowsAttr(t *testing.T, testPath string, node *restic.Node) restic.WindowsAttributes { windowsAttributes, unknownAttribs, err := genericAttributesToWindowsAttrs(node.GenericAttributes) test.OK(t, errors.Wrapf(err, "Error getting windows attr from generic attr: %s", testPath)) test.Assert(t, len(unknownAttribs) == 0, "Unknown attribs found: %s for: %s", unknownAttribs, testPath) @@ -83,7 +83,7 @@ func TestRestoreCreationTime(t *testing.T) { attr := fi.Sys().(*syscall.Win32FileAttributeData) creationTimeAttribute := attr.CreationTime //Using the temp dir creation time as the test creation time for the test file and folder - runGenericAttributesTest(t, path, restic.TypeCreationTime, WindowsAttributes{CreationTime: &creationTimeAttribute}, false) + runGenericAttributesTest(t, path, restic.TypeCreationTime, restic.WindowsAttributes{CreationTime: &creationTimeAttribute}, false) } func TestRestoreFileAttributes(t *testing.T) { @@ -95,7 +95,7 @@ func TestRestoreFileAttributes(t *testing.T) { system := uint32(syscall.FILE_ATTRIBUTE_SYSTEM) archive := uint32(syscall.FILE_ATTRIBUTE_ARCHIVE) encrypted := uint32(windows.FILE_ATTRIBUTE_ENCRYPTED) - fileAttributes := []WindowsAttributes{ + fileAttributes := []restic.WindowsAttributes{ //normal {FileAttributes: &normal}, //hidden @@ -108,7 +108,7 @@ func TestRestoreFileAttributes(t *testing.T) { {FileAttributes: &encrypted}, } for i, fileAttr := range fileAttributes { - genericAttrs, err := WindowsAttrsToGenericAttributes(fileAttr) + genericAttrs, err := restic.WindowsAttrsToGenericAttributes(fileAttr) test.OK(t, err) expectedNodes := []restic.Node{ { @@ -128,7 +128,7 @@ func TestRestoreFileAttributes(t *testing.T) { system = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | windows.FILE_ATTRIBUTE_SYSTEM) archive = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | windows.FILE_ATTRIBUTE_ARCHIVE) encrypted = uint32(syscall.FILE_ATTRIBUTE_DIRECTORY | windows.FILE_ATTRIBUTE_ENCRYPTED) - folderAttributes := []WindowsAttributes{ + folderAttributes := []restic.WindowsAttributes{ //normal {FileAttributes: &normal}, //hidden @@ -141,7 +141,7 @@ func TestRestoreFileAttributes(t *testing.T) { {FileAttributes: &encrypted}, } for i, folderAttr := range folderAttributes { - genericAttrs, err := WindowsAttrsToGenericAttributes(folderAttr) + genericAttrs, err := restic.WindowsAttrsToGenericAttributes(folderAttr) test.OK(t, err) expectedNodes := []restic.Node{ { @@ -158,8 +158,8 @@ func TestRestoreFileAttributes(t *testing.T) { } } -func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName restic.GenericAttributeType, genericAttributeExpected WindowsAttributes, warningExpected bool) { - genericAttributes, err := WindowsAttrsToGenericAttributes(genericAttributeExpected) +func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName restic.GenericAttributeType, genericAttributeExpected restic.WindowsAttributes, warningExpected bool) { + genericAttributes, err := restic.WindowsAttrsToGenericAttributes(genericAttributeExpected) test.OK(t, err) expectedNodes := []restic.Node{ { @@ -183,12 +183,12 @@ func runGenericAttributesTest(t *testing.T, tempDir string, genericAttributeName } runGenericAttributesTestForNodes(t, expectedNodes, tempDir, genericAttributeName, genericAttributeExpected, 
warningExpected) } -func runGenericAttributesTestForNodes(t *testing.T, expectedNodes []restic.Node, tempDir string, genericAttr restic.GenericAttributeType, genericAttributeExpected WindowsAttributes, warningExpected bool) { +func runGenericAttributesTestForNodes(t *testing.T, expectedNodes []restic.Node, tempDir string, genericAttr restic.GenericAttributeType, genericAttributeExpected restic.WindowsAttributes, warningExpected bool) { for _, testNode := range expectedNodes { testPath, node := restoreAndGetNode(t, tempDir, &testNode, warningExpected) rawMessage := node.GenericAttributes[genericAttr] - genericAttrsExpected, err := WindowsAttrsToGenericAttributes(genericAttributeExpected) + genericAttrsExpected, err := restic.WindowsAttrsToGenericAttributes(genericAttributeExpected) test.OK(t, err) rawMessageExpected := genericAttrsExpected[genericAttr] test.Equals(t, rawMessageExpected, rawMessage, "Generic attribute: %s got from NodeFromFileInfo not equal for path: %s", string(genericAttr), testPath) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go new file mode 100644 index 00000000000..7df4266656b --- /dev/null +++ b/internal/restic/node_windows.go @@ -0,0 +1,26 @@ +package restic + +import ( + "encoding/json" + "reflect" + "runtime" + "syscall" +) + +// WindowsAttributes are the genericAttributes for Windows OS +type WindowsAttributes struct { + // CreationTime is used for storing creation time for windows files. + CreationTime *syscall.Filetime `generic:"creation_time"` + // FileAttributes is used for storing file attributes for windows files. + FileAttributes *uint32 `generic:"file_attributes"` + // SecurityDescriptor is used for storing security descriptors which includes + // owner, group, discretionary access control list (DACL), system access control list (SACL) + SecurityDescriptor *[]byte `generic:"security_descriptor"` +} + +// windowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection +func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs map[GenericAttributeType]json.RawMessage, err error) { + // Get the value of the WindowsAttributes + windowsAttributesValue := reflect.ValueOf(windowsAttributes) + return OSAttrsToGenericAttributes(reflect.TypeOf(windowsAttributes), &windowsAttributesValue, runtime.GOOS) +} diff --git a/internal/restorer/restorer_windows_test.go b/internal/restorer/restorer_windows_test.go index 9fcdfc48d39..4764bed2d4c 100644 --- a/internal/restorer/restorer_windows_test.go +++ b/internal/restorer/restorer_windows_test.go @@ -16,7 +16,6 @@ import ( "unsafe" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" @@ -264,7 +263,7 @@ func setup(t *testing.T, nodesMap map[string]Node) *Restorer { //If the node is a directory add FILE_ATTRIBUTE_DIRECTORY to attributes fileattr |= windows.FILE_ATTRIBUTE_DIRECTORY } - attrs, err := fs.WindowsAttrsToGenericAttributes(fs.WindowsAttributes{FileAttributes: &fileattr}) + attrs, err := restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{FileAttributes: &fileattr}) test.OK(t, err) return attrs } From b91ef3f1ff617549129437d1162e0f43f9628454 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 28 Aug 2024 11:01:27 +0200 Subject: [PATCH 663/893] fs: remove dead code --- internal/fs/node_aix.go | 5 ----- internal/fs/node_netbsd.go | 5 ----- 
internal/fs/node_openbsd.go | 5 ----- 3 files changed, 15 deletions(-) diff --git a/internal/fs/node_aix.go b/internal/fs/node_aix.go index 2967c6db77a..463ed1c33e1 100644 --- a/internal/fs/node_aix.go +++ b/internal/fs/node_aix.go @@ -23,11 +23,6 @@ func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { return nil } -// isListxattrPermissionError is a no-op on AIX. -func isListxattrPermissionError(_ error) bool { - return false -} - // nodeRestoreGenericAttributes is no-op on AIX. func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) diff --git a/internal/fs/node_netbsd.go b/internal/fs/node_netbsd.go index 00e6c92ca71..182050da072 100644 --- a/internal/fs/node_netbsd.go +++ b/internal/fs/node_netbsd.go @@ -20,11 +20,6 @@ func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { return nil } -// isListxattrPermissionError is a no-op on netbsd. -func isListxattrPermissionError(_ error) bool { - return false -} - // nodeRestoreGenericAttributes is no-op on netbsd. func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) diff --git a/internal/fs/node_openbsd.go b/internal/fs/node_openbsd.go index 590c603a39f..2a7a410dd75 100644 --- a/internal/fs/node_openbsd.go +++ b/internal/fs/node_openbsd.go @@ -20,11 +20,6 @@ func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { return nil } -// isListxattrPermissionError is a no-op on openbsd. -func isListxattrPermissionError(_ error) bool { - return false -} - // nodeRestoreGenericAttributes is no-op on openbsd. func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) From 6f9513d88c2558dff1b11e2436d8144d719d1e09 Mon Sep 17 00:00:00 2001 From: dropbigfish Date: Sun, 1 Sep 2024 00:54:39 +0800 Subject: [PATCH 664/893] chore: fix some function names Signed-off-by: dropbigfish --- cmd/restic/cmd_snapshots.go | 2 +- cmd/restic/progress.go | 2 +- internal/fs/sd_windows.go | 2 +- internal/fs/vss_windows.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 42677918fcf..466f536e061 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -329,7 +329,7 @@ type SnapshotGroup struct { Snapshots []Snapshot `json:"snapshots"` } -// printSnapshotsJSON writes the JSON representation of list to stdout. +// printSnapshotGroupJSON writes the JSON representation of list to stdout. func printSnapshotGroupJSON(stdout io.Writer, snGroups map[string]restic.Snapshots, grouped bool) error { if grouped { snapshotGroups := []SnapshotGroup{} diff --git a/cmd/restic/progress.go b/cmd/restic/progress.go index d9ff634ce6d..afd5d027f35 100644 --- a/cmd/restic/progress.go +++ b/cmd/restic/progress.go @@ -29,7 +29,7 @@ func calculateProgressInterval(show bool, json bool) time.Duration { return interval } -// newTerminalProgressMax returns a progress.Counter that prints to stdout or terminal if provided. +// newGenericProgressMax returns a progress.Counter that prints to stdout or terminal if provided. 
func newGenericProgressMax(show bool, max uint64, description string, print func(status string, final bool)) *progress.Counter { if !show { return nil diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 0004f1809f8..d085e67eccc 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -156,7 +156,7 @@ func enableBackupPrivilege() { } } -// enableBackupPrivilege enables privilege for restoring security descriptors +// enableRestorePrivilege enables privilege for restoring security descriptors func enableRestorePrivilege() { err := enableProcessPrivileges([]string{SeRestorePrivilege, SeSecurityPrivilege, SeTakeOwnershipPrivilege}) if err != nil { diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 0b51b00f398..940e1b36ad0 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -784,7 +784,7 @@ func (p *VssSnapshot) GetSnapshotDeviceObject() string { return p.snapshotDeviceObject } -// initializeCOMInterface initialize an instance of the VSS COM api +// initializeVssCOMInterface initialize an instance of the VSS COM api func initializeVssCOMInterface() (*ole.IUnknown, error) { vssInstance, err := loadIVssBackupComponentsConstructor() if err != nil { From 5d658f216c1c8f9e53c8be65c703df046a1bc834 Mon Sep 17 00:00:00 2001 From: "Leo R. Lundgren" Date: Sat, 31 Aug 2024 17:28:24 +0200 Subject: [PATCH 665/893] doc: Polish unreleased changelogs --- changelog/unreleased/issue-2004 | 9 ++++----- changelog/unreleased/issue-4934 | 3 ++- changelog/unreleased/issue-4944 | 4 ++-- changelog/unreleased/issue-4945 | 2 +- changelog/unreleased/issue-4957 | 4 +--- changelog/unreleased/issue-4969 | 2 +- changelog/unreleased/issue-4970 | 3 ++- changelog/unreleased/issue-4975 | 2 +- changelog/unreleased/issue-5004 | 2 +- changelog/unreleased/issue-5005 | 2 +- changelog/unreleased/pull-4958 | 6 +++--- changelog/unreleased/pull-4977 | 2 +- changelog/unreleased/pull-4980 | 2 +- changelog/unreleased/pull-5018 | 2 +- 14 files changed, 22 insertions(+), 23 deletions(-) diff --git a/changelog/unreleased/issue-2004 b/changelog/unreleased/issue-2004 index a15651d75fe..5372eeb8c2d 100644 --- a/changelog/unreleased/issue-2004 +++ b/changelog/unreleased/issue-2004 @@ -5,11 +5,10 @@ name without a trailing slash, for example, `C:`, then restoring the resulting snapshot would result in an error. Note that using `C:\` as backup target worked correctly. -Specifying volume names is now handled correctly. - -To restore snapshots created before this bugfix, use the : -syntax. For example, to restore a snapshot with ID `12345678` that backed up -`C:`, use the following command: +Specifying volume names is now handled correctly. To restore snapshots +created before this bugfix, use the : syntax. For +example, to restore a snapshot with ID `12345678` that backed up `C:`, +use the following command: ``` restic restore 12345678:/C/C:./ --target output/folder diff --git a/changelog/unreleased/issue-4934 b/changelog/unreleased/issue-4934 index 56e22ad28c0..df77109a7d0 100644 --- a/changelog/unreleased/issue-4934 +++ b/changelog/unreleased/issue-4934 @@ -4,7 +4,8 @@ Previously, restic only removed snapshots from the cache on the host where the `forget` command was executed. On other hosts that use the same repository, the old snapshots remained in the cache. -Restic now automatically clears old snapshots from the local cache of the current host. +Restic now automatically clears old snapshots from the local cache of the +current host. 
https://github.com/restic/restic/issues/4934 https://github.com/restic/restic/pull/4981 diff --git a/changelog/unreleased/issue-4944 b/changelog/unreleased/issue-4944 index 738da8e5742..95ae24c03d1 100644 --- a/changelog/unreleased/issue-4944 +++ b/changelog/unreleased/issue-4944 @@ -1,7 +1,7 @@ Enhancement: Print JSON-formatted errors during `restore --json` -Restic printed any `restore` errors directly to the console as freeform text -messages, even when using the `--json` option. +Restic used to print any `restore` errors directly to the console as freeform +text messages, even when using the `--json` option. Now, when `--json` is specified, restic prints them as JSON formatted messages. diff --git a/changelog/unreleased/issue-4945 b/changelog/unreleased/issue-4945 index 024b30b211f..a7a483fed53 100644 --- a/changelog/unreleased/issue-4945 +++ b/changelog/unreleased/issue-4945 @@ -4,7 +4,7 @@ Previously, when running a backup with the `--json` option, restic failed to include the actual error message in the output, resulting in `"error": {}` being displayed. -Restic now includes the error text in JSON output. +This has now been fixed, and restic now includes the error text in JSON output. https://github.com/restic/restic/issues/4945 https://github.com/restic/restic/pull/4946 diff --git a/changelog/unreleased/issue-4957 b/changelog/unreleased/issue-4957 index 5da7463ae30..59c73b5c744 100644 --- a/changelog/unreleased/issue-4957 +++ b/changelog/unreleased/issue-4957 @@ -2,9 +2,7 @@ Bugfix: Fix delayed cancellation of certain commands Since restic 0.17.0, some commands did not immediately respond to cancellation via Ctrl-C (SIGINT) and continued running for a short period. The most affected -commands were `diff`,`find`, `ls`, `stats` and `rewrite`. - -This has been fixed. +commands were `diff`,`find`, `ls`, `stats` and `rewrite`. This is now resolved. https://github.com/restic/restic/issues/4957 https://github.com/restic/restic/pull/4960 diff --git a/changelog/unreleased/issue-4969 b/changelog/unreleased/issue-4969 index 9015c2eabba..d92392a2050 100644 --- a/changelog/unreleased/issue-4969 +++ b/changelog/unreleased/issue-4969 @@ -1,7 +1,7 @@ Bugfix: Correctly restore timestamp for files with resource forks on macOS On macOS, timestamps were not restored for files with resource forks. This has -been fixed. +now been fixed. https://github.com/restic/restic/issues/4969 https://github.com/restic/restic/pull/5006 diff --git a/changelog/unreleased/issue-4970 b/changelog/unreleased/issue-4970 index 2fc9300c995..422ae3c2579 100644 --- a/changelog/unreleased/issue-4970 +++ b/changelog/unreleased/issue-4970 @@ -8,7 +8,8 @@ error: `List(data) returned error, retrying after 1s: [...]: request timeout` -It is now possible to increase the timeout using the `--stuck-request-timeout` option. +It is now possible to increase the timeout using the `--stuck-request-timeout` +option. https://github.com/restic/restic/issues/4970 https://github.com/restic/restic/pull/5014 diff --git a/changelog/unreleased/issue-4975 b/changelog/unreleased/issue-4975 index 2503f46da3e..614642c0676 100644 --- a/changelog/unreleased/issue-4975 +++ b/changelog/unreleased/issue-4975 @@ -1,7 +1,7 @@ Bugfix: Prevent `backup --stdin-from-command` from panicking Restic would previously crash if `--stdin-from-command` was specified without -providing a command. This issue has been fixed. +providing a command. This issue has now been fixed. 
https://github.com/restic/restic/issues/4975 https://github.com/restic/restic/pull/4976 diff --git a/changelog/unreleased/issue-5004 b/changelog/unreleased/issue-5004 index a0df9478e13..72e98a9a4c6 100644 --- a/changelog/unreleased/issue-5004 +++ b/changelog/unreleased/issue-5004 @@ -6,7 +6,7 @@ On Windows, creating a backup could sometimes trigger the following error: error: nodeFromFileInfo [...]: get named security info failed with: a required privilege is not held by the client. ``` -This has been fixed. +This has now been fixed. https://github.com/restic/restic/issues/5004 https://github.com/restic/restic/pull/5019 diff --git a/changelog/unreleased/issue-5005 b/changelog/unreleased/issue-5005 index eb712b96775..16ac83b4abf 100644 --- a/changelog/unreleased/issue-5005 +++ b/changelog/unreleased/issue-5005 @@ -7,7 +7,7 @@ the lock files failed to load, resulting in the error: unable to create lock in backend: circuit breaker open for file ``` -This issue has been addressed. The error handling now properly retries the +This issue has now been addressed. The error handling now properly retries the locking operation. In addition, restic waits a few seconds between locking retries to increase chances of successful locking. diff --git a/changelog/unreleased/pull-4958 b/changelog/unreleased/pull-4958 index 02574ad3340..dae9b2c8e97 100644 --- a/changelog/unreleased/pull-4958 +++ b/changelog/unreleased/pull-4958 @@ -1,7 +1,7 @@ Bugfix: Don't ignore metadata-setting errors during restore -Restic ignored errors when setting timestamps, attributes, or file modes during -a restore. It now reports those, except for permission errors when running -without root privileges. +Previously, restic used to ignore errors when setting timestamps, attributes, +or file modes during a restore. It now reports those errors, except for +permission related errors when running without root privileges. https://github.com/restic/restic/pull/4958 diff --git a/changelog/unreleased/pull-4977 b/changelog/unreleased/pull-4977 index 85e4091c56b..781576a5696 100644 --- a/changelog/unreleased/pull-4977 +++ b/changelog/unreleased/pull-4977 @@ -1,4 +1,4 @@ -Change: `backup` includes files with incomplete metadata +Change: Also back up files with incomplete metadata If restic failed to read extended metadata for a file or folder during a backup, then the file or folder was not included in the resulting snapshot. diff --git a/changelog/unreleased/pull-4980 b/changelog/unreleased/pull-4980 index 4b1de54bfa9..b51ee8d5998 100644 --- a/changelog/unreleased/pull-4980 +++ b/changelog/unreleased/pull-4980 @@ -1,6 +1,6 @@ Bugfix: Skip extended attribute processing on unsupported Windows volumes -For restic 0.17.0, backups of certain Windows paths, such as network drives, +With restic 0.17.0, backups of certain Windows paths, such as network drives, failed due to errors while fetching extended attributes. Restic now skips extended attribute processing for volumes where they are not diff --git a/changelog/unreleased/pull-5018 b/changelog/unreleased/pull-5018 index 84f36355c2f..ca600c3e1d2 100644 --- a/changelog/unreleased/pull-5018 +++ b/changelog/unreleased/pull-5018 @@ -7,7 +7,7 @@ use by the client, it could result in the following error: http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error ``` -This issue has been resolved. +This issue has now been resolved. 
https://github.com/restic/restic/pull/5018 https://forum.restic.net/t/receiving-http2-goaway-messages-with-windows-restic-v0-17-0/8367 From ebd8f0c74ab46f658e04474594afca68e1e6b11c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 31 Aug 2024 19:39:30 +0200 Subject: [PATCH 666/893] docs: make s3-compatible section standalone --- doc/030_preparing_a_new_repo.rst | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index fd5b3112748..3dc10745686 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -314,9 +314,17 @@ this command. S3-compatible Storage ********************* -For an S3-compatible server that is not Amazon, you can specify the URL to the server +For an S3-compatible storage service that is not Amazon, you can specify the URL to the server like this: ``s3:https://server:port/bucket_name``. +You must also set credentials for authentication to the service. + +.. code-block:: console + + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + $ restic -r s3:https://server:port/bucket_name init + If needed, you can manually specify the region to use by either setting the environment variable ``AWS_DEFAULT_REGION`` or calling restic with an option parameter like ``-o s3.region="us-east-1"``. If the region is not specified, From 154ca4d9e89f570703099bc61206674f5cfe85e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Sep 2024 01:33:18 +0000 Subject: [PATCH 667/893] build(deps): bump golang.org/x/sys from 0.23.0 to 0.24.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.23.0 to 0.24.0. - [Commits](https://github.com/golang/sys/compare/v0.23.0...v0.24.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 598755a53e8..13d929019de 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( golang.org/x/net v0.27.0 golang.org/x/oauth2 v0.22.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.23.0 + golang.org/x/sys v0.24.0 golang.org/x/term v0.23.0 golang.org/x/text v0.17.0 golang.org/x/time v0.6.0 diff --git a/go.sum b/go.sum index 309f20fc692..cb774264855 100644 --- a/go.sum +++ b/go.sum @@ -276,8 +276,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From daf156a76aa133118f828fd50c440a2a27b76964 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Sep 2024 01:33:35 +0000 Subject: [PATCH 668/893] build(deps): bump github.com/ncw/swift/v2 from 2.0.2 to 2.0.3 Bumps [github.com/ncw/swift/v2](https://github.com/ncw/swift) from 2.0.2 to 2.0.3. - [Release notes](https://github.com/ncw/swift/releases) - [Changelog](https://github.com/ncw/swift/blob/master/RELEASE.md) - [Commits](https://github.com/ncw/swift/compare/v2.0.2...v2.0.3) --- updated-dependencies: - dependency-name: github.com/ncw/swift/v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 598755a53e8..7ca03addfe1 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/klauspost/compress v1.17.9 github.com/minio/minio-go/v7 v7.0.74 - github.com/ncw/swift/v2 v2.0.2 + github.com/ncw/swift/v2 v2.0.3 github.com/peterbourgon/unixtransport v0.0.4 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 diff --git a/go.sum b/go.sum index 309f20fc692..deb190135f8 100644 --- a/go.sum +++ b/go.sum @@ -148,8 +148,8 @@ github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0= github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= -github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= -github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= +github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg= +github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/ff/v3 v3.3.1/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= From 790dbd442b7ec53937ec054d9b1cfdd36d819432 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Sep 2024 01:33:45 +0000 Subject: [PATCH 669/893] build(deps): bump google.golang.org/api from 0.191.0 to 0.195.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.191.0 to 0.195.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.191.0...v0.195.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 22 +++++++++++----------- go.sum | 48 ++++++++++++++++++++++++------------------------ 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index 598755a53e8..c685aaa5746 100644 --- a/go.mod +++ b/go.mod @@ -26,22 +26,22 @@ require ( github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.26.0 - golang.org/x/net v0.27.0 + golang.org/x/net v0.28.0 golang.org/x/oauth2 v0.22.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.23.0 + golang.org/x/sys v0.24.0 golang.org/x/term v0.23.0 golang.org/x/text v0.17.0 golang.org/x/time v0.6.0 - google.golang.org/api v0.191.0 + google.golang.org/api v0.195.0 ) require ( - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.7.3 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/auth v0.9.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.12 // indirect + cloud.google.com/go/iam v1.1.13 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect @@ -73,10 +73,10 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/grpc v1.64.1 // indirect + google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 309f20fc692..2bb482be773 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= -cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/auth v0.9.1 h1:+pMtLEV2k0AXKvs/tGZojuj6QaioxfUjOpMsG5Gtx+w= +cloud.google.com/go/auth v0.9.1/go.mod h1:Sw8ocT5mhhXxFklyhT12Eiy0ed6tTrPMCJjSI8KhYLk= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= 
cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= -cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= -cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= +cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= +cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= +cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= +cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= @@ -247,8 +247,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -276,8 +276,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -306,26 +306,26 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.191.0 h1:cJcF09Z+4HAB2t5qTQM1ZtfL/PemsLFkcFG67qq2afk= -google.golang.org/api v0.191.0/go.mod 
h1:tD5dsFGxFza0hnQveGfVk9QQYKcfp+VzgRqyXFxE0+E= +google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= +google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From a6490feab22cf8be61f5ba659dacb0d5d8319c64 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Sep 2024 22:00:55 +0000 Subject: [PATCH 
670/893] build(deps): bump github.com/minio/minio-go/v7 from 7.0.74 to 7.0.76 Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.74 to 7.0.76. - [Release notes](https://github.com/minio/minio-go/releases) - [Commits](https://github.com/minio/minio-go/compare/v7.0.74...v7.0.76) --- updated-dependencies: - dependency-name: github.com/minio/minio-go/v7 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 7ca03addfe1..ed56eeb2960 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/klauspost/compress v1.17.9 - github.com/minio/minio-go/v7 v7.0.74 + github.com/minio/minio-go/v7 v7.0.76 github.com/ncw/swift/v2 v2.0.3 github.com/peterbourgon/unixtransport v0.0.4 github.com/pkg/errors v0.9.1 @@ -26,10 +26,10 @@ require ( github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.26.0 - golang.org/x/net v0.27.0 + golang.org/x/net v0.28.0 golang.org/x/oauth2 v0.22.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.23.0 + golang.org/x/sys v0.24.0 golang.org/x/term v0.23.0 golang.org/x/text v0.17.0 golang.org/x/time v0.6.0 @@ -65,7 +65,7 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/rs/xid v1.5.0 // indirect + github.com/rs/xid v1.6.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect diff --git a/go.sum b/go.sum index deb190135f8..b5deddebda8 100644 --- a/go.sum +++ b/go.sum @@ -146,8 +146,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0= -github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= +github.com/minio/minio-go/v7 v7.0.76 h1:9nxHH2XDai61cT/EFhyIw/wW4vJfpPNvl7lSFpRt+Ng= +github.com/minio/minio-go/v7 v7.0.76/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg= github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg= github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -179,8 +179,8 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod 
h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= @@ -247,8 +247,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -276,8 +276,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 00ca0b371bebd32f23538cba4aade3fdcc46571a Mon Sep 17 00:00:00 2001 From: Git'Fellow <12234510+solracsf@users.noreply.github.com> Date: Wed, 4 Sep 2024 13:21:37 +0200 Subject: [PATCH 671/893] docs: Recommend to setup B2 versions lifecycle rules --- doc/030_preparing_a_new_repo.rst | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 3dc10745686..06b38ac052a 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -463,9 +463,11 @@ Backblaze B2 than using the Backblaze B2 backend directly. Different from the B2 backend, restic's S3 backend will only hide no longer - necessary files. Thus, make sure to setup lifecycle rules to eventually - delete hidden files. The lifecycle setting "Keep only the last version of the file" - will keep only the most current version of a file. Read the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules). + necessary files. By default, Backblaze B2 retains all of the different versions of the + files and "hides" the older versions. Thus, to make sure to setup B2 lifecycle rules to + delete hidden files, the B2 lifecycle setting "Keep only the last version of the file" + is **recommended**. 
The previous version of the file is "hidden" for one day and then + deleted automatically by B2. More details at the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules). Restic can backup data to any Backblaze B2 bucket. You need to first setup the following environment variables with the credentials you can find in the From 17e54b04abad9eeb3143aec93e7f04ec0555671f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 5 Sep 2024 21:25:19 +0200 Subject: [PATCH 672/893] Prepare changelog for 0.17.1 --- changelog/{unreleased => 0.17.1_2024-09-05}/issue-2004 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-4795 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-4934 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-4944 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-4945 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-4953 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-4957 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-4969 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-4970 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-4975 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-5004 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/issue-5005 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/pull-4958 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/pull-4959 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/pull-4977 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/pull-4980 | 0 changelog/{unreleased => 0.17.1_2024-09-05}/pull-5018 | 0 17 files changed, 0 insertions(+), 0 deletions(-) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-2004 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-4795 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-4934 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-4944 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-4945 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-4953 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-4957 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-4969 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-4970 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-4975 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-5004 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/issue-5005 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/pull-4958 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/pull-4959 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/pull-4977 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/pull-4980 (100%) rename changelog/{unreleased => 0.17.1_2024-09-05}/pull-5018 (100%) diff --git a/changelog/unreleased/issue-2004 b/changelog/0.17.1_2024-09-05/issue-2004 similarity index 100% rename from changelog/unreleased/issue-2004 rename to changelog/0.17.1_2024-09-05/issue-2004 diff --git a/changelog/unreleased/issue-4795 b/changelog/0.17.1_2024-09-05/issue-4795 similarity index 100% rename from changelog/unreleased/issue-4795 rename to changelog/0.17.1_2024-09-05/issue-4795 diff --git a/changelog/unreleased/issue-4934 b/changelog/0.17.1_2024-09-05/issue-4934 similarity index 100% rename from changelog/unreleased/issue-4934 rename to changelog/0.17.1_2024-09-05/issue-4934 diff --git a/changelog/unreleased/issue-4944 b/changelog/0.17.1_2024-09-05/issue-4944 similarity index 100% rename from changelog/unreleased/issue-4944 rename to 
changelog/0.17.1_2024-09-05/issue-4944 diff --git a/changelog/unreleased/issue-4945 b/changelog/0.17.1_2024-09-05/issue-4945 similarity index 100% rename from changelog/unreleased/issue-4945 rename to changelog/0.17.1_2024-09-05/issue-4945 diff --git a/changelog/unreleased/issue-4953 b/changelog/0.17.1_2024-09-05/issue-4953 similarity index 100% rename from changelog/unreleased/issue-4953 rename to changelog/0.17.1_2024-09-05/issue-4953 diff --git a/changelog/unreleased/issue-4957 b/changelog/0.17.1_2024-09-05/issue-4957 similarity index 100% rename from changelog/unreleased/issue-4957 rename to changelog/0.17.1_2024-09-05/issue-4957 diff --git a/changelog/unreleased/issue-4969 b/changelog/0.17.1_2024-09-05/issue-4969 similarity index 100% rename from changelog/unreleased/issue-4969 rename to changelog/0.17.1_2024-09-05/issue-4969 diff --git a/changelog/unreleased/issue-4970 b/changelog/0.17.1_2024-09-05/issue-4970 similarity index 100% rename from changelog/unreleased/issue-4970 rename to changelog/0.17.1_2024-09-05/issue-4970 diff --git a/changelog/unreleased/issue-4975 b/changelog/0.17.1_2024-09-05/issue-4975 similarity index 100% rename from changelog/unreleased/issue-4975 rename to changelog/0.17.1_2024-09-05/issue-4975 diff --git a/changelog/unreleased/issue-5004 b/changelog/0.17.1_2024-09-05/issue-5004 similarity index 100% rename from changelog/unreleased/issue-5004 rename to changelog/0.17.1_2024-09-05/issue-5004 diff --git a/changelog/unreleased/issue-5005 b/changelog/0.17.1_2024-09-05/issue-5005 similarity index 100% rename from changelog/unreleased/issue-5005 rename to changelog/0.17.1_2024-09-05/issue-5005 diff --git a/changelog/unreleased/pull-4958 b/changelog/0.17.1_2024-09-05/pull-4958 similarity index 100% rename from changelog/unreleased/pull-4958 rename to changelog/0.17.1_2024-09-05/pull-4958 diff --git a/changelog/unreleased/pull-4959 b/changelog/0.17.1_2024-09-05/pull-4959 similarity index 100% rename from changelog/unreleased/pull-4959 rename to changelog/0.17.1_2024-09-05/pull-4959 diff --git a/changelog/unreleased/pull-4977 b/changelog/0.17.1_2024-09-05/pull-4977 similarity index 100% rename from changelog/unreleased/pull-4977 rename to changelog/0.17.1_2024-09-05/pull-4977 diff --git a/changelog/unreleased/pull-4980 b/changelog/0.17.1_2024-09-05/pull-4980 similarity index 100% rename from changelog/unreleased/pull-4980 rename to changelog/0.17.1_2024-09-05/pull-4980 diff --git a/changelog/unreleased/pull-5018 b/changelog/0.17.1_2024-09-05/pull-5018 similarity index 100% rename from changelog/unreleased/pull-5018 rename to changelog/0.17.1_2024-09-05/pull-5018 From d8870a2f731faa2709e569f0166e18218c0d2490 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 5 Sep 2024 21:25:20 +0200 Subject: [PATCH 673/893] Generate CHANGELOG.md for 0.17.1 --- CHANGELOG.md | 225 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 225 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a6926755a9..9a53939156a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ # Table of Contents +* [Changelog for 0.17.1](#changelog-for-restic-0171-2024-09-05) * [Changelog for 0.17.0](#changelog-for-restic-0170-2024-07-26) * [Changelog for 0.16.5](#changelog-for-restic-0165-2024-07-01) * [Changelog for 0.16.4](#changelog-for-restic-0164-2024-02-04) @@ -35,6 +36,230 @@ * [Changelog for 0.6.0](#changelog-for-restic-060-2017-05-29) +# Changelog for restic 0.17.1 (2024-09-05) +The following sections list the changes in restic 0.17.1 relevant to +restic users. 
The changes are ordered by importance. + +## Summary + + * Fix #2004: Correctly handle volume names in `backup` command on Windows + * Fix #4945: Include missing backup error text with `--json` + * Fix #4953: Correctly handle long paths on older Windows versions + * Fix #4957: Fix delayed cancellation of certain commands + * Fix #4958: Don't ignore metadata-setting errors during restore + * Fix #4969: Correctly restore timestamp for files with resource forks on macOS + * Fix #4975: Prevent `backup --stdin-from-command` from panicking + * Fix #4980: Skip extended attribute processing on unsupported Windows volumes + * Fix #5004: Fix spurious "A Required Privilege Is Not Held by the Client" error + * Fix #5005: Fix rare failures to retry locking a repository + * Fix #5018: Improve HTTP/2 support for REST backend + * Chg #4953: Also back up files with incomplete metadata + * Enh #4795: Display progress bar for `restore --verify` + * Enh #4934: Automatically clear removed snapshots from cache + * Enh #4944: Print JSON-formatted errors during `restore --json` + * Enh #4959: Return exit code 12 for "bad password" errors + * Enh #4970: Make timeout for stuck requests customizable + +## Details + + * Bugfix #2004: Correctly handle volume names in `backup` command on Windows + + On Windows, when the specified backup target only included the volume name + without a trailing slash, for example, `C:`, then restoring the resulting + snapshot would result in an error. Note that using `C:\` as backup target worked + correctly. + + Specifying volume names is now handled correctly. To restore snapshots created + before this bugfix, use the : syntax. For example, to restore + a snapshot with ID `12345678` that backed up `C:`, use the following command: + + ``` + restic restore 12345678:/C/C:./ --target output/folder + ``` + + https://github.com/restic/restic/issues/2004 + https://github.com/restic/restic/pull/5028 + + * Bugfix #4945: Include missing backup error text with `--json` + + Previously, when running a backup with the `--json` option, restic failed to + include the actual error message in the output, resulting in `"error": {}` being + displayed. + + This has now been fixed, and restic now includes the error text in JSON output. + + https://github.com/restic/restic/issues/4945 + https://github.com/restic/restic/pull/4946 + + * Bugfix #4953: Correctly handle long paths on older Windows versions + + On older Windows versions, like Windows Server 2012, restic 0.17.0 failed to + back up files with long paths. This problem has now been resolved. + + https://github.com/restic/restic/issues/4953 + https://github.com/restic/restic/pull/4954 + + * Bugfix #4957: Fix delayed cancellation of certain commands + + Since restic 0.17.0, some commands did not immediately respond to cancellation + via Ctrl-C (SIGINT) and continued running for a short period. The most affected + commands were `diff`,`find`, `ls`, `stats` and `rewrite`. This is now resolved. + + https://github.com/restic/restic/issues/4957 + https://github.com/restic/restic/pull/4960 + + * Bugfix #4958: Don't ignore metadata-setting errors during restore + + Previously, restic used to ignore errors when setting timestamps, attributes, or + file modes during a restore. It now reports those errors, except for permission + related errors when running without root privileges. 
+ + https://github.com/restic/restic/pull/4958 + + * Bugfix #4969: Correctly restore timestamp for files with resource forks on macOS + + On macOS, timestamps were not restored for files with resource forks. This has + now been fixed. + + https://github.com/restic/restic/issues/4969 + https://github.com/restic/restic/pull/5006 + + * Bugfix #4975: Prevent `backup --stdin-from-command` from panicking + + Restic would previously crash if `--stdin-from-command` was specified without + providing a command. This issue has now been fixed. + + https://github.com/restic/restic/issues/4975 + https://github.com/restic/restic/pull/4976 + + * Bugfix #4980: Skip extended attribute processing on unsupported Windows volumes + + With restic 0.17.0, backups of certain Windows paths, such as network drives, + failed due to errors while fetching extended attributes. + + Restic now skips extended attribute processing for volumes where they are not + supported. + + https://github.com/restic/restic/issues/4955 + https://github.com/restic/restic/issues/4950 + https://github.com/restic/restic/pull/4980 + https://github.com/restic/restic/pull/4998 + + * Bugfix #5004: Fix spurious "A Required Privilege Is Not Held by the Client" error + + On Windows, creating a backup could sometimes trigger the following error: + + ``` + error: nodeFromFileInfo [...]: get named security info failed with: a required privilege is not held by the client. + ``` + + This has now been fixed. + + https://github.com/restic/restic/issues/5004 + https://github.com/restic/restic/pull/5019 + + * Bugfix #5005: Fix rare failures to retry locking a repository + + Restic 0.17.0 could in rare cases fail to retry locking a repository if one of + the lock files failed to load, resulting in the error: + + ``` + unable to create lock in backend: circuit breaker open for file + ``` + + This issue has now been addressed. The error handling now properly retries the + locking operation. In addition, restic waits a few seconds between locking + retries to increase chances of successful locking. + + https://github.com/restic/restic/issues/5005 + https://github.com/restic/restic/pull/5011 + https://github.com/restic/restic/pull/5012 + + * Bugfix #5018: Improve HTTP/2 support for REST backend + + If `rest-server` tried to gracefully shut down an HTTP/2 connection still in use + by the client, it could result in the following error: + + ``` + http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error + ``` + + This issue has now been resolved. + + https://github.com/restic/restic/pull/5018 + https://forum.restic.net/t/receiving-http2-goaway-messages-with-windows-restic-v0-17-0/8367 + + * Change #4953: Also back up files with incomplete metadata + + If restic failed to read extended metadata for a file or folder during a backup, + then the file or folder was not included in the resulting snapshot. Instead, a + warning message was printed along with returning exit code 3 once the backup was + finished. + + Now, restic also includes items for which the extended metadata could not be + read in a snapshot. The warning message has been updated to: + + ``` + incomplete metadata for /path/to/file:
+ ``` + + https://github.com/restic/restic/issues/4953 + https://github.com/restic/restic/pull/4977 + + * Enhancement #4795: Display progress bar for `restore --verify` + + When the `restore` command is run with `--verify`, it now displays a progress + bar while the verification step is running. The progress bar is not shown when + the `--json` flag is specified. + + https://github.com/restic/restic/issues/4795 + https://github.com/restic/restic/pull/4989 + + * Enhancement #4934: Automatically clear removed snapshots from cache + + Previously, restic only removed snapshots from the cache on the host where the + `forget` command was executed. On other hosts that use the same repository, the + old snapshots remained in the cache. + + Restic now automatically clears old snapshots from the local cache of the + current host. + + https://github.com/restic/restic/issues/4934 + https://github.com/restic/restic/pull/4981 + + * Enhancement #4944: Print JSON-formatted errors during `restore --json` + + Restic used to print any `restore` errors directly to the console as freeform + text messages, even when using the `--json` option. + + Now, when `--json` is specified, restic prints them as JSON formatted messages. + + https://github.com/restic/restic/issues/4944 + https://github.com/restic/restic/pull/4946 + + * Enhancement #4959: Return exit code 12 for "bad password" errors + + Restic now returns exit code 12 when it cannot open the repository due to an + incorrect password. + + https://github.com/restic/restic/pull/4959 + + * Enhancement #4970: Make timeout for stuck requests customizable + + Restic monitors connections to the backend to detect stuck requests. If a + request does not return any data within five minutes, restic assumes the request + is stuck and retries it. However, for large repositories this timeout might be + insufficient to collect a list of all files, causing the following error: + + `List(data) returned error, retrying after 1s: [...]: request timeout` + + It is now possible to increase the timeout using the `--stuck-request-timeout` + option. + + https://github.com/restic/restic/issues/4970 + https://github.com/restic/restic/pull/5014 + + # Changelog for restic 0.17.0 (2024-07-26) The following sections list the changes in restic 0.17.0 relevant to restic users. The changes are ordered by importance. 
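The `--stuck-request-timeout` option introduced by enhancement #4970 above is easiest to understand with a concrete invocation. The following sketch is illustrative and not part of the patches: the repository path, the source path and the 15m value are placeholder assumptions; the flag itself and its 5m0s default come from the regenerated man pages in the next commit.

```
# Illustrative sketch only: raise the stuck-request timeout so that listing
# all files of a very large repository is not cut off after the 5 minute
# default. The repository path, source path and the 15m value are placeholders.
restic backup --repo /srv/restic-repo --stuck-request-timeout 15m /home/user
```

Any Go-style duration (such as 10m or 2h) should be accepted, matching the format already used by the `--retry-lock` option.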
From a98370cc9e9bbfb5bca3b73ec423ad2ad131213f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 5 Sep 2024 21:25:21 +0200 Subject: [PATCH 674/893] Update manpages and auto-completion --- doc/bash-completion.sh | 229 ++++++++++++++++++++++++++++++ doc/man/restic-backup.1 | 5 + doc/man/restic-cache.1 | 4 + doc/man/restic-cat.1 | 5 + doc/man/restic-check.1 | 5 + doc/man/restic-copy.1 | 7 +- doc/man/restic-diff.1 | 5 + doc/man/restic-dump.1 | 5 + doc/man/restic-features.1 | 146 +++++++++++++++++++ doc/man/restic-find.1 | 5 + doc/man/restic-forget.1 | 5 + doc/man/restic-generate.1 | 4 + doc/man/restic-init.1 | 6 +- doc/man/restic-key-add.1 | 5 + doc/man/restic-key-list.1 | 5 + doc/man/restic-key-passwd.1 | 5 + doc/man/restic-key-remove.1 | 5 + doc/man/restic-key.1 | 4 + doc/man/restic-list.1 | 5 + doc/man/restic-ls.1 | 5 + doc/man/restic-migrate.1 | 5 + doc/man/restic-mount.1 | 5 + doc/man/restic-options.1 | 135 ++++++++++++++++++ doc/man/restic-prune.1 | 5 + doc/man/restic-recover.1 | 5 + doc/man/restic-repair-index.1 | 5 + doc/man/restic-repair-packs.1 | 5 + doc/man/restic-repair-snapshots.1 | 5 + doc/man/restic-repair.1 | 4 + doc/man/restic-restore.1 | 5 + doc/man/restic-rewrite.1 | 5 + doc/man/restic-self-update.1 | 5 + doc/man/restic-snapshots.1 | 5 + doc/man/restic-stats.1 | 5 + doc/man/restic-tag.1 | 5 + doc/man/restic-unlock.1 | 4 + doc/man/restic-version.1 | 4 + doc/man/restic.1 | 6 +- 38 files changed, 680 insertions(+), 3 deletions(-) create mode 100644 doc/man/restic-features.1 create mode 100644 doc/man/restic-options.1 diff --git a/doc/bash-completion.sh b/doc/bash-completion.sh index 9d64871ca3f..0517fdf7c7d 100644 --- a/doc/bash-completion.sh +++ b/doc/bash-completion.sh @@ -516,6 +516,8 @@ _restic_backup() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -592,6 +594,8 @@ _restic_cache() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -660,6 +664,8 @@ _restic_cat() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -667,6 +673,15 @@ _restic_cat() must_have_one_flag=() must_have_one_noun=() + must_have_one_noun+=("blob") + must_have_one_noun+=("config") + must_have_one_noun+=("index") + must_have_one_noun+=("key") + must_have_one_noun+=("lock") + must_have_one_noun+=("masterkey") + must_have_one_noun+=("pack") + must_have_one_noun+=("snapshot") + must_have_one_noun+=("tree") noun_aliases=() } @@ -736,6 +751,8 @@ _restic_check() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -840,6 +857,8 @@ _restic_copy() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") 
flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -910,6 +929,8 @@ _restic_diff() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1004,6 +1025,78 @@ _restic_dump() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_features() +{ + last_command="restic_features" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1122,6 +1215,8 @@ _restic_find() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1298,6 +1393,8 @@ _restic_forget() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1386,6 +1483,8 @@ _restic_generate() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1450,6 +1549,8 @@ _restic_help() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + 
flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1547,6 +1648,8 @@ _restic_init() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1629,6 +1732,8 @@ _restic_key_add() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1693,6 +1798,8 @@ _restic_key_help() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1762,6 +1869,8 @@ _restic_key_list() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1844,6 +1953,8 @@ _restic_key_passwd() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1912,6 +2023,8 @@ _restic_key_remove() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -1985,6 +2098,8 @@ _restic_key() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2053,6 +2168,8 @@ _restic_list() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2145,6 +2262,8 @@ _restic_ls() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2217,6 +2336,8 @@ _restic_migrate() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2313,6 +2434,78 @@ _restic_mount() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") + flags+=("--tls-client-cert=") + two_word_flags+=("--tls-client-cert") + 
flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_options() +{ + last_command="restic_options" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + local_nonpersistent_flags+=("-h") + flags+=("--cacert=") + two_word_flags+=("--cacert") + flags+=("--cache-dir=") + two_word_flags+=("--cache-dir") + flags+=("--cleanup-cache") + flags+=("--compression=") + two_word_flags+=("--compression") + flags+=("--http-user-agent=") + two_word_flags+=("--http-user-agent") + flags+=("--insecure-no-password") + flags+=("--insecure-tls") + flags+=("--json") + flags+=("--key-hint=") + two_word_flags+=("--key-hint") + flags+=("--limit-download=") + two_word_flags+=("--limit-download") + flags+=("--limit-upload=") + two_word_flags+=("--limit-upload") + flags+=("--no-cache") + flags+=("--no-extra-verify") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("--option") + two_word_flags+=("-o") + flags+=("--pack-size=") + two_word_flags+=("--pack-size") + flags+=("--password-command=") + two_word_flags+=("--password-command") + flags+=("--password-file=") + two_word_flags+=("--password-file") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("--repo") + two_word_flags+=("-r") + flags+=("--repository-file=") + two_word_flags+=("--repository-file") + flags+=("--retry-lock=") + two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2403,6 +2596,8 @@ _restic_prune() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2471,6 +2666,8 @@ _restic_recover() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2535,6 +2732,8 @@ _restic_repair_help() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2606,6 +2805,8 @@ _restic_repair_index() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2674,6 +2875,8 @@ _restic_repair_packs() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2762,6 +2965,8 @@ _restic_repair_snapshots() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + 
two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2834,6 +3039,8 @@ _restic_repair() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -2970,6 +3177,8 @@ _restic_restore() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -3084,6 +3293,8 @@ _restic_rewrite() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -3156,6 +3367,8 @@ _restic_self-update() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -3252,6 +3465,8 @@ _restic_snapshots() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -3288,6 +3503,8 @@ _restic_stats() local_nonpersistent_flags+=("-H") flags+=("--mode=") two_word_flags+=("--mode") + flags_with_completion+=("--mode") + flags_completion+=("__restic_handle_go_custom_completion") local_nonpersistent_flags+=("--mode") local_nonpersistent_flags+=("--mode=") flags+=("--path=") @@ -3338,6 +3555,8 @@ _restic_stats() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -3432,6 +3651,8 @@ _restic_tag() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -3502,6 +3723,8 @@ _restic_unlock() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -3570,6 +3793,8 @@ _restic_version() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") @@ -3594,6 +3819,7 @@ _restic_root_command() commands+=("copy") commands+=("diff") commands+=("dump") + commands+=("features") commands+=("find") commands+=("forget") commands+=("generate") @@ -3604,6 +3830,7 @@ _restic_root_command() commands+=("ls") commands+=("migrate") commands+=("mount") + commands+=("options") 
commands+=("prune") commands+=("recover") commands+=("repair") @@ -3666,6 +3893,8 @@ _restic_root_command() two_word_flags+=("--repository-file") flags+=("--retry-lock=") two_word_flags+=("--retry-lock") + flags+=("--stuck-request-timeout=") + two_word_flags+=("--stuck-request-timeout") flags+=("--tls-client-cert=") two_word_flags+=("--tls-client-cert") flags+=("--verbose") diff --git a/doc/man/restic-backup.1 b/doc/man/restic-backup.1 index cda4aadffbe..a84b955bac7 100644 --- a/doc/man/restic-backup.1 +++ b/doc/man/restic-backup.1 @@ -24,6 +24,7 @@ Exit status is 1 if there was a fatal error (no snapshot created). Exit status is 3 if some source data could not be read (incomplete snapshot created). Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -229,6 +230,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-cache.1 b/doc/man/restic-cache.1 index f868b8a6b6c..fb23fe8a9d6 100644 --- a/doc/man/restic-cache.1 +++ b/doc/man/restic-cache.1 @@ -129,6 +129,10 @@ Exit status is 1 if there was any error. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-cat.1 b/doc/man/restic-cat.1 index 2298c58cfeb..cab1b85a5b1 100644 --- a/doc/man/restic-cat.1 +++ b/doc/man/restic-cat.1 @@ -22,6 +22,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -119,6 +120,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-check.1 b/doc/man/restic-check.1 index c0d1b07a8f3..60d17a3139e 100644 --- a/doc/man/restic-check.1 +++ b/doc/man/restic-check.1 @@ -27,6 +27,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -136,6 +137,10 @@ Exit status is 11 if the repository is already locked. 
\fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-copy.1 b/doc/man/restic-copy.1 index 63b67e5e726..96c39413947 100644 --- a/doc/man/restic-copy.1 +++ b/doc/man/restic-copy.1 @@ -36,12 +36,13 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS .PP \fB--from-insecure-no-password\fP[=false] - use an empty password for the source repository, must be passed to every restic command (insecure) + use an empty password for the source repository (insecure) .PP \fB--from-key-hint\fP="" @@ -169,6 +170,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-diff.1 b/doc/man/restic-diff.1 index f4ffa27374b..f4c8a1d144b 100644 --- a/doc/man/restic-diff.1 +++ b/doc/man/restic-diff.1 @@ -49,6 +49,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -150,6 +151,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-dump.1 b/doc/man/restic-dump.1 index 00cb3c8b6ba..657570f6d41 100644 --- a/doc/man/restic-dump.1 +++ b/doc/man/restic-dump.1 @@ -34,6 +34,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -151,6 +152,10 @@ Exit status is 11 if the repository is already locked. 
\fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-features.1 b/doc/man/restic-features.1 new file mode 100644 index 00000000000..b288f655ab0 --- /dev/null +++ b/doc/man/restic-features.1 @@ -0,0 +1,146 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-features - Print list of feature flags + + +.SH SYNOPSIS +.PP +\fBrestic features [flags]\fP + + +.SH DESCRIPTION +.PP +The "features" command prints a list of supported feature flags. + +.PP +To pass feature flags to restic, set the RESTIC_FEATURES environment variable +to "featureA=true,featureB=false". Specifying an unknown feature flag is an error. + +.PP +A feature can either be in alpha, beta, stable or deprecated state. +An \fIalpha\fP feature is disabled by default and may change in arbitrary ways between restic versions or be removed. +A \fIbeta\fP feature is enabled by default, but still can change in minor ways or be removed. +A \fIstable\fP feature is always enabled and cannot be disabled. The flag will be removed in a future restic version. +A \fIdeprecated\fP feature is always disabled and cannot be enabled. The flag will be removed in a future restic version. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. +Exit status is 1 if there was any error. + + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for features + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. (default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. 
(default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-find.1 b/doc/man/restic-find.1 index 2d81decd387..e8d974527d2 100644 --- a/doc/man/restic-find.1 +++ b/doc/man/restic-find.1 @@ -165,6 +165,10 @@ It can also be used to search for restic blobs or trees for troubleshooting. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) @@ -190,6 +194,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .EE diff --git a/doc/man/restic-forget.1 b/doc/man/restic-forget.1 index 55705288feb..058dbee25a8 100644 --- a/doc/man/restic-forget.1 +++ b/doc/man/restic-forget.1 @@ -36,6 +36,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -237,6 +238,10 @@ Exit status is 11 if the repository is already locked. 
\fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-generate.1 b/doc/man/restic-generate.1 index f2db39bacad..f17a6fcd030 100644 --- a/doc/man/restic-generate.1 +++ b/doc/man/restic-generate.1 @@ -138,6 +138,10 @@ Exit status is 1 if there was any error. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-init.1 b/doc/man/restic-init.1 index de439add53d..50fa00b7101 100644 --- a/doc/man/restic-init.1 +++ b/doc/man/restic-init.1 @@ -29,7 +29,7 @@ Exit status is 1 if there was any error. .PP \fB--from-insecure-no-password\fP[=false] - use an empty password for the source repository, must be passed to every restic command (insecure) + use an empty password for the source repository (insecure) .PP \fB--from-key-hint\fP="" @@ -149,6 +149,10 @@ Exit status is 1 if there was any error. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-key-add.1 b/doc/man/restic-key-add.1 index 6a24e1e6712..ff33408b47a 100644 --- a/doc/man/restic-key-add.1 +++ b/doc/man/restic-key-add.1 @@ -22,6 +22,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -135,6 +136,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-key-list.1 b/doc/man/restic-key-list.1 index a00b116b9bf..7deb05793b0 100644 --- a/doc/man/restic-key-list.1 +++ b/doc/man/restic-key-list.1 @@ -24,6 +24,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -121,6 +122,10 @@ Exit status is 11 if the repository is already locked. 
\fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-key-passwd.1 b/doc/man/restic-key-passwd.1 index 42315d72ae0..68e81edd984 100644 --- a/doc/man/restic-key-passwd.1 +++ b/doc/man/restic-key-passwd.1 @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -136,6 +137,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-key-remove.1 b/doc/man/restic-key-remove.1 index 6ee82605968..ff1a0ceb917 100644 --- a/doc/man/restic-key-remove.1 +++ b/doc/man/restic-key-remove.1 @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -120,6 +121,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-key.1 b/doc/man/restic-key.1 index 43da808ccc0..4fd1f6caf42 100644 --- a/doc/man/restic-key.1 +++ b/doc/man/restic-key.1 @@ -112,6 +112,10 @@ per repository. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-list.1 b/doc/man/restic-list.1 index f8a1db00580..29945e859d4 100644 --- a/doc/man/restic-list.1 +++ b/doc/man/restic-list.1 @@ -22,6 +22,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -119,6 +120,10 @@ Exit status is 11 if the repository is already locked. 
\fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-ls.1 b/doc/man/restic-ls.1 index 6cc6625836c..b990d2ec880 100644 --- a/doc/man/restic-ls.1 +++ b/doc/man/restic-ls.1 @@ -37,6 +37,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -162,6 +163,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-migrate.1 b/doc/man/restic-migrate.1 index 2272294bf47..c0fa2dbc1ed 100644 --- a/doc/man/restic-migrate.1 +++ b/doc/man/restic-migrate.1 @@ -24,6 +24,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -125,6 +126,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-mount.1 b/doc/man/restic-mount.1 index a256d2a5ff3..5ec59391d85 100644 --- a/doc/man/restic-mount.1 +++ b/doc/man/restic-mount.1 @@ -64,6 +64,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -193,6 +194,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-options.1 b/doc/man/restic-options.1 new file mode 100644 index 00000000000..8ea8bea63c7 --- /dev/null +++ b/doc/man/restic-options.1 @@ -0,0 +1,135 @@ +.nh +.TH "restic backup" "1" "Jan 2017" "generated by \fBrestic generate\fR" "" + +.SH NAME +.PP +restic-options - Print list of extended options + + +.SH SYNOPSIS +.PP +\fBrestic options [flags]\fP + + +.SH DESCRIPTION +.PP +The "options" command prints a list of extended options. + + +.SH EXIT STATUS +.PP +Exit status is 0 if the command was successful. 
+Exit status is 1 if there was any error. + + +.SH OPTIONS +.PP +\fB-h\fP, \fB--help\fP[=false] + help for options + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB--cacert\fP=[] + \fBfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT) + +.PP +\fB--cache-dir\fP="" + set the cache \fBdirectory\fR\&. (default: use system default cache directory) + +.PP +\fB--cleanup-cache\fP[=false] + auto remove old cache directories + +.PP +\fB--compression\fP=auto + compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) + +.PP +\fB--http-user-agent\fP="" + set a http user agent for outgoing http requests + +.PP +\fB--insecure-no-password\fP[=false] + use an empty password for the repository, must be passed to every restic command (insecure) + +.PP +\fB--insecure-tls\fP[=false] + skip TLS certificate verification when connecting to the repository (insecure) + +.PP +\fB--json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB--key-hint\fP="" + \fBkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + +.PP +\fB--limit-download\fP=0 + limits downloads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--limit-upload\fP=0 + limits uploads to a maximum \fBrate\fR in KiB/s. (default: unlimited) + +.PP +\fB--no-cache\fP[=false] + do not use a local cache + +.PP +\fB--no-extra-verify\fP[=false] + skip additional verification of data before upload (see documentation) + +.PP +\fB--no-lock\fP[=false] + do not lock the repository, this allows some operations on read-only repositories + +.PP +\fB-o\fP, \fB--option\fP=[] + set extended option (\fBkey=value\fR, can be specified multiple times) + +.PP +\fB--pack-size\fP=0 + set target pack \fBsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE) + +.PP +\fB--password-command\fP="" + shell \fBcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND) + +.PP +\fB-p\fP, \fB--password-file\fP="" + \fBfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE) + +.PP +\fB-q\fP, \fB--quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB-r\fP, \fB--repo\fP="" + \fBrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY) + +.PP +\fB--repository-file\fP="" + \fBfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE) + +.PP +\fB--retry-lock\fP=0s + retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) + +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + +.PP +\fB--tls-client-cert\fP="" + path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) + +.PP +\fB-v\fP, \fB--verbose\fP[=0] + be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-prune.1 b/doc/man/restic-prune.1 index 7e16748ab2b..1ee262b61a1 100644 --- a/doc/man/restic-prune.1 +++ b/doc/man/restic-prune.1 @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -148,6 +149,10 @@ Exit status is 11 if the repository is already locked. 
\fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-recover.1 b/doc/man/restic-recover.1 index 0529360aecf..382a91cebd0 100644 --- a/doc/man/restic-recover.1 +++ b/doc/man/restic-recover.1 @@ -24,6 +24,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -121,6 +122,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-repair-index.1 b/doc/man/restic-repair-index.1 index 60327a91692..341f90d5926 100644 --- a/doc/man/restic-repair-index.1 +++ b/doc/man/restic-repair-index.1 @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -124,6 +125,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-repair-packs.1 b/doc/man/restic-repair-packs.1 index 01a2f65407b..d0091725bc2 100644 --- a/doc/man/restic-repair-packs.1 +++ b/doc/man/restic-repair-packs.1 @@ -23,6 +23,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -120,6 +121,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-repair-snapshots.1 b/doc/man/restic-repair-snapshots.1 index c4439f1314e..d9e12ddf104 100644 --- a/doc/man/restic-repair-snapshots.1 +++ b/doc/man/restic-repair-snapshots.1 @@ -41,6 +41,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. 
.SH OPTIONS @@ -158,6 +159,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-repair.1 b/doc/man/restic-repair.1 index 7fa313aab7c..b0656248681 100644 --- a/doc/man/restic-repair.1 +++ b/doc/man/restic-repair.1 @@ -111,6 +111,10 @@ Repair the repository \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-restore.1 b/doc/man/restic-restore.1 index 876b18bf805..e9ef4ef94c3 100644 --- a/doc/man/restic-restore.1 +++ b/doc/man/restic-restore.1 @@ -31,6 +31,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -196,6 +197,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-rewrite.1 b/doc/man/restic-rewrite.1 index d3dd9243677..c0d4a7e1a8c 100644 --- a/doc/man/restic-rewrite.1 +++ b/doc/man/restic-rewrite.1 @@ -39,6 +39,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -180,6 +181,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-self-update.1 b/doc/man/restic-self-update.1 index e6dd4faf25f..d475f13cb00 100644 --- a/doc/man/restic-self-update.1 +++ b/doc/man/restic-self-update.1 @@ -25,6 +25,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -126,6 +127,10 @@ Exit status is 11 if the repository is already locked. 
\fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-snapshots.1 b/doc/man/restic-snapshots.1 index 25d5274e3e4..f59240b444e 100644 --- a/doc/man/restic-snapshots.1 +++ b/doc/man/restic-snapshots.1 @@ -22,6 +22,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -143,6 +144,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-stats.1 b/doc/man/restic-stats.1 index fe4074ca52d..1e6e79dac85 100644 --- a/doc/man/restic-stats.1 +++ b/doc/man/restic-stats.1 @@ -52,6 +52,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -165,6 +166,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-tag.1 b/doc/man/restic-tag.1 index 7ab1911e5a4..89c677867e3 100644 --- a/doc/man/restic-tag.1 +++ b/doc/man/restic-tag.1 @@ -29,6 +29,7 @@ Exit status is 0 if the command was successful. Exit status is 1 if there was any error. Exit status is 10 if the repository does not exist. Exit status is 11 if the repository is already locked. +Exit status is 12 if the password is incorrect. .SH OPTIONS @@ -150,6 +151,10 @@ Exit status is 11 if the repository is already locked. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-unlock.1 b/doc/man/restic-unlock.1 index a24a4f81535..74679ef915f 100644 --- a/doc/man/restic-unlock.1 +++ b/doc/man/restic-unlock.1 @@ -121,6 +121,10 @@ Exit status is 1 if there was any error. 
\fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic-version.1 b/doc/man/restic-version.1 index e9df439edb0..8d5fe6c65b6 100644 --- a/doc/man/restic-version.1 +++ b/doc/man/restic-version.1 @@ -118,6 +118,10 @@ Exit status is 1 if there was any error. \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) diff --git a/doc/man/restic.1 b/doc/man/restic.1 index ee423c6ad99..bd8009aac43 100644 --- a/doc/man/restic.1 +++ b/doc/man/restic.1 @@ -113,6 +113,10 @@ The full documentation can be found at https://restic.readthedocs.io/ . \fB--retry-lock\fP=0s retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries) +.PP +\fB--stuck-request-timeout\fP=5m0s + \fBduration\fR after which to retry stuck requests + .PP \fB--tls-client-cert\fP="" path to a \fBfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT) @@ -124,4 +128,4 @@ The full documentation can be found at https://restic.readthedocs.io/ . .SH SEE ALSO .PP -\fBrestic-backup(1)\fP, \fBrestic-cache(1)\fP, \fBrestic-cat(1)\fP, \fBrestic-check(1)\fP, \fBrestic-copy(1)\fP, \fBrestic-diff(1)\fP, \fBrestic-dump(1)\fP, \fBrestic-find(1)\fP, \fBrestic-forget(1)\fP, \fBrestic-generate(1)\fP, \fBrestic-init(1)\fP, \fBrestic-key(1)\fP, \fBrestic-list(1)\fP, \fBrestic-ls(1)\fP, \fBrestic-migrate(1)\fP, \fBrestic-mount(1)\fP, \fBrestic-prune(1)\fP, \fBrestic-recover(1)\fP, \fBrestic-repair(1)\fP, \fBrestic-restore(1)\fP, \fBrestic-rewrite(1)\fP, \fBrestic-self-update(1)\fP, \fBrestic-snapshots(1)\fP, \fBrestic-stats(1)\fP, \fBrestic-tag(1)\fP, \fBrestic-unlock(1)\fP, \fBrestic-version(1)\fP +\fBrestic-backup(1)\fP, \fBrestic-cache(1)\fP, \fBrestic-cat(1)\fP, \fBrestic-check(1)\fP, \fBrestic-copy(1)\fP, \fBrestic-diff(1)\fP, \fBrestic-dump(1)\fP, \fBrestic-features(1)\fP, \fBrestic-find(1)\fP, \fBrestic-forget(1)\fP, \fBrestic-generate(1)\fP, \fBrestic-init(1)\fP, \fBrestic-key(1)\fP, \fBrestic-list(1)\fP, \fBrestic-ls(1)\fP, \fBrestic-migrate(1)\fP, \fBrestic-mount(1)\fP, \fBrestic-options(1)\fP, \fBrestic-prune(1)\fP, \fBrestic-recover(1)\fP, \fBrestic-repair(1)\fP, \fBrestic-restore(1)\fP, \fBrestic-rewrite(1)\fP, \fBrestic-self-update(1)\fP, \fBrestic-snapshots(1)\fP, \fBrestic-stats(1)\fP, \fBrestic-tag(1)\fP, \fBrestic-unlock(1)\fP, \fBrestic-version(1)\fP From 975aa41e1e6a1c88deb501451f23cbdbb013f1da Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 5 Sep 2024 21:25:21 +0200 Subject: [PATCH 675/893] Add version for 0.17.1 --- VERSION | 2 +- cmd/restic/global.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index a0073758b8a..7cca7711a0d 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.17.0-dev +0.17.1 diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 375b57f98bd..9df009d8ca8 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -47,7 
+47,7 @@ import ( // to a missing backend storage location or config file var ErrNoRepository = errors.New("repository does not exist") -var version = "0.17.0-dev (compiled manually)" +var version = "0.17.1" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" From 76a647febf5d0e58e26b3d6561cfcb37ddb97135 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 5 Sep 2024 21:25:24 +0200 Subject: [PATCH 676/893] Set development version for 0.17.1 --- VERSION | 2 +- cmd/restic/global.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index 7cca7711a0d..21997e69ac4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.17.1 +0.17.1-dev diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 9df009d8ca8..99f9df8cf5a 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -47,7 +47,7 @@ import ( // to a missing backend storage location or config file var ErrNoRepository = errors.New("repository does not exist") -var version = "0.17.1" +var version = "0.17.1-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" From 3df8337d6366d76e8a426045af75d8bef6c0c27f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 5 Sep 2024 22:33:57 +0200 Subject: [PATCH 677/893] Fix indentation of blockquotes in github release notes --- changelog/changelog-github.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog/changelog-github.tmpl b/changelog/changelog-github.tmpl index d19788daf53..9936da8e609 100644 --- a/changelog/changelog-github.tmpl +++ b/changelog/changelog-github.tmpl @@ -15,7 +15,7 @@ Details {{ range $entry := .Entries }}{{ with $entry }} * {{ .Type }} #{{ .PrimaryID }}: {{ .Title }} {{ range $par := .Paragraphs }} - {{ $par }} +{{ indent 3 $par }} {{ end }} {{ range $id := .Issues -}} {{ ` ` }}[#{{ $id }}](https://github.com/restic/restic/issues/{{ $id -}}) From 87f30bc7878a8bd8cd6a7cf5591f7744b8dc923a Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 1 Sep 2024 17:10:32 +0530 Subject: [PATCH 678/893] forget: indicate why the oldest snapshot in a group is kept When the oldest snapshot in the list is retained, the reason is now prefixed with "oldest" to clearly indicate why it's being kept. 
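The change in this patch boils down to prefixing the keep reason of the final entry in the (newest-to-oldest) snapshot list. A minimal, self-contained sketch of the idea follows; the plain string slice and names are illustrative only, not restic's actual ApplyPolicy types:

    package main

    import "fmt"

    func main() {
        // reasons for kept snapshots, ordered newest to oldest
        reasons := []string{"hourly snapshot", "daily snapshot", "yearly snapshot"}
        for i, r := range reasons {
            // the last entry in the list is the oldest snapshot that was kept
            if i == len(reasons)-1 {
                r = fmt.Sprintf("oldest %v", r)
            }
            fmt.Println(r)
        }
    }

Running this prints "oldest yearly snapshot" for the last entry, which is the kind of reason string the forget output now shows.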
--- internal/restic/snapshot_policy.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/restic/snapshot_policy.go b/internal/restic/snapshot_policy.go index 950c26c9182..f5c90d6e4da 100644 --- a/internal/restic/snapshot_policy.go +++ b/internal/restic/snapshot_policy.go @@ -260,6 +260,9 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason if buckets[i].Count > 0 { buckets[i].Count-- } + if nr == len(list)-1 { + b.reason = fmt.Sprintf("oldest %v", b.reason) + } keepSnapReasons = append(keepSnapReasons, b.reason) } } @@ -276,6 +279,9 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason debug.Log("keep %v, time %v, ID %v, bucker %v, val %v %v\n", b.reason, cur.Time, cur.id.Str(), i, val, b.Last) keepSnap = true bucketsWithin[i].Last = val + if nr == len(list)-1 { + b.reason = fmt.Sprintf("oldest %v", b.reason) + } keepSnapReasons = append(keepSnapReasons, fmt.Sprintf("%v %v", b.reason, b.Within)) } } From d656a508523c2ddfa7f02eab6448243ae6e285b0 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 1 Sep 2024 17:22:39 +0530 Subject: [PATCH 679/893] forget: update tests to reflect specific reasons for keeping oldest snapshots in a group --- internal/restic/testdata/policy_keep_snapshots_16 | 2 +- internal/restic/testdata/policy_keep_snapshots_17 | 2 +- internal/restic/testdata/policy_keep_snapshots_35 | 2 +- internal/restic/testdata/policy_keep_snapshots_36 | 2 +- internal/restic/testdata/policy_keep_snapshots_37 | 4 ++-- internal/restic/testdata/policy_keep_snapshots_38 | 2 +- internal/restic/testdata/policy_keep_snapshots_39 | 4 ++-- internal/restic/testdata/policy_keep_snapshots_4 | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/internal/restic/testdata/policy_keep_snapshots_16 b/internal/restic/testdata/policy_keep_snapshots_16 index da6f43a1c7c..07444102b5f 100644 --- a/internal/restic/testdata/policy_keep_snapshots_16 +++ b/internal/restic/testdata/policy_keep_snapshots_16 @@ -68,7 +68,7 @@ "paths": null }, "matches": [ - "yearly snapshot" + "oldest yearly snapshot" ], "counters": { "yearly": 6 diff --git a/internal/restic/testdata/policy_keep_snapshots_17 b/internal/restic/testdata/policy_keep_snapshots_17 index ee728d4e09b..de489d445ce 100644 --- a/internal/restic/testdata/policy_keep_snapshots_17 +++ b/internal/restic/testdata/policy_keep_snapshots_17 @@ -214,7 +214,7 @@ "paths": null }, "matches": [ - "yearly snapshot" + "oldest yearly snapshot" ], "counters": { "yearly": 6 diff --git a/internal/restic/testdata/policy_keep_snapshots_35 b/internal/restic/testdata/policy_keep_snapshots_35 index ece4ddbd226..afc2017ddf6 100644 --- a/internal/restic/testdata/policy_keep_snapshots_35 +++ b/internal/restic/testdata/policy_keep_snapshots_35 @@ -165,7 +165,7 @@ "paths": null }, "matches": [ - "yearly within 9999y" + "oldest yearly within 9999y" ], "counters": {} } diff --git a/internal/restic/testdata/policy_keep_snapshots_36 b/internal/restic/testdata/policy_keep_snapshots_36 index cce4cf537fb..6023e993e2a 100644 --- a/internal/restic/testdata/policy_keep_snapshots_36 +++ b/internal/restic/testdata/policy_keep_snapshots_36 @@ -1978,7 +1978,7 @@ "paths": null }, "matches": [ - "last snapshot" + "oldest last snapshot" ], "counters": { "last": -1 diff --git a/internal/restic/testdata/policy_keep_snapshots_37 b/internal/restic/testdata/policy_keep_snapshots_37 index 9856a83d69b..0cf643ab6dd 100644 --- a/internal/restic/testdata/policy_keep_snapshots_37 +++ 
b/internal/restic/testdata/policy_keep_snapshots_37 @@ -2169,8 +2169,8 @@ "paths": null }, "matches": [ - "last snapshot", - "hourly snapshot" + "oldest last snapshot", + "oldest hourly snapshot" ], "counters": { "last": -1, diff --git a/internal/restic/testdata/policy_keep_snapshots_38 b/internal/restic/testdata/policy_keep_snapshots_38 index f5d7136d421..ef4eb677703 100644 --- a/internal/restic/testdata/policy_keep_snapshots_38 +++ b/internal/restic/testdata/policy_keep_snapshots_38 @@ -1708,7 +1708,7 @@ "paths": null }, "matches": [ - "hourly snapshot" + "oldest hourly snapshot" ], "counters": { "hourly": -1 diff --git a/internal/restic/testdata/policy_keep_snapshots_39 b/internal/restic/testdata/policy_keep_snapshots_39 index f5fb4b1bf6e..fc06d899456 100644 --- a/internal/restic/testdata/policy_keep_snapshots_39 +++ b/internal/restic/testdata/policy_keep_snapshots_39 @@ -238,8 +238,8 @@ "paths": null }, "matches": [ - "monthly snapshot", - "yearly snapshot" + "oldest monthly snapshot", + "oldest yearly snapshot" ], "counters": { "monthly": -1, diff --git a/internal/restic/testdata/policy_keep_snapshots_4 b/internal/restic/testdata/policy_keep_snapshots_4 index ff572d6a09a..66e5cebe25a 100644 --- a/internal/restic/testdata/policy_keep_snapshots_4 +++ b/internal/restic/testdata/policy_keep_snapshots_4 @@ -1978,7 +1978,7 @@ "paths": null }, "matches": [ - "last snapshot" + "oldest last snapshot" ], "counters": { "last": 97 From b69c6408a6cec3700915469c285c279a4356e9dc Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 7 Sep 2024 15:00:21 +0530 Subject: [PATCH 680/893] forget: make oldest snapshot marker more strict Now, a snapshot is only marked as oldest if it's the last in the list AND its values matches the last seen value for that bucket. Also, updated the corresponding golden files for the tests. 
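Put differently: within the existing keep condition (`val != b.Last || nr == len(list)-1`), the "oldest" label is now attached only when the snapshot is kept purely because it is the last one in the list, i.e. when its bucket value equals the previously kept value. A condensed, illustrative helper capturing that logic (simplified; restic's ApplyPolicy tracks this per bucket and counter):

    // keepReason reports whether a snapshot at position nr (in a list of
    // listLen snapshots, newest first) is kept for this bucket, and returns
    // its reason string, marked "oldest" only when the snapshot is kept
    // solely for being the last one in the list.
    func keepReason(reason, val, last string, nr, listLen int) (string, bool) {
        if val != last || nr == listLen-1 {
            if val == last && nr == listLen-1 {
                reason = "oldest " + reason
            }
            return reason, true
        }
        return "", false
    }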
--- internal/restic/snapshot_policy.go | 10 +++++----- internal/restic/testdata/policy_keep_snapshots_36 | 2 +- internal/restic/testdata/policy_keep_snapshots_37 | 4 ++-- internal/restic/testdata/policy_keep_snapshots_38 | 2 +- internal/restic/testdata/policy_keep_snapshots_4 | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/internal/restic/snapshot_policy.go b/internal/restic/snapshot_policy.go index f5c90d6e4da..28f871f4a7e 100644 --- a/internal/restic/snapshot_policy.go +++ b/internal/restic/snapshot_policy.go @@ -256,13 +256,13 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason if val != b.Last || nr == len(list)-1 { debug.Log("keep %v %v, bucker %v, val %v\n", cur.Time, cur.id.Str(), i, val) keepSnap = true + if val == b.Last && nr == len(list)-1 { + b.reason = fmt.Sprintf("oldest %v", b.reason) + } buckets[i].Last = val if buckets[i].Count > 0 { buckets[i].Count-- } - if nr == len(list)-1 { - b.reason = fmt.Sprintf("oldest %v", b.reason) - } keepSnapReasons = append(keepSnapReasons, b.reason) } } @@ -278,10 +278,10 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason if val != b.Last || nr == len(list)-1 { debug.Log("keep %v, time %v, ID %v, bucker %v, val %v %v\n", b.reason, cur.Time, cur.id.Str(), i, val, b.Last) keepSnap = true - bucketsWithin[i].Last = val - if nr == len(list)-1 { + if val == b.Last && nr == len(list)-1 { b.reason = fmt.Sprintf("oldest %v", b.reason) } + bucketsWithin[i].Last = val keepSnapReasons = append(keepSnapReasons, fmt.Sprintf("%v %v", b.reason, b.Within)) } } diff --git a/internal/restic/testdata/policy_keep_snapshots_36 b/internal/restic/testdata/policy_keep_snapshots_36 index 6023e993e2a..cce4cf537fb 100644 --- a/internal/restic/testdata/policy_keep_snapshots_36 +++ b/internal/restic/testdata/policy_keep_snapshots_36 @@ -1978,7 +1978,7 @@ "paths": null }, "matches": [ - "oldest last snapshot" + "last snapshot" ], "counters": { "last": -1 diff --git a/internal/restic/testdata/policy_keep_snapshots_37 b/internal/restic/testdata/policy_keep_snapshots_37 index 0cf643ab6dd..9856a83d69b 100644 --- a/internal/restic/testdata/policy_keep_snapshots_37 +++ b/internal/restic/testdata/policy_keep_snapshots_37 @@ -2169,8 +2169,8 @@ "paths": null }, "matches": [ - "oldest last snapshot", - "oldest hourly snapshot" + "last snapshot", + "hourly snapshot" ], "counters": { "last": -1, diff --git a/internal/restic/testdata/policy_keep_snapshots_38 b/internal/restic/testdata/policy_keep_snapshots_38 index ef4eb677703..f5d7136d421 100644 --- a/internal/restic/testdata/policy_keep_snapshots_38 +++ b/internal/restic/testdata/policy_keep_snapshots_38 @@ -1708,7 +1708,7 @@ "paths": null }, "matches": [ - "oldest hourly snapshot" + "hourly snapshot" ], "counters": { "hourly": -1 diff --git a/internal/restic/testdata/policy_keep_snapshots_4 b/internal/restic/testdata/policy_keep_snapshots_4 index 66e5cebe25a..ff572d6a09a 100644 --- a/internal/restic/testdata/policy_keep_snapshots_4 +++ b/internal/restic/testdata/policy_keep_snapshots_4 @@ -1978,7 +1978,7 @@ "paths": null }, "matches": [ - "oldest last snapshot" + "last snapshot" ], "counters": { "last": 97 From 5468e852220ea52afd94f2222b42a6f54dc4f93f Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sat, 7 Sep 2024 15:06:21 +0530 Subject: [PATCH 681/893] docs: mention that the oldest snapshot is marked `oldest` in the reasons of the forget comman --- doc/060_forget.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/doc/060_forget.rst b/doc/060_forget.rst index fe0236f124d..b211148cb39 100644 --- a/doc/060_forget.rst +++ b/doc/060_forget.rst @@ -214,7 +214,8 @@ The ``forget`` command accepts the following policy options: run) and these snapshots will hence not be removed. .. note:: If there are not enough snapshots to keep one for each duration related - ``--keep-{within-,}*`` option, the oldest snapshot is kept additionally. + ``--keep-{within-,}*`` option, the oldest snapshot is kept additionally and + marked as ``oldest`` in the output (e.g. ``oldest hourly snapshot``). .. note:: Specifying ``--keep-tag ''`` will match untagged snapshots only. From 113439c69bf8f48d9d71761bca54ad9aa9171509 Mon Sep 17 00:00:00 2001 From: Git'Fellow <12234510+solracsf@users.noreply.github.com> Date: Sat, 7 Sep 2024 15:27:15 +0200 Subject: [PATCH 682/893] fix: shorten sentence --- doc/030_preparing_a_new_repo.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 06b38ac052a..0b35d1a1ee6 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -464,10 +464,10 @@ Backblaze B2 Different from the B2 backend, restic's S3 backend will only hide no longer necessary files. By default, Backblaze B2 retains all of the different versions of the - files and "hides" the older versions. Thus, to make sure to setup B2 lifecycle rules to - delete hidden files, the B2 lifecycle setting "Keep only the last version of the file" - is **recommended**. The previous version of the file is "hidden" for one day and then - deleted automatically by B2. More details at the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules). + files and "hides" the older versions. Thus, to free space occupied by hidden files, + it is **recommended** to use the B2 lifecycle "Keep only the last version of the file". + The previous version of the file is "hidden" for one day and then deleted automatically + by B2. More details at the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules). Restic can backup data to any Backblaze B2 bucket. You need to first setup the following environment variables with the credentials you can find in the From 34fe73ea4299604af46cb685c9feb8f7f9065723 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 7 Sep 2024 16:37:26 +0200 Subject: [PATCH 683/893] fs: retry preallocate on Linux if interrupted by signal --- internal/fs/preallocate_linux.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/internal/fs/preallocate_linux.go b/internal/fs/preallocate_linux.go index 30b9e46441c..7b044950796 100644 --- a/internal/fs/preallocate_linux.go +++ b/internal/fs/preallocate_linux.go @@ -2,6 +2,7 @@ package fs import ( "os" + "syscall" "golang.org/x/sys/unix" ) @@ -12,5 +13,17 @@ func PreallocateFile(wr *os.File, size int64) error { } // int fallocate(int fd, int mode, off_t offset, off_t len) // use mode = 0 to also change the file size - return unix.Fallocate(int(wr.Fd()), 0, 0, size) + return ignoringEINTR(func() error { return unix.Fallocate(int(wr.Fd()), 0, 0, size) }) +} + +// ignoringEINTR makes a function call and repeats it if it returns +// an EINTR error. 
+// copied from /usr/lib/go/src/internal/poll/fd_posix.go of go 1.23.1 +func ignoringEINTR(fn func() error) error { + for { + err := fn() + if err != syscall.EINTR { + return err + } + } } From 6f08dbb2d7741d5f5967d5604199b2d3c78fa4f3 Mon Sep 17 00:00:00 2001 From: Roman Inflianskas Date: Fri, 13 Sep 2024 12:22:53 +0300 Subject: [PATCH 684/893] list: add subcommand completion --- cmd/restic/cmd_list.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index f6c07d66f51..6150f37a0c2 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -2,6 +2,7 @@ package main import ( "context" + "strings" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository/index" @@ -10,8 +11,11 @@ import ( "github.com/spf13/cobra" ) +var listAllowedArgs = []string{"blobs", "packs", "index", "snapshots", "keys", "locks"} +var listAllowedArgsUseString = strings.Join(listAllowedArgs, "|") + var cmdList = &cobra.Command{ - Use: "list [flags] [blobs|packs|index|snapshots|keys|locks]", + Use: "list [flags] [" + listAllowedArgsUseString + "]", Short: "List objects in the repository", Long: ` The "list" command allows listing objects in the repository based on type. @@ -30,6 +34,7 @@ Exit status is 12 if the password is incorrect. RunE: func(cmd *cobra.Command, args []string) error { return runList(cmd.Context(), globalOptions, args) }, + ValidArgs: listAllowedArgs, } func init() { From a84e65b7f95390e438c04eb86b594eca3671ed3a Mon Sep 17 00:00:00 2001 From: Roman Inflianskas Date: Fri, 13 Sep 2024 12:23:26 +0300 Subject: [PATCH 685/893] list: validate subcommand --- cmd/restic/cmd_list.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 6150f37a0c2..d66cddc4f62 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -35,6 +35,7 @@ Exit status is 12 if the password is incorrect. return runList(cmd.Context(), globalOptions, args) }, ValidArgs: listAllowedArgs, + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), } func init() { From 4795143d6d76ee3d292afbbb7b6dfee16f807ee9 Mon Sep 17 00:00:00 2001 From: Damien Clark Date: Tue, 10 Sep 2024 17:14:07 +1000 Subject: [PATCH 686/893] cache: fix race condition in cache cleanup Fix multiple restic processes executing concurrently and racing to remove obsolete snapshots. Co-authored-by: Michael Eischer --- changelog/unreleased/pull-5047 | 7 +++++++ internal/backend/cache/file.go | 4 ++++ 2 files changed, 11 insertions(+) create mode 100644 changelog/unreleased/pull-5047 diff --git a/changelog/unreleased/pull-5047 b/changelog/unreleased/pull-5047 new file mode 100644 index 00000000000..ee50c6ec752 --- /dev/null +++ b/changelog/unreleased/pull-5047 @@ -0,0 +1,7 @@ +Bugfix: Fix possible error on concurrent cache cleanup + +Fix for multiple restic processes executing concurrently and racing to +remove obsolete snapshots from the local backend cache. Restic now suppresses the `no +such file or directory` error. 
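The fix in the diff below tolerates cache entries that vanish while the cache directory is being walked. A standalone sketch of the same pattern, using only the standard library (the directory path is a placeholder):

    package main

    import (
        "errors"
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        err := filepath.Walk("/path/to/cache", func(name string, fi os.FileInfo, err error) error {
            if err != nil {
                // a concurrent process may have deleted the entry between
                // listing the directory and stat'ing the file; treat a
                // missing entry as "nothing to clean up" instead of failing
                if errors.Is(err, os.ErrNotExist) {
                    return nil
                }
                return err
            }
            fmt.Println(name)
            return nil
        })
        if err != nil {
            fmt.Fprintln(os.Stderr, "walk failed:", err)
        }
    }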
+ +https://github.com/restic/restic/pull/5047 diff --git a/internal/backend/cache/file.go b/internal/backend/cache/file.go index 41fd0b49be5..062d6ea3f64 100644 --- a/internal/backend/cache/file.go +++ b/internal/backend/cache/file.go @@ -210,6 +210,10 @@ func (c *Cache) list(t restic.FileType) (restic.IDSet, error) { dir := filepath.Join(c.path, cacheLayoutPaths[t]) err := filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error { if err != nil { + // ignore ErrNotExist to gracefully handle multiple processes clearing the cache + if errors.Is(err, os.ErrNotExist) { + return nil + } return errors.Wrap(err, "Walk") } From 6ec2b62ec5fc0059f88f35d525a70929ab41d976 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 9 Sep 2024 22:15:30 +0200 Subject: [PATCH 687/893] fuse: cache fs.Node instances A particular node should always be represented by a single instance. This is necessary to allow the fuse library to assign a stable nodeId to a node. macOS Sonoma trips over the previous, unstable behavior when using fuse-t. --- internal/fuse/dir.go | 43 +++++++++++++++++++--------------- internal/fuse/snapshots_dir.go | 14 +++++++---- internal/fuse/tree_cache.go | 38 ++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 24 deletions(-) create mode 100644 internal/fuse/tree_cache.go diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go index 62298cf249d..330a6885747 100644 --- a/internal/fuse/dir.go +++ b/internal/fuse/dir.go @@ -29,6 +29,7 @@ type dir struct { parentInode uint64 node *restic.Node m sync.Mutex + cache treeCache } func cleanupNodeName(name string) string { @@ -43,6 +44,7 @@ func newDir(root *Root, inode, parentInode uint64, node *restic.Node) (*dir, err node: node, inode: inode, parentInode: parentInode, + cache: *newTreeCache(), }, nil } @@ -87,6 +89,7 @@ func newDirFromSnapshot(root *Root, inode uint64, snapshot *restic.Snapshot) (*d Subtree: snapshot.Tree, }, inode: inode, + cache: *newTreeCache(), }, nil } @@ -208,25 +211,27 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { return nil, err } - node, ok := d.items[name] - if !ok { - debug.Log(" Lookup(%v) -> not found", name) - return nil, syscall.ENOENT - } - inode := inodeFromNode(d.inode, node) - switch node.Type { - case restic.NodeTypeDir: - return newDir(d.root, inode, d.inode, node) - case restic.NodeTypeFile: - return newFile(d.root, inode, node) - case restic.NodeTypeSymlink: - return newLink(d.root, inode, node) - case restic.NodeTypeDev, restic.NodeTypeCharDev, restic.NodeTypeFifo, restic.NodeTypeSocket: - return newOther(d.root, inode, node) - default: - debug.Log(" node %v has unknown type %v", name, node.Type) - return nil, syscall.ENOENT - } + return d.cache.lookupOrCreate(name, func() (fs.Node, error) { + node, ok := d.items[name] + if !ok { + debug.Log(" Lookup(%v) -> not found", name) + return nil, syscall.ENOENT + } + inode := inodeFromNode(d.inode, node) + switch node.Type { + case restic.NodeTypeDir: + return newDir(d.root, inode, d.inode, node) + case restic.NodeTypeFile: + return newFile(d.root, inode, node) + case restic.NodeTypeSymlink: + return newLink(d.root, inode, node) + case restic.NodeTypeDev, restic.NodeTypeCharDev, restic.NodeTypeFifo, restic.NodeTypeSocket: + return newOther(d.root, inode, node) + default: + debug.Log(" node %v has unknown type %v", name, node.Type) + return nil, syscall.ENOENT + } + }) } func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { diff --git 
a/internal/fuse/snapshots_dir.go b/internal/fuse/snapshots_dir.go index 4cae7106c47..cfe1f782a26 100644 --- a/internal/fuse/snapshots_dir.go +++ b/internal/fuse/snapshots_dir.go @@ -23,6 +23,7 @@ type SnapshotsDir struct { parentInode uint64 dirStruct *SnapshotsDirStructure prefix string + cache treeCache } // ensure that *SnapshotsDir implements these interfaces @@ -38,6 +39,7 @@ func NewSnapshotsDir(root *Root, inode, parentInode uint64, dirStruct *Snapshots parentInode: parentInode, dirStruct: dirStruct, prefix: prefix, + cache: *newTreeCache(), } } @@ -107,8 +109,12 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) return nil, syscall.ENOENT } - entry := meta.names[name] - if entry != nil { + return d.cache.lookupOrCreate(name, func() (fs.Node, error) { + entry := meta.names[name] + if entry == nil { + return nil, syscall.ENOENT + } + inode := inodeFromName(d.inode, name) if entry.linkTarget != "" { return newSnapshotLink(d.root, inode, entry.linkTarget, entry.snapshot) @@ -116,9 +122,7 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) return newDirFromSnapshot(d.root, inode, entry.snapshot) } return NewSnapshotsDir(d.root, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil - } - - return nil, syscall.ENOENT + }) } // SnapshotLink diff --git a/internal/fuse/tree_cache.go b/internal/fuse/tree_cache.go new file mode 100644 index 00000000000..addc54a469d --- /dev/null +++ b/internal/fuse/tree_cache.go @@ -0,0 +1,38 @@ +//go:build darwin || freebsd || linux +// +build darwin freebsd linux + +package fuse + +import ( + "sync" + + "github.com/anacrolix/fuse/fs" +) + +type treeCache struct { + nodes map[string]fs.Node + m sync.Mutex +} + +func newTreeCache() *treeCache { + return &treeCache{ + nodes: map[string]fs.Node{}, + } +} + +func (t *treeCache) lookupOrCreate(name string, create func() (fs.Node, error)) (fs.Node, error) { + t.m.Lock() + defer t.m.Unlock() + + if node, ok := t.nodes[name]; ok { + return node, nil + } + + node, err := create() + if err != nil { + return nil, err + } + + t.nodes[name] = node + return node, nil +} From e9940f39dc506fb568a637cde106caf499c2b10c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 9 Sep 2024 22:37:08 +0200 Subject: [PATCH 688/893] fuse: add missing type assertion for optional interfaces --- internal/fuse/dir.go | 2 ++ internal/fuse/link.go | 2 ++ internal/fuse/other.go | 4 ++++ 3 files changed, 8 insertions(+) diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go index 330a6885747..c562ef8c3ea 100644 --- a/internal/fuse/dir.go +++ b/internal/fuse/dir.go @@ -20,6 +20,8 @@ import ( // Statically ensure that *dir implement those interface var _ = fs.HandleReadDirAller(&dir{}) +var _ = fs.NodeGetxattrer(&dir{}) +var _ = fs.NodeListxattrer(&dir{}) var _ = fs.NodeStringLookuper(&dir{}) type dir struct { diff --git a/internal/fuse/link.go b/internal/fuse/link.go index 3aea8b06e91..975e640ea1e 100644 --- a/internal/fuse/link.go +++ b/internal/fuse/link.go @@ -12,6 +12,8 @@ import ( ) // Statically ensure that *link implements the given interface +var _ = fs.NodeGetxattrer(&link{}) +var _ = fs.NodeListxattrer(&link{}) var _ = fs.NodeReadlinker(&link{}) type link struct { diff --git a/internal/fuse/other.go b/internal/fuse/other.go index f536de5c1be..d459d0efdbe 100644 --- a/internal/fuse/other.go +++ b/internal/fuse/other.go @@ -7,9 +7,13 @@ import ( "context" "github.com/anacrolix/fuse" + "github.com/anacrolix/fuse/fs" "github.com/restic/restic/internal/restic" ) +// 
Statically ensure that *other implements the given interface +var _ = fs.NodeReadlinker(&other{}) + type other struct { root *Root node *restic.Node From 51173c5003daf40eb4b2612e6d012ca4545fafc6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 9 Sep 2024 22:37:51 +0200 Subject: [PATCH 689/893] fuse: forget fs.Node instances on request by the kernel Forget fs.Node instances once the kernel frees the corresponding nodeId. This ensures that restic does not run out of memory on large snapshots. --- internal/fuse/dir.go | 24 ++++++++++++++++-------- internal/fuse/file.go | 23 +++++++++++++++-------- internal/fuse/fuse_test.go | 6 +++--- internal/fuse/link.go | 16 +++++++++++----- internal/fuse/other.go | 16 +++++++++++----- internal/fuse/root.go | 2 +- internal/fuse/snapshots_dir.go | 27 ++++++++++++++++++++------- internal/fuse/tree_cache.go | 11 +++++++++-- 8 files changed, 86 insertions(+), 39 deletions(-) diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go index c562ef8c3ea..a0317a757dc 100644 --- a/internal/fuse/dir.go +++ b/internal/fuse/dir.go @@ -20,12 +20,14 @@ import ( // Statically ensure that *dir implement those interface var _ = fs.HandleReadDirAller(&dir{}) +var _ = fs.NodeForgetter(&dir{}) var _ = fs.NodeGetxattrer(&dir{}) var _ = fs.NodeListxattrer(&dir{}) var _ = fs.NodeStringLookuper(&dir{}) type dir struct { root *Root + forget forgetFn items map[string]*restic.Node inode uint64 parentInode uint64 @@ -38,11 +40,12 @@ func cleanupNodeName(name string) string { return filepath.Base(name) } -func newDir(root *Root, inode, parentInode uint64, node *restic.Node) (*dir, error) { +func newDir(root *Root, forget forgetFn, inode, parentInode uint64, node *restic.Node) (*dir, error) { debug.Log("new dir for %v (%v)", node.Name, node.Subtree) return &dir{ root: root, + forget: forget, node: node, inode: inode, parentInode: parentInode, @@ -79,10 +82,11 @@ func replaceSpecialNodes(ctx context.Context, repo restic.BlobLoader, node *rest return tree.Nodes, nil } -func newDirFromSnapshot(root *Root, inode uint64, snapshot *restic.Snapshot) (*dir, error) { +func newDirFromSnapshot(root *Root, forget forgetFn, inode uint64, snapshot *restic.Snapshot) (*dir, error) { debug.Log("new dir for snapshot %v (%v)", snapshot.ID(), snapshot.Tree) return &dir{ - root: root, + root: root, + forget: forget, node: &restic.Node{ AccessTime: snapshot.Time, ModTime: snapshot.Time, @@ -213,7 +217,7 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { return nil, err } - return d.cache.lookupOrCreate(name, func() (fs.Node, error) { + return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) { node, ok := d.items[name] if !ok { debug.Log(" Lookup(%v) -> not found", name) @@ -222,13 +226,13 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { inode := inodeFromNode(d.inode, node) switch node.Type { case restic.NodeTypeDir: - return newDir(d.root, inode, d.inode, node) + return newDir(d.root, forget, inode, d.inode, node) case restic.NodeTypeFile: - return newFile(d.root, inode, node) + return newFile(d.root, forget, inode, node) case restic.NodeTypeSymlink: - return newLink(d.root, inode, node) + return newLink(d.root, forget, inode, node) case restic.NodeTypeDev, restic.NodeTypeCharDev, restic.NodeTypeFifo, restic.NodeTypeSocket: - return newOther(d.root, inode, node) + return newOther(d.root, forget, inode, node) default: debug.Log(" node %v has unknown type %v", name, node.Type) return nil, syscall.ENOENT @@ -244,3 +248,7 @@ func 
(d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fus func (d *dir) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(d.node, req, resp) } + +func (d *dir) Forget() { + d.forget() +} diff --git a/internal/fuse/file.go b/internal/fuse/file.go index 494fca283ba..a69471f8372 100644 --- a/internal/fuse/file.go +++ b/internal/fuse/file.go @@ -20,14 +20,16 @@ const blockSize = 512 // Statically ensure that *file and *openFile implement the given interfaces var _ = fs.HandleReader(&openFile{}) -var _ = fs.NodeListxattrer(&file{}) +var _ = fs.NodeForgetter(&file{}) var _ = fs.NodeGetxattrer(&file{}) +var _ = fs.NodeListxattrer(&file{}) var _ = fs.NodeOpener(&file{}) type file struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } type openFile struct { @@ -36,12 +38,13 @@ type openFile struct { cumsize []uint64 } -func newFile(root *Root, inode uint64, node *restic.Node) (fusefile *file, err error) { +func newFile(root *Root, forget forgetFn, inode uint64, node *restic.Node) (fusefile *file, err error) { debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content)) return &file{ - inode: inode, - root: root, - node: node, + inode: inode, + forget: forget, + root: root, + node: node, }, nil } @@ -172,3 +175,7 @@ func (f *file) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fu func (f *file) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(f.node, req, resp) } + +func (f *file) Forget() { + f.forget() +} diff --git a/internal/fuse/fuse_test.go b/internal/fuse/fuse_test.go index bbdfe6b3ff6..b31dd155d3b 100644 --- a/internal/fuse/fuse_test.go +++ b/internal/fuse/fuse_test.go @@ -119,7 +119,7 @@ func TestFuseFile(t *testing.T) { root := &Root{repo: repo, blobCache: bloblru.New(blobCacheSize)} inode := inodeFromNode(1, node) - f, err := newFile(root, inode, node) + f, err := newFile(root, func() {}, inode, node) rtest.OK(t, err) of, err := f.Open(context.TODO(), nil, nil) rtest.OK(t, err) @@ -162,7 +162,7 @@ func TestFuseDir(t *testing.T) { } parentInode := inodeFromName(0, "parent") inode := inodeFromName(1, "foo") - d, err := newDir(root, inode, parentInode, node) + d, err := newDir(root, func() {}, inode, parentInode, node) rtest.OK(t, err) // don't open the directory as that would require setting up a proper tree blob @@ -276,7 +276,7 @@ func TestLink(t *testing.T) { {Name: "foo", Value: []byte("bar")}, }} - lnk, err := newLink(&Root{}, 42, node) + lnk, err := newLink(&Root{}, func() {}, 42, node) rtest.OK(t, err) target, err := lnk.Readlink(context.TODO(), nil) rtest.OK(t, err) diff --git a/internal/fuse/link.go b/internal/fuse/link.go index 975e640ea1e..f8bf8d3ee11 100644 --- a/internal/fuse/link.go +++ b/internal/fuse/link.go @@ -12,18 +12,20 @@ import ( ) // Statically ensure that *link implements the given interface +var _ = fs.NodeForgetter(&link{}) var _ = fs.NodeGetxattrer(&link{}) var _ = fs.NodeListxattrer(&link{}) var _ = fs.NodeReadlinker(&link{}) type link struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } -func newLink(root *Root, inode uint64, node *restic.Node) (*link, error) { - return &link{root: root, inode: inode, node: node}, nil +func newLink(root *Root, forget forgetFn, inode uint64, node *restic.Node) (*link, error) { + return &link{root: root, forget: 
forget, inode: inode, node: node}, nil } func (l *link) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) { @@ -57,3 +59,7 @@ func (l *link) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fu func (l *link) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(l.node, req, resp) } + +func (l *link) Forget() { + l.forget() +} diff --git a/internal/fuse/other.go b/internal/fuse/other.go index d459d0efdbe..cbd9667ccf1 100644 --- a/internal/fuse/other.go +++ b/internal/fuse/other.go @@ -12,16 +12,18 @@ import ( ) // Statically ensure that *other implements the given interface +var _ = fs.NodeForgetter(&other{}) var _ = fs.NodeReadlinker(&other{}) type other struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } -func newOther(root *Root, inode uint64, node *restic.Node) (*other, error) { - return &other{root: root, inode: inode, node: node}, nil +func newOther(root *Root, forget forgetFn, inode uint64, node *restic.Node) (*other, error) { + return &other{root: root, forget: forget, inode: inode, node: node}, nil } func (l *other) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) { @@ -44,3 +46,7 @@ func (l *other) Attr(_ context.Context, a *fuse.Attr) error { return nil } + +func (l *other) Forget() { + l.forget() +} diff --git a/internal/fuse/root.go b/internal/fuse/root.go index ab6116f0dcc..72a0634fca5 100644 --- a/internal/fuse/root.go +++ b/internal/fuse/root.go @@ -66,7 +66,7 @@ func NewRoot(repo restic.Repository, cfg Config) *Root { } } - root.SnapshotsDir = NewSnapshotsDir(root, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "") + root.SnapshotsDir = NewSnapshotsDir(root, func() {}, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "") return root } diff --git a/internal/fuse/snapshots_dir.go b/internal/fuse/snapshots_dir.go index cfe1f782a26..bcab160849e 100644 --- a/internal/fuse/snapshots_dir.go +++ b/internal/fuse/snapshots_dir.go @@ -19,6 +19,7 @@ import ( // It uses the saved prefix to select the corresponding MetaDirData. 
type SnapshotsDir struct { root *Root + forget forgetFn inode uint64 parentInode uint64 dirStruct *SnapshotsDirStructure @@ -28,13 +29,15 @@ type SnapshotsDir struct { // ensure that *SnapshotsDir implements these interfaces var _ = fs.HandleReadDirAller(&SnapshotsDir{}) +var _ = fs.NodeForgetter(&SnapshotsDir{}) var _ = fs.NodeStringLookuper(&SnapshotsDir{}) // NewSnapshotsDir returns a new directory structure containing snapshots and "latest" links -func NewSnapshotsDir(root *Root, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir { +func NewSnapshotsDir(root *Root, forget forgetFn, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir { debug.Log("create snapshots dir, inode %d", inode) return &SnapshotsDir{ root: root, + forget: forget, inode: inode, parentInode: parentInode, dirStruct: dirStruct, @@ -109,7 +112,7 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) return nil, syscall.ENOENT } - return d.cache.lookupOrCreate(name, func() (fs.Node, error) { + return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) { entry := meta.names[name] if entry == nil { return nil, syscall.ENOENT @@ -117,27 +120,33 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) inode := inodeFromName(d.inode, name) if entry.linkTarget != "" { - return newSnapshotLink(d.root, inode, entry.linkTarget, entry.snapshot) + return newSnapshotLink(d.root, forget, inode, entry.linkTarget, entry.snapshot) } else if entry.snapshot != nil { - return newDirFromSnapshot(d.root, inode, entry.snapshot) + return newDirFromSnapshot(d.root, forget, inode, entry.snapshot) } - return NewSnapshotsDir(d.root, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil + return NewSnapshotsDir(d.root, forget, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil }) } +func (d *SnapshotsDir) Forget() { + d.forget() +} + // SnapshotLink type snapshotLink struct { root *Root + forget forgetFn inode uint64 target string snapshot *restic.Snapshot } +var _ = fs.NodeForgetter(&snapshotLink{}) var _ = fs.NodeReadlinker(&snapshotLink{}) // newSnapshotLink -func newSnapshotLink(root *Root, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) { - return &snapshotLink{root: root, inode: inode, target: target, snapshot: snapshot}, nil +func newSnapshotLink(root *Root, forget forgetFn, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) { + return &snapshotLink{root: root, forget: forget, inode: inode, target: target, snapshot: snapshot}, nil } // Readlink @@ -161,3 +170,7 @@ func (l *snapshotLink) Attr(_ context.Context, a *fuse.Attr) error { return nil } + +func (l *snapshotLink) Forget() { + l.forget() +} diff --git a/internal/fuse/tree_cache.go b/internal/fuse/tree_cache.go index addc54a469d..d913f9b81b3 100644 --- a/internal/fuse/tree_cache.go +++ b/internal/fuse/tree_cache.go @@ -14,13 +14,15 @@ type treeCache struct { m sync.Mutex } +type forgetFn func() + func newTreeCache() *treeCache { return &treeCache{ nodes: map[string]fs.Node{}, } } -func (t *treeCache) lookupOrCreate(name string, create func() (fs.Node, error)) (fs.Node, error) { +func (t *treeCache) lookupOrCreate(name string, create func(forget forgetFn) (fs.Node, error)) (fs.Node, error) { t.m.Lock() defer t.m.Unlock() @@ -28,7 +30,12 @@ func (t *treeCache) lookupOrCreate(name string, create func() (fs.Node, error)) return node, nil } - node, err := create() + node, err := 
create(func() { + t.m.Lock() + defer t.m.Unlock() + + delete(t.nodes, name) + }) if err != nil { return nil, err } From 5ce6ca2219200cb2299bcf53753ccff922da2c3a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 11 Sep 2024 21:31:05 +0200 Subject: [PATCH 690/893] fuse: test that the same fs.Node is used for the same file --- internal/fuse/fuse_test.go | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/internal/fuse/fuse_test.go b/internal/fuse/fuse_test.go index b31dd155d3b..3c0648bc6c9 100644 --- a/internal/fuse/fuse_test.go +++ b/internal/fuse/fuse_test.go @@ -217,6 +217,34 @@ func testTopUIDGID(t *testing.T, cfg Config, repo restic.Repository, uid, gid ui rtest.Equals(t, uint32(0), attr.Gid) } +// The Lookup method must return the same Node object unless it was forgotten in the meantime +func testStableLookup(t *testing.T, node fs.Node, path string) fs.Node { + t.Helper() + result, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + result2, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + rtest.Assert(t, result == result2, "%v are not the same object", path) + + result2.(fs.NodeForgetter).Forget() + result2, err = node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + rtest.Assert(t, result != result2, "object for %v should change after forget", path) + return result +} + +func TestStableNodeObjects(t *testing.T) { + repo := repository.TestRepository(t) + restic.TestCreateSnapshot(t, repo, time.Unix(1460289341, 207401672), 2) + root := NewRoot(repo, Config{}) + + idsdir := testStableLookup(t, root, "ids") + snapID := loadFirstSnapshot(t, repo).ID().Str() + snapshotdir := testStableLookup(t, idsdir, snapID) + dir := testStableLookup(t, snapshotdir, "dir-0") + testStableLookup(t, dir, "file-2") +} + // Test reporting of fuse.Attr.Blocks in multiples of 512. func TestBlocks(t *testing.T) { root := &Root{} From ccf5be235a663d0d80a997b3cd54f33469eb9d7f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 11 Sep 2024 21:39:35 +0200 Subject: [PATCH 691/893] add changelog for fuse fix --- changelog/unreleased/issue-4971 | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 changelog/unreleased/issue-4971 diff --git a/changelog/unreleased/issue-4971 b/changelog/unreleased/issue-4971 new file mode 100644 index 00000000000..9ab529408a1 --- /dev/null +++ b/changelog/unreleased/issue-4971 @@ -0,0 +1,9 @@ +Bugfix: Fix unusable `mount` on macOS Sonoma + +On macOS Sonoma when using fuse-t, it was not possible to access files in +a mounted repository. + +This issue has been resolved. + +https://github.com/restic/restic/issues/4971 +https://github.com/restic/restic/pull/5048 From cab6b15603f3bb08eabfa46ceabae0be614baf6f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 14 Sep 2024 18:25:29 +0200 Subject: [PATCH 692/893] tag: fix swallowed error if repository cannot be opened --- changelog/unreleased/issue-5050 | 7 +++++++ cmd/restic/cmd_tag.go | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-5050 diff --git a/changelog/unreleased/issue-5050 b/changelog/unreleased/issue-5050 new file mode 100644 index 00000000000..9604fc857dd --- /dev/null +++ b/changelog/unreleased/issue-5050 @@ -0,0 +1,7 @@ +Bugfix: Missing error if `tag` fails to lock repository + +Since restic 0.17.0, the `tag` command did not return an error if it failed to +open or lock the repository. This has been fixed. 
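The one-line fix that follows changes a `return nil` into `return err` when opening the repository with an exclusive lock fails. Distilled, the bug class looks like this (hypothetical names, not restic's real helpers):

    // runCommand previously swallowed the error from openAndLock, so callers
    // saw a successful exit status even though nothing had been done.
    func runCommand(openAndLock func() (unlock func(), err error)) error {
        unlock, err := openAndLock()
        if err != nil {
            return err // the bug was returning nil here
        }
        defer unlock()
        // ... do the actual work ...
        return nil
    }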
+ +https://github.com/restic/restic/issues/5050 +https://github.com/restic/restic/pull/5056 diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index c7bf725e92d..8a2a83678c7 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -110,7 +110,7 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st Verbosef("create exclusive lock for repository\n") ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { - return nil + return err } defer unlock() From 7680f482588354ec343c26bed79cfacd7b8af74f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 14 Sep 2024 20:54:27 +0200 Subject: [PATCH 693/893] Changelogs should omit problem if it duplicates the new behavior When adding a new feature, the problem description often just says that feature Y was missing, followed by saying that feature Y is now supported. This duplication just makes the changelog entries unnecessarily verbose. --- changelog/TEMPLATE | 2 ++ 1 file changed, 2 insertions(+) diff --git a/changelog/TEMPLATE b/changelog/TEMPLATE index 9304359b348..7d6065e046a 100644 --- a/changelog/TEMPLATE +++ b/changelog/TEMPLATE @@ -5,6 +5,8 @@ Enhancement: Allow custom bar in the foo command # Describe the problem in the past tense, the new behavior in the present # tense. Mention the affected commands, backends, operating systems, etc. +# If the problem description just says that a feature was missing, then +# only explain the new behavior. # Focus on user-facing behavior, not the implementation. # Use "Restic now ..." instead of "We have changed ...". From bad6c54a338ce5fc986c72d4162d5d88108673cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Phillipp=20R=C3=B6ll?= Date: Fri, 13 Sep 2024 18:21:07 +0200 Subject: [PATCH 694/893] dump: add --compress-zip flag to compress zip archives --- changelog/unreleased/pull-5054 | 10 ++++++++++ cmd/restic/cmd_dump.go | 8 +++++--- internal/dump/common.go | 20 +++++++++++--------- internal/dump/common_test.go | 4 ++-- internal/dump/tar.go | 13 ++++++++++++- internal/dump/tar_test.go | 3 ++- internal/dump/zip.go | 4 ++++ internal/dump/zip_test.go | 3 ++- 8 files changed, 48 insertions(+), 17 deletions(-) create mode 100644 changelog/unreleased/pull-5054 diff --git a/changelog/unreleased/pull-5054 b/changelog/unreleased/pull-5054 new file mode 100644 index 00000000000..7b5291e2d7f --- /dev/null +++ b/changelog/unreleased/pull-5054 @@ -0,0 +1,10 @@ +Enhancement: Add `--compress` flag to `dump` command to compress archive + +Restic did not compress the archives that was created by using the +`dump` command. It now allows to save some disk space when exporting +archives by adding a `--compress` flag. The DEFLATE algorithm is used +for "zip" archives, and the gzip algorithm for "tar" archives, +resulting in a .tar.gz or .tgz file. Not compressing the archive +is still the default. + +https://github.com/restic/restic/pull/5054 diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 6b7f8d01285..23c50263559 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -50,8 +50,9 @@ Exit status is 12 if the password is incorrect. // DumpOptions collects all options for the dump command. 
type DumpOptions struct { restic.SnapshotFilter - Archive string - Target string + Archive string + Target string + Compress bool } var dumpOptions DumpOptions @@ -63,6 +64,7 @@ func init() { initSingleSnapshotFilter(flags, &dumpOptions.SnapshotFilter) flags.StringVarP(&dumpOptions.Archive, "archive", "a", "tar", "set archive `format` as \"tar\" or \"zip\"") flags.StringVarP(&dumpOptions.Target, "target", "t", "", "write the output to target `path`") + flags.BoolVarP(&dumpOptions.Compress, "compress", "c", false, "compress archive contents. When enabled, the DEFLATE algorithm is applied for \"zip\" archives, and the gzip algorithm for \"tar\" archives, resulting in a .tar.gz or .tgz file. (default: false)") } func splitPath(p string) []string { @@ -187,7 +189,7 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args [] canWriteArchiveFunc = func() error { return nil } } - d := dump.New(opts.Archive, repo, outputFileWriter) + d := dump.New(opts.Archive, opts.Compress, repo, outputFileWriter) err = printFromTree(ctx, tree, repo, "/", splittedPath, d, canWriteArchiveFunc) if err != nil { return errors.Fatalf("cannot dump file: %v", err) diff --git a/internal/dump/common.go b/internal/dump/common.go index 4bc404fe0e5..619e946e9e4 100644 --- a/internal/dump/common.go +++ b/internal/dump/common.go @@ -15,18 +15,20 @@ import ( // A Dumper writes trees and files from a repository to a Writer // in an archive format. type Dumper struct { - cache *bloblru.Cache - format string - repo restic.Loader - w io.Writer + cache *bloblru.Cache + format string + repo restic.Loader + w io.Writer + compress bool } -func New(format string, repo restic.Loader, w io.Writer) *Dumper { +func New(format string, compress bool, repo restic.Loader, w io.Writer) *Dumper { return &Dumper{ - cache: bloblru.New(64 << 20), - format: format, - repo: repo, - w: w, + cache: bloblru.New(64 << 20), + format: format, + repo: repo, + w: w, + compress: compress, } } diff --git a/internal/dump/common_test.go b/internal/dump/common_test.go index afd19df6372..f581e31cb7d 100644 --- a/internal/dump/common_test.go +++ b/internal/dump/common_test.go @@ -23,7 +23,7 @@ func prepareTempdirRepoSrc(t testing.TB, src archiver.TestDir) (string, restic.R type CheckDump func(t *testing.T, testDir string, testDump *bytes.Buffer) error -func WriteTest(t *testing.T, format string, cd CheckDump) { +func WriteTest(t *testing.T, format string, compress bool, cd CheckDump) { tests := []struct { name string args archiver.TestDir @@ -85,7 +85,7 @@ func WriteTest(t *testing.T, format string, cd CheckDump) { rtest.OK(t, err) dst := &bytes.Buffer{} - d := New(format, repo, dst) + d := New(format, compress, repo, dst) if err := d.DumpTree(ctx, tree, tt.target); err != nil { t.Fatalf("Dumper.Run error = %v", err) } diff --git a/internal/dump/tar.go b/internal/dump/tar.go index c5933d4f8ac..0358fba5d5d 100644 --- a/internal/dump/tar.go +++ b/internal/dump/tar.go @@ -2,6 +2,7 @@ package dump import ( "archive/tar" + "compress/gzip" "context" "fmt" "os" @@ -13,12 +14,22 @@ import ( ) func (d *Dumper) dumpTar(ctx context.Context, ch <-chan *restic.Node) (err error) { - w := tar.NewWriter(d.w) + outer := d.w + + if d.compress { + outer = gzip.NewWriter(outer) + } + w := tar.NewWriter(outer) defer func() { if err == nil { err = w.Close() err = errors.Wrap(err, "Close") + + if gz, ok := outer.(*gzip.Writer); ok { + err = gz.Close() + err = errors.Wrap(err, "Close") + } } }() diff --git a/internal/dump/tar_test.go b/internal/dump/tar_test.go 
index cb3cb08c438..ae575efad34 100644 --- a/internal/dump/tar_test.go +++ b/internal/dump/tar_test.go @@ -18,7 +18,8 @@ import ( ) func TestWriteTar(t *testing.T) { - WriteTest(t, "tar", checkTar) + WriteTest(t, "tar", false, checkTar) + WriteTest(t, "tar", true, checkTar) } func checkTar(t *testing.T, testDir string, srcTar *bytes.Buffer) error { diff --git a/internal/dump/zip.go b/internal/dump/zip.go index d32475770f1..0d210eaa1e4 100644 --- a/internal/dump/zip.go +++ b/internal/dump/zip.go @@ -40,6 +40,10 @@ func (d *Dumper) dumpNodeZip(ctx context.Context, node *restic.Node, zw *zip.Wri } header.SetMode(node.Mode) + if d.compress { + header.Method = zip.Deflate + } + if node.Type == restic.NodeTypeDir { header.Name += "/" } diff --git a/internal/dump/zip_test.go b/internal/dump/zip_test.go index 6f5f60f54fa..ab955858c26 100644 --- a/internal/dump/zip_test.go +++ b/internal/dump/zip_test.go @@ -12,7 +12,8 @@ import ( ) func TestWriteZip(t *testing.T) { - WriteTest(t, "zip", checkZip) + WriteTest(t, "zip", true, checkZip) + WriteTest(t, "zip", false, checkZip) } func readZipFile(f *zip.File) ([]byte, error) { From 4469fe15753d6c6994a3b6e1e50a6dcd0d61164d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 29 Aug 2024 23:22:16 +0200 Subject: [PATCH 695/893] fs: fix restoring timestamps on Windows for long paths --- changelog/unreleased/issue-1843 | 9 +++++++++ internal/fs/node.go | 2 +- internal/fs/node_windows.go | 4 ++-- 3 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 changelog/unreleased/issue-1843 diff --git a/changelog/unreleased/issue-1843 b/changelog/unreleased/issue-1843 new file mode 100644 index 00000000000..1b891adc9c1 --- /dev/null +++ b/changelog/unreleased/issue-1843 @@ -0,0 +1,9 @@ +Bugfix: Correctly restore timestamp on long filepaths on old Windows versions + +The `restore` command did not restore timestamps on file paths longer than 256 +characters on Windows versions before Windows 10 1607. + +This issue is now resolved. + +https://github.com/restic/restic/issues/1843 +https://github.com/restic/restic/pull/5061 diff --git a/internal/fs/node.go b/internal/fs/node.go index 280e290c256..a273a9f08f8 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -315,7 +315,7 @@ func nodeRestoreTimestamps(node *restic.Node, path string) error { return nodeRestoreSymlinkTimestamps(path, utimes) } - if err := syscall.UtimesNano(path, utimes[:]); err != nil { + if err := syscall.UtimesNano(fixpath(path), utimes[:]); err != nil { return errors.Wrap(err, "UtimesNano") } diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index 9d46143cc82..836e7b5d4f1 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -202,7 +202,7 @@ func genericAttributesToWindowsAttrs(attrs map[restic.GenericAttributeType]json. // restoreCreationTime gets the creation time from the data and sets it to the file/folder at // the specified path. func restoreCreationTime(path string, creationTime *syscall.Filetime) (err error) { - pathPointer, err := syscall.UTF16PtrFromString(path) + pathPointer, err := syscall.UTF16PtrFromString(fixpath(path)) if err != nil { return err } @@ -223,7 +223,7 @@ func restoreCreationTime(path string, creationTime *syscall.Filetime) (err error // restoreFileAttributes gets the File Attributes from the data and sets them to the file/folder // at the specified path. 
func restoreFileAttributes(path string, fileAttributes *uint32) (err error) { - pathPointer, err := syscall.UTF16PtrFromString(path) + pathPointer, err := syscall.UTF16PtrFromString(fixpath(path)) if err != nil { return err } From 1a7fafc7ebca93f214ed0b1ac45781ee6cd9dccb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Phillipp=20R=C3=B6ll?= Date: Sun, 15 Sep 2024 21:01:23 +0200 Subject: [PATCH 696/893] dump: compress zip archives --- changelog/unreleased/pull-5054 | 11 ++++------- cmd/restic/cmd_dump.go | 8 +++----- internal/dump/common.go | 20 +++++++++----------- internal/dump/common_test.go | 4 ++-- internal/dump/tar.go | 13 +------------ internal/dump/tar_test.go | 3 +-- internal/dump/zip.go | 5 +---- internal/dump/zip_test.go | 3 +-- 8 files changed, 22 insertions(+), 45 deletions(-) diff --git a/changelog/unreleased/pull-5054 b/changelog/unreleased/pull-5054 index 7b5291e2d7f..6efd5882c44 100644 --- a/changelog/unreleased/pull-5054 +++ b/changelog/unreleased/pull-5054 @@ -1,10 +1,7 @@ -Enhancement: Add `--compress` flag to `dump` command to compress archive +Enhancement: Compress ZIP archives created by `dump` command -Restic did not compress the archives that was created by using the -`dump` command. It now allows to save some disk space when exporting -archives by adding a `--compress` flag. The DEFLATE algorithm is used -for "zip" archives, and the gzip algorithm for "tar" archives, -resulting in a .tar.gz or .tgz file. Not compressing the archive -is still the default. +Restic did not compress the archives that were created by using +the `dump` command. It now saves some disk space when exporting +archives using the DEFLATE algorithm for "zip" archives. https://github.com/restic/restic/pull/5054 diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 23c50263559..6b7f8d01285 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -50,9 +50,8 @@ Exit status is 12 if the password is incorrect. // DumpOptions collects all options for the dump command. type DumpOptions struct { restic.SnapshotFilter - Archive string - Target string - Compress bool + Archive string + Target string } var dumpOptions DumpOptions @@ -64,7 +63,6 @@ func init() { initSingleSnapshotFilter(flags, &dumpOptions.SnapshotFilter) flags.StringVarP(&dumpOptions.Archive, "archive", "a", "tar", "set archive `format` as \"tar\" or \"zip\"") flags.StringVarP(&dumpOptions.Target, "target", "t", "", "write the output to target `path`") - flags.BoolVarP(&dumpOptions.Compress, "compress", "c", false, "compress archive contents. When enabled, the DEFLATE algorithm is applied for \"zip\" archives, and the gzip algorithm for \"tar\" archives, resulting in a .tar.gz or .tgz file. (default: false)") } func splitPath(p string) []string { @@ -189,7 +187,7 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args [] canWriteArchiveFunc = func() error { return nil } } - d := dump.New(opts.Archive, opts.Compress, repo, outputFileWriter) + d := dump.New(opts.Archive, repo, outputFileWriter) err = printFromTree(ctx, tree, repo, "/", splittedPath, d, canWriteArchiveFunc) if err != nil { return errors.Fatalf("cannot dump file: %v", err) diff --git a/internal/dump/common.go b/internal/dump/common.go index 619e946e9e4..4bc404fe0e5 100644 --- a/internal/dump/common.go +++ b/internal/dump/common.go @@ -15,20 +15,18 @@ import ( // A Dumper writes trees and files from a repository to a Writer // in an archive format. 
type Dumper struct { - cache *bloblru.Cache - format string - repo restic.Loader - w io.Writer - compress bool + cache *bloblru.Cache + format string + repo restic.Loader + w io.Writer } -func New(format string, compress bool, repo restic.Loader, w io.Writer) *Dumper { +func New(format string, repo restic.Loader, w io.Writer) *Dumper { return &Dumper{ - cache: bloblru.New(64 << 20), - format: format, - repo: repo, - w: w, - compress: compress, + cache: bloblru.New(64 << 20), + format: format, + repo: repo, + w: w, } } diff --git a/internal/dump/common_test.go b/internal/dump/common_test.go index f581e31cb7d..afd19df6372 100644 --- a/internal/dump/common_test.go +++ b/internal/dump/common_test.go @@ -23,7 +23,7 @@ func prepareTempdirRepoSrc(t testing.TB, src archiver.TestDir) (string, restic.R type CheckDump func(t *testing.T, testDir string, testDump *bytes.Buffer) error -func WriteTest(t *testing.T, format string, compress bool, cd CheckDump) { +func WriteTest(t *testing.T, format string, cd CheckDump) { tests := []struct { name string args archiver.TestDir @@ -85,7 +85,7 @@ func WriteTest(t *testing.T, format string, compress bool, cd CheckDump) { rtest.OK(t, err) dst := &bytes.Buffer{} - d := New(format, compress, repo, dst) + d := New(format, repo, dst) if err := d.DumpTree(ctx, tree, tt.target); err != nil { t.Fatalf("Dumper.Run error = %v", err) } diff --git a/internal/dump/tar.go b/internal/dump/tar.go index 0358fba5d5d..c5933d4f8ac 100644 --- a/internal/dump/tar.go +++ b/internal/dump/tar.go @@ -2,7 +2,6 @@ package dump import ( "archive/tar" - "compress/gzip" "context" "fmt" "os" @@ -14,22 +13,12 @@ import ( ) func (d *Dumper) dumpTar(ctx context.Context, ch <-chan *restic.Node) (err error) { - outer := d.w - - if d.compress { - outer = gzip.NewWriter(outer) - } - w := tar.NewWriter(outer) + w := tar.NewWriter(d.w) defer func() { if err == nil { err = w.Close() err = errors.Wrap(err, "Close") - - if gz, ok := outer.(*gzip.Writer); ok { - err = gz.Close() - err = errors.Wrap(err, "Close") - } } }() diff --git a/internal/dump/tar_test.go b/internal/dump/tar_test.go index ae575efad34..cb3cb08c438 100644 --- a/internal/dump/tar_test.go +++ b/internal/dump/tar_test.go @@ -18,8 +18,7 @@ import ( ) func TestWriteTar(t *testing.T) { - WriteTest(t, "tar", false, checkTar) - WriteTest(t, "tar", true, checkTar) + WriteTest(t, "tar", checkTar) } func checkTar(t *testing.T, testDir string, srcTar *bytes.Buffer) error { diff --git a/internal/dump/zip.go b/internal/dump/zip.go index 0d210eaa1e4..6041c518733 100644 --- a/internal/dump/zip.go +++ b/internal/dump/zip.go @@ -39,10 +39,7 @@ func (d *Dumper) dumpNodeZip(ctx context.Context, node *restic.Node, zw *zip.Wri Modified: node.ModTime, } header.SetMode(node.Mode) - - if d.compress { - header.Method = zip.Deflate - } + header.Method = zip.Deflate if node.Type == restic.NodeTypeDir { header.Name += "/" diff --git a/internal/dump/zip_test.go b/internal/dump/zip_test.go index ab955858c26..6f5f60f54fa 100644 --- a/internal/dump/zip_test.go +++ b/internal/dump/zip_test.go @@ -12,8 +12,7 @@ import ( ) func TestWriteZip(t *testing.T) { - WriteTest(t, "zip", true, checkZip) - WriteTest(t, "zip", false, checkZip) + WriteTest(t, "zip", checkZip) } func readZipFile(f *zip.File) ([]byte, error) { From 3d976562fae4f511db7520b4ba8d43a7d9b6e69f Mon Sep 17 00:00:00 2001 From: Roman Inflianskas Date: Fri, 13 Sep 2024 15:33:49 +0300 Subject: [PATCH 697/893] generate: allow passing `-` for stdout output Since generating completions to stdout for multiple shells 
does not make sense, enforce `-` is supplied only once. --- changelog/unreleased/issue-2511 | 6 ++ cmd/restic/cmd_generate.go | 73 +++++++++++++-------- cmd/restic/cmd_generate_integration_test.go | 40 +++++++++++ 3 files changed, 92 insertions(+), 27 deletions(-) create mode 100644 changelog/unreleased/issue-2511 create mode 100644 cmd/restic/cmd_generate_integration_test.go diff --git a/changelog/unreleased/issue-2511 b/changelog/unreleased/issue-2511 new file mode 100644 index 00000000000..97b01ccde60 --- /dev/null +++ b/changelog/unreleased/issue-2511 @@ -0,0 +1,6 @@ +Enhancement: Allow generating shell completions to stdout + +Restic `generate` now supports passing `-` passed as file name to `--[shell]-completion` option. + +https://github.com/restic/restic/issues/2511 +https://github.com/restic/restic/pull/5053 diff --git a/cmd/restic/cmd_generate.go b/cmd/restic/cmd_generate.go index b5c7cecb589..66b3fa7c52f 100644 --- a/cmd/restic/cmd_generate.go +++ b/cmd/restic/cmd_generate.go @@ -1,6 +1,8 @@ package main import ( + "io" + "os" "time" "github.com/restic/restic/internal/errors" @@ -41,10 +43,10 @@ func init() { cmdRoot.AddCommand(cmdGenerate) fs := cmdGenerate.Flags() fs.StringVar(&genOpts.ManDir, "man", "", "write man pages to `directory`") - fs.StringVar(&genOpts.BashCompletionFile, "bash-completion", "", "write bash completion `file`") - fs.StringVar(&genOpts.FishCompletionFile, "fish-completion", "", "write fish completion `file`") - fs.StringVar(&genOpts.ZSHCompletionFile, "zsh-completion", "", "write zsh completion `file`") - fs.StringVar(&genOpts.PowerShellCompletionFile, "powershell-completion", "", "write powershell completion `file`") + fs.StringVar(&genOpts.BashCompletionFile, "bash-completion", "", "write bash completion `file` (`-` for stdout)") + fs.StringVar(&genOpts.FishCompletionFile, "fish-completion", "", "write fish completion `file` (`-` for stdout)") + fs.StringVar(&genOpts.ZSHCompletionFile, "zsh-completion", "", "write zsh completion `file` (`-` for stdout)") + fs.StringVar(&genOpts.PowerShellCompletionFile, "powershell-completion", "", "write powershell completion `file` (`-` for stdout)") } func writeManpages(dir string) error { @@ -65,32 +67,44 @@ func writeManpages(dir string) error { return doc.GenManTree(cmdRoot, header, dir) } -func writeBashCompletion(file string) error { +func writeCompletion(filename string, shell string, generate func(w io.Writer) error) (err error) { if stdoutIsTerminal() { - Verbosef("writing bash completion file to %v\n", file) + Verbosef("writing %s completion file to %v\n", shell, filename) } - return cmdRoot.GenBashCompletionFile(file) -} - -func writeFishCompletion(file string) error { - if stdoutIsTerminal() { - Verbosef("writing fish completion file to %v\n", file) + var outWriter io.Writer + if filename != "-" { + var outFile *os.File + outFile, err = os.Create(filename) + if err != nil { + return + } + defer func() { err = outFile.Close() }() + outWriter = outFile + } else { + outWriter = globalOptions.stdout } - return cmdRoot.GenFishCompletionFile(file, true) -} -func writeZSHCompletion(file string) error { - if stdoutIsTerminal() { - Verbosef("writing zsh completion file to %v\n", file) - } - return cmdRoot.GenZshCompletionFile(file) + err = generate(outWriter) + return } -func writePowerShellCompletion(file string) error { - if stdoutIsTerminal() { - Verbosef("writing powershell completion file to %v\n", file) +func checkStdoutForSingleShell(opts generateOptions) error { + completionFileOpts := []string{ + 
opts.BashCompletionFile, + opts.FishCompletionFile, + opts.ZSHCompletionFile, + opts.PowerShellCompletionFile, + } + seenIsStdout := false + for _, completionFileOpt := range completionFileOpts { + if completionFileOpt == "-" { + if seenIsStdout { + return errors.Fatal("the generate command can generate shell completions to stdout for single shell only") + } + seenIsStdout = true + } } - return cmdRoot.GenPowerShellCompletionFile(file) + return nil } func runGenerate(opts generateOptions, args []string) error { @@ -105,29 +119,34 @@ func runGenerate(opts generateOptions, args []string) error { } } + err := checkStdoutForSingleShell(opts) + if err != nil { + return err + } + if opts.BashCompletionFile != "" { - err := writeBashCompletion(opts.BashCompletionFile) + err := writeCompletion(opts.BashCompletionFile, "bash", cmdRoot.GenBashCompletion) if err != nil { return err } } if opts.FishCompletionFile != "" { - err := writeFishCompletion(opts.FishCompletionFile) + err := writeCompletion(opts.FishCompletionFile, "fish", func(w io.Writer) error { return cmdRoot.GenFishCompletion(w, true) }) if err != nil { return err } } if opts.ZSHCompletionFile != "" { - err := writeZSHCompletion(opts.ZSHCompletionFile) + err := writeCompletion(opts.ZSHCompletionFile, "zsh", cmdRoot.GenZshCompletion) if err != nil { return err } } if opts.PowerShellCompletionFile != "" { - err := writePowerShellCompletion(opts.PowerShellCompletionFile) + err := writeCompletion(opts.PowerShellCompletionFile, "powershell", cmdRoot.GenPowerShellCompletion) if err != nil { return err } diff --git a/cmd/restic/cmd_generate_integration_test.go b/cmd/restic/cmd_generate_integration_test.go new file mode 100644 index 00000000000..0480abc04db --- /dev/null +++ b/cmd/restic/cmd_generate_integration_test.go @@ -0,0 +1,40 @@ +package main + +import ( + "bytes" + "strings" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func TestGenerateStdout(t *testing.T) { + testCases := []struct { + name string + opts generateOptions + }{ + {"bash", generateOptions{BashCompletionFile: "-"}}, + {"fish", generateOptions{FishCompletionFile: "-"}}, + {"zsh", generateOptions{ZSHCompletionFile: "-"}}, + {"powershell", generateOptions{PowerShellCompletionFile: "-"}}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + err := runGenerate(tc.opts, []string{}) + rtest.OK(t, err) + completionString := buf.String() + rtest.Assert(t, strings.Contains(completionString, "# "+tc.name+" completion for restic"), "has no expected completion header") + }) + } + + t.Run("Generate shell completions to stdout for two shells", func(t *testing.T) { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + opts := generateOptions{BashCompletionFile: "-", FishCompletionFile: "-"} + err := runGenerate(opts, []string{}) + rtest.Assert(t, err != nil, "generate shell completions to stdout for two shells fails") + }) +} From 8c7a6daa476c004f44251c64f48cb9e357f33202 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Mon, 30 Sep 2024 17:24:05 +0200 Subject: [PATCH 698/893] dump: Simplify writeNode and use fewer goroutines This changes Dumper.writeNode to spawn loader goroutines as needed instead of as a pool. The code is shorter, fewer goroutines are spawned for small files, and crash dumps (also for unrelated errors) should be smaller. 
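As a minimal sketch of the pattern this commit message describes — a bounded queue of per-blob result channels, so output stays in input order while the queue capacity limits how many loads run concurrently — the standalone example below may help. It is not restic's code: loadChunk, writeChunksInOrder and the integer IDs are hypothetical stand-ins for repo.LoadBlob, Dumper.writeNode and node.Content.

    package main

    import (
    	"context"
    	"fmt"
    	"os"

    	"golang.org/x/sync/errgroup"
    )

    // loadChunk stands in for a slow, concurrency-safe load (LoadBlob in restic).
    func loadChunk(_ context.Context, id int) ([]byte, error) {
    	return []byte(fmt.Sprintf("chunk %d\n", id)), nil
    }

    // writeChunksInOrder loads chunks concurrently but writes them strictly in
    // the order of ids. The bounded queue of result channels is what keeps the
    // number of in-flight loads small.
    func writeChunksInOrder(ctx context.Context, ids []int, limit int) error {
    	wg, ctx := errgroup.WithContext(ctx)
    	results := make(chan (<-chan []byte), limit)

    	// Single writer: drains the queue in enqueue order.
    	wg.Go(func() error {
    		for ch := range results {
    			select {
    			case <-ctx.Done():
    				return ctx.Err()
    			case data := <-ch:
    				if _, err := os.Stdout.Write(data); err != nil {
    					return err
    				}
    			}
    		}
    		return nil
    	})

    	// One short-lived loader goroutine per chunk; enqueueing into results
    	// blocks while the queue is full, which bounds the loads in flight.
    loop:
    	for _, id := range ids {
    		id := id                   // capture (needed before Go 1.22 loop semantics)
    		ch := make(chan []byte, 1) // buffered: a loader never waits for the writer
    		wg.Go(func() error {
    			data, err := loadChunk(ctx, id)
    			if err == nil {
    				ch <- data
    			}
    			return err
    		})
    		select {
    		case results <- ch:
    		case <-ctx.Done():
    			break loop
    		}
    	}
    	close(results)

    	return wg.Wait()
    }

    func main() {
    	if err := writeChunksInOrder(context.Background(), []int{1, 2, 3, 4, 5}, 2); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    	}
    }

If any loader fails, the errgroup cancels the shared context, which unblocks both the writer and the producer — the same shutdown behaviour the rewritten writeNode relies on.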
--- internal/dump/common.go | 80 ++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 53 deletions(-) diff --git a/internal/dump/common.go b/internal/dump/common.go index 4bc404fe0e5..b4741302e38 100644 --- a/internal/dump/common.go +++ b/internal/dump/common.go @@ -6,7 +6,6 @@ import ( "path" "github.com/restic/restic/internal/bloblru" - "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/walker" "golang.org/x/sync/errgroup" @@ -104,75 +103,50 @@ func (d *Dumper) WriteNode(ctx context.Context, node *restic.Node) error { } func (d *Dumper) writeNode(ctx context.Context, w io.Writer, node *restic.Node) error { - type loadTask struct { - id restic.ID - out chan<- []byte - } - type writeTask struct { - data <-chan []byte - } - - loaderCh := make(chan loadTask) - // per worker: allows for one blob that gets download + one blob thats queue for writing - writerCh := make(chan writeTask, d.repo.Connections()*2) - wg, ctx := errgroup.WithContext(ctx) + limit := d.repo.Connections() - 1 // See below for the -1. + blobs := make(chan (<-chan []byte), limit) wg.Go(func() error { - defer close(loaderCh) - defer close(writerCh) - for _, id := range node.Content { - // non-blocking blob handover to allow the loader to load the next blob - // while the old one is still written - ch := make(chan []byte, 1) + for ch := range blobs { select { - case loaderCh <- loadTask{id: id, out: ch}: - case <-ctx.Done(): - return ctx.Err() - } - - select { - case writerCh <- writeTask{data: ch}: case <-ctx.Done(): return ctx.Err() + case blob := <-ch: + if _, err := w.Write(blob); err != nil { + return err + } } } return nil }) - for i := uint(0); i < d.repo.Connections(); i++ { + // Start short-lived goroutines to load blobs. + // There will be at most 1+cap(blobs) calling LoadBlob at any moment. +loop: + for _, id := range node.Content { + // This needs to be buffered, so that loaders can quit + // without waiting for the writer. + ch := make(chan []byte, 1) + wg.Go(func() error { - for task := range loaderCh { - blob, err := d.cache.GetOrCompute(task.id, func() ([]byte, error) { - return d.repo.LoadBlob(ctx, restic.DataBlob, task.id, nil) - }) - if err != nil { - return err - } + blob, err := d.cache.GetOrCompute(id, func() ([]byte, error) { + return d.repo.LoadBlob(ctx, restic.DataBlob, id, nil) + }) - select { - case task.out <- blob: - case <-ctx.Done(): - return ctx.Err() - } + if err == nil { + ch <- blob } - return nil + return err }) - } - wg.Go(func() error { - for result := range writerCh { - select { - case data := <-result.data: - if _, err := w.Write(data); err != nil { - return errors.Wrap(err, "Write") - } - case <-ctx.Done(): - return ctx.Err() - } + select { + case blobs <- ch: + case <-ctx.Done(): + break loop } - return nil - }) + } + close(blobs) return wg.Wait() } From 3f08dee685529b61c614746c832860d4cc8a8c63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 01:49:38 +0000 Subject: [PATCH 699/893] build(deps): bump golang.org/x/sys from 0.24.0 to 0.25.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.24.0 to 0.25.0. - [Commits](https://github.com/golang/sys/compare/v0.24.0...v0.25.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e6a2f0da473..4e825c9efcd 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( golang.org/x/net v0.28.0 golang.org/x/oauth2 v0.22.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.24.0 + golang.org/x/sys v0.25.0 golang.org/x/term v0.23.0 golang.org/x/text v0.17.0 golang.org/x/time v0.6.0 diff --git a/go.sum b/go.sum index bc244d8e82a..9fa1ced9f9e 100644 --- a/go.sum +++ b/go.sum @@ -276,8 +276,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From e90085b37569edc512a876a3da21dc988986fabc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 01:49:41 +0000 Subject: [PATCH 700/893] build(deps): bump go.uber.org/automaxprocs from 1.5.3 to 1.6.0 Bumps [go.uber.org/automaxprocs](https://github.com/uber-go/automaxprocs) from 1.5.3 to 1.6.0. - [Release notes](https://github.com/uber-go/automaxprocs/releases) - [Changelog](https://github.com/uber-go/automaxprocs/blob/master/CHANGELOG.md) - [Commits](https://github.com/uber-go/automaxprocs/compare/v1.5.3...v1.6.0) --- updated-dependencies: - dependency-name: go.uber.org/automaxprocs dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e6a2f0da473..249bbe28cbb 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/restic/chunker v0.4.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - go.uber.org/automaxprocs v1.5.3 + go.uber.org/automaxprocs v1.6.0 golang.org/x/crypto v0.26.0 golang.org/x/net v0.28.0 golang.org/x/oauth2 v0.22.0 diff --git a/go.sum b/go.sum index bc244d8e82a..c71c6d56f17 100644 --- a/go.sum +++ b/go.sum @@ -216,8 +216,8 @@ go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucg go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= From f3f629bb6904f7b2790371af62f9f3066e974ddf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 01:49:46 +0000 Subject: [PATCH 701/893] build(deps): bump github.com/minio/minio-go/v7 from 7.0.76 to 7.0.77 Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.76 to 7.0.77. - [Release notes](https://github.com/minio/minio-go/releases) - [Commits](https://github.com/minio/minio-go/compare/v7.0.76...v7.0.77) --- updated-dependencies: - dependency-name: github.com/minio/minio-go/v7 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e6a2f0da473..802d505f408 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/klauspost/compress v1.17.9 - github.com/minio/minio-go/v7 v7.0.76 + github.com/minio/minio-go/v7 v7.0.77 github.com/ncw/swift/v2 v2.0.3 github.com/peterbourgon/unixtransport v0.0.4 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index bc244d8e82a..5097e52e034 100644 --- a/go.sum +++ b/go.sum @@ -146,8 +146,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.76 h1:9nxHH2XDai61cT/EFhyIw/wW4vJfpPNvl7lSFpRt+Ng= -github.com/minio/minio-go/v7 v7.0.76/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg= +github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw= +github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg= github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg= github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= From b5c28a7ba2b2dc133b40d9d13778b001531c8b2e Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Mon, 30 Sep 2024 22:43:04 +0200 Subject: [PATCH 702/893] internal/restic: Use IDSet.Clone + use maps package One place where IDSet.Clone is useful was reinventing it, using a conversion to list, a sort, and a conversion back to map. Also, use the stdlib "maps" package to implement as much of IDSet as possible. This requires changing one caller, which assumed that cloning nil would return a non-nil IDSet. --- internal/repository/index/master_index.go | 3 +++ internal/restic/idset.go | 33 +++++------------------ internal/restic/lock.go | 2 +- 3 files changed, 11 insertions(+), 27 deletions(-) diff --git a/internal/repository/index/master_index.go b/internal/repository/index/master_index.go index 2600fe350dd..ce9afcde43c 100644 --- a/internal/repository/index/master_index.go +++ b/internal/repository/index/master_index.go @@ -347,6 +347,9 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud // copy excludePacks to prevent unintended sideeffects excludePacks = excludePacks.Clone() + if excludePacks == nil { + excludePacks = restic.NewIDSet() + } debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(indexes), excludePacks) wg, wgCtx := errgroup.WithContext(ctx) diff --git a/internal/restic/idset.go b/internal/restic/idset.go index 9e6e3c6fd85..7d98b487cf6 100644 --- a/internal/restic/idset.go +++ b/internal/restic/idset.go @@ -1,6 +1,9 @@ package restic -import "sort" +import ( + "maps" + "sort" +) // IDSet is a set of IDs. type IDSet map[ID]struct{} @@ -44,28 +47,10 @@ func (s IDSet) List() IDs { } // Equals returns true iff s equals other. 
-func (s IDSet) Equals(other IDSet) bool { - if len(s) != len(other) { - return false - } - - for id := range s { - if _, ok := other[id]; !ok { - return false - } - } - - // length + one-way comparison is sufficient implication of equality - - return true -} +func (s IDSet) Equals(other IDSet) bool { return maps.Equal(s, other) } // Merge adds the blobs in other to the current set. -func (s IDSet) Merge(other IDSet) { - for id := range other { - s.Insert(id) - } -} +func (s IDSet) Merge(other IDSet) { maps.Copy(s, other) } // Intersect returns a new set containing the IDs that are present in both sets. func (s IDSet) Intersect(other IDSet) (result IDSet) { @@ -106,8 +91,4 @@ func (s IDSet) String() string { return "{" + str[1:len(str)-1] + "}" } -func (s IDSet) Clone() IDSet { - c := NewIDSet() - c.Merge(s) - return c -} +func (s IDSet) Clone() IDSet { return maps.Clone(s) } diff --git a/internal/restic/lock.go b/internal/restic/lock.go index 969d0593db1..8ad84091af9 100644 --- a/internal/restic/lock.go +++ b/internal/restic/lock.go @@ -187,7 +187,7 @@ func (l *Lock) checkForOtherLocks(ctx context.Context) error { // Store updates in new IDSet to prevent data races var m sync.Mutex - newCheckedIDs := NewIDSet(checkedIDs.List()...) + newCheckedIDs := checkedIDs.Clone() err = ForAllLocks(ctx, l.repo, checkedIDs, func(id ID, lock *Lock, err error) error { if err != nil { // if we cannot load a lock then it is unclear whether it can be ignored From e10e2bb50f5e869d9125c663a3599019fbf8f372 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Thu, 3 Oct 2024 21:17:22 +0200 Subject: [PATCH 703/893] fs: Include filename in mknod errors --- internal/fs/mknod_unix.go | 14 +++++++++++--- internal/fs/node_freebsd.go | 13 ++++++++++--- internal/fs/node_unix_test.go | 11 +++++++++++ 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/internal/fs/mknod_unix.go b/internal/fs/mknod_unix.go index 6127599f72e..024c4d502d9 100644 --- a/internal/fs/mknod_unix.go +++ b/internal/fs/mknod_unix.go @@ -3,8 +3,16 @@ package fs -import "golang.org/x/sys/unix" +import ( + "os" -func mknod(path string, mode uint32, dev uint64) (err error) { - return unix.Mknod(path, mode, int(dev)) + "golang.org/x/sys/unix" +) + +func mknod(path string, mode uint32, dev uint64) error { + err := unix.Mknod(path, mode, int(dev)) + if err != nil { + err = &os.PathError{Op: "mknod", Path: path, Err: err} + } + return err } diff --git a/internal/fs/node_freebsd.go b/internal/fs/node_freebsd.go index 1b2f2fc7e94..0cbe876f124 100644 --- a/internal/fs/node_freebsd.go +++ b/internal/fs/node_freebsd.go @@ -3,12 +3,19 @@ package fs -import "syscall" +import ( + "os" + "syscall" +) func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { return nil } -func mknod(path string, mode uint32, dev uint64) (err error) { - return syscall.Mknod(path, mode, dev) +func mknod(path string, mode uint32, dev uint64) error { + err := syscall.Mknod(path, mode, dev) + if err != nil { + err = &os.PathError{Op: "mknod", Path: path, Err: err} + } + return err } diff --git a/internal/fs/node_unix_test.go b/internal/fs/node_unix_test.go index 4d01b6cc571..f38762fc792 100644 --- a/internal/fs/node_unix_test.go +++ b/internal/fs/node_unix_test.go @@ -8,9 +8,11 @@ import ( "os" "path/filepath" "runtime" + "strings" "syscall" "testing" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -134,3 +136,12 @@ func 
TestNodeFromFileInfo(t *testing.T) { }) } } + +func TestMknodError(t *testing.T) { + d := t.TempDir() + // Call mkfifo, which calls mknod, as mknod may give + // "operation not permitted" on Mac. + err := mkfifo(d, 0) + rtest.Assert(t, errors.Is(err, os.ErrExist), "want ErrExist, got %q", err) + rtest.Assert(t, strings.Contains(err.Error(), d), "filename not in %q", err) +} From 19653f9e06ffeede08ffd9387b39c19eabcc9ef8 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Thu, 3 Oct 2024 21:36:48 +0200 Subject: [PATCH 704/893] fs: Simplify NodeCreateAt --- internal/fs/node.go | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/internal/fs/node.go b/internal/fs/node.go index 280e290c256..a5ca1654a2a 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -163,41 +163,29 @@ func lookupGroup(gid uint32) string { } // NodeCreateAt creates the node at the given path but does NOT restore node meta data. -func NodeCreateAt(node *restic.Node, path string) error { +func NodeCreateAt(node *restic.Node, path string) (err error) { debug.Log("create node %v at %v", node.Name, path) switch node.Type { case restic.NodeTypeDir: - if err := nodeCreateDirAt(node, path); err != nil { - return err - } + err = nodeCreateDirAt(node, path) case restic.NodeTypeFile: - if err := nodeCreateFileAt(path); err != nil { - return err - } + err = nodeCreateFileAt(path) case restic.NodeTypeSymlink: - if err := nodeCreateSymlinkAt(node, path); err != nil { - return err - } + err = nodeCreateSymlinkAt(node, path) case restic.NodeTypeDev: - if err := nodeCreateDevAt(node, path); err != nil { - return err - } + err = nodeCreateDevAt(node, path) case restic.NodeTypeCharDev: - if err := nodeCreateCharDevAt(node, path); err != nil { - return err - } + err = nodeCreateCharDevAt(node, path) case restic.NodeTypeFifo: - if err := nodeCreateFifoAt(path); err != nil { - return err - } + err = nodeCreateFifoAt(path) case restic.NodeTypeSocket: - return nil + err = nil default: - return errors.Errorf("filetype %q not implemented", node.Type) + err = errors.Errorf("filetype %q not implemented", node.Type) } - return nil + return err } func nodeCreateDirAt(node *restic.Node, path string) error { From 2b609d3e77bbaacbde8d1a5cfc3bcc688bd98dc3 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Thu, 3 Oct 2024 21:39:35 +0200 Subject: [PATCH 705/893] errors, fs: Replace CombineErrors with stdlib Join This does not produce exactly the same messages, as it inserts newlines instead of "; ". But given how long our error messages can be, that might be a good thing. --- internal/errors/errors.go | 29 ++--------------------------- internal/fs/node.go | 2 +- internal/fs/node_windows.go | 2 +- 3 files changed, 4 insertions(+), 29 deletions(-) diff --git a/internal/errors/errors.go b/internal/errors/errors.go index ca36611ebe8..96e5b82bb3c 100644 --- a/internal/errors/errors.go +++ b/internal/errors/errors.go @@ -2,7 +2,6 @@ package errors import ( stderrors "errors" - "fmt" "github.com/pkg/errors" ) @@ -36,35 +35,11 @@ func As(err error, tgt interface{}) bool { return stderrors.As(err, tgt) } // Is reports whether any error in err's tree matches target. func Is(x, y error) bool { return stderrors.Is(x, y) } +func Join(errs ...error) error { return stderrors.Join(errs...) } + // Unwrap returns the result of calling the Unwrap method on err, if err's type contains // an Unwrap method returning error. 
Otherwise, Unwrap returns nil. // // Unwrap only calls a method of the form "Unwrap() error". In particular Unwrap does not // unwrap errors returned by [Join]. func Unwrap(err error) error { return stderrors.Unwrap(err) } - -// CombineErrors combines multiple errors into a single error after filtering out any nil values. -// If no errors are passed, it returns nil. -// If one error is passed, it simply returns that same error. -func CombineErrors(errors ...error) (err error) { - var combinedErrorMsg string - var multipleErrors bool - for _, errVal := range errors { - if errVal != nil { - if combinedErrorMsg != "" { - combinedErrorMsg += "; " // Separate error messages with a delimiter - multipleErrors = true - } else { - // Set the first error - err = errVal - } - combinedErrorMsg += errVal.Error() - } - } - if combinedErrorMsg == "" { - return nil // If no errors, return nil - } else if !multipleErrors { - return err // If only one error, return that first error - } - return fmt.Errorf("multiple errors occurred: [%s]", combinedErrorMsg) -} diff --git a/internal/fs/node.go b/internal/fs/node.go index a5ca1654a2a..f6bf1808789 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -103,7 +103,7 @@ func nodeFillExtra(node *restic.Node, path string, fi os.FileInfo, ignoreXattrLi allowExtended, err := nodeFillGenericAttributes(node, path, &stat) if allowExtended { // Skip processing ExtendedAttributes if allowExtended is false. - err = errors.CombineErrors(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) + err = errors.Join(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) } return err } diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index 9d46143cc82..0c1bf03654f 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -189,7 +189,7 @@ func nodeRestoreGenericAttributes(node *restic.Node, path string, warn func(msg } restic.HandleUnknownGenericAttributesFound(unknownAttribs, warn) - return errors.CombineErrors(errs...) + return errors.Join(errs...) } // genericAttributesToWindowsAttrs converts the generic attributes map to a WindowsAttributes and also returns a string of unknown attributes that it could not convert. From eac00eb9334f24b767dfe5e137312178acf5289b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Oct 2024 19:58:42 +0000 Subject: [PATCH 706/893] build(deps): bump golang.org/x/oauth2 from 0.22.0 to 0.23.0 Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.22.0 to 0.23.0. - [Commits](https://github.com/golang/oauth2/compare/v0.22.0...v0.23.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5ee5773f362..265af416628 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( go.uber.org/automaxprocs v1.6.0 golang.org/x/crypto v0.26.0 golang.org/x/net v0.28.0 - golang.org/x/oauth2 v0.22.0 + golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.25.0 golang.org/x/term v0.23.0 diff --git a/go.sum b/go.sum index a8ef757ea64..c80b105db63 100644 --- a/go.sum +++ b/go.sum @@ -250,8 +250,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 221fa0fa7ce292dbf534b731be6975975a11127c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Oct 2024 20:26:34 +0000 Subject: [PATCH 707/893] build(deps): bump google.golang.org/api from 0.195.0 to 0.199.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.195.0 to 0.199.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.195.0...v0.199.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 36 +++++++++++++------------- go.sum | 80 +++++++++++++++++++++++++++++----------------------------- 2 files changed, 58 insertions(+), 58 deletions(-) diff --git a/go.mod b/go.mod index 9cb56043487..ae00c92bf5e 100644 --- a/go.mod +++ b/go.mod @@ -25,23 +25,23 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.6.0 - golang.org/x/crypto v0.26.0 - golang.org/x/net v0.28.0 + golang.org/x/crypto v0.27.0 + golang.org/x/net v0.29.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.25.0 - golang.org/x/term v0.23.0 - golang.org/x/text v0.17.0 + golang.org/x/term v0.24.0 + golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 - google.golang.org/api v0.195.0 + google.golang.org/api v0.199.0 ) require ( cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/auth v0.9.1 // indirect + cloud.google.com/go/auth v0.9.5 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.13 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/iam v1.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect @@ -57,7 +57,7 @@ require ( github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect @@ -68,15 +68,15 @@ require ( github.com/rs/xid v1.6.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.24.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect - google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect - google.golang.org/grpc v1.65.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.67.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 0595a0664c1..fdbb820ea99 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,16 @@ cloud.google.com/go 
v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= -cloud.google.com/go/auth v0.9.1 h1:+pMtLEV2k0AXKvs/tGZojuj6QaioxfUjOpMsG5Gtx+w= -cloud.google.com/go/auth v0.9.1/go.mod h1:Sw8ocT5mhhXxFklyhT12Eiy0ed6tTrPMCJjSI8KhYLk= +cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= +cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= -cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= -cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= -cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= +cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= +cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= +cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= @@ -117,8 +117,8 @@ github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -204,18 +204,18 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -223,8 +223,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= @@ -247,8 +247,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -282,15 +282,15 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -306,26 +306,26 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= -google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= +google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= 
+google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= -google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From f44b7cdf8c22f9530e53a62e3bc391895baa2119 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 14 Sep 2024 18:59:59 +0200 Subject: [PATCH 708/893] backup: exclude irregular files from backup restic cannot backup irregular files as those don't behave like normal files. Thus skip them with an error. 
--- internal/archiver/archiver.go | 3 +- internal/archiver/archiver_test.go | 43 ++++++++++++++++++++++ internal/archiver/archiver_unix_test.go | 10 +++++ internal/archiver/archiver_windows_test.go | 8 ++++ 4 files changed, 63 insertions(+), 1 deletion(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 0a0c18d2891..8b20113b600 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -270,7 +270,8 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, } // overwrite name to match that within the snapshot node.Name = path.Base(snPath) - if err != nil { + // do not filter error for nodes of irregular or invalid type + if node.Type != restic.NodeTypeIrregular && node.Type != restic.NodeTypeInvalid && err != nil { err = fmt.Errorf("incomplete metadata for %v: %w", filename, err) return node, arch.error(filename, err) } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index b95947a2e60..562f32414bb 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -2407,4 +2407,47 @@ func TestMetadataBackupErrorFiltering(t *testing.T) { rtest.Assert(t, node != nil, "node is missing") rtest.Assert(t, err == replacementErr, "expected %v got %v", replacementErr, err) rtest.Assert(t, filteredErr != nil, "missing inner error") + + // check that errors from reading irregular file are not filtered + filteredErr = nil + node, err = arch.nodeFromFileInfo("file", filename, wrapIrregularFileInfo(fi), false) + rtest.Assert(t, node != nil, "node is missing") + rtest.Assert(t, filteredErr == nil, "error for irregular node should not have been filtered") + rtest.Assert(t, strings.Contains(err.Error(), "irregular"), "unexpected error %q does not warn about irregular file mode", err) +} + +func TestIrregularFile(t *testing.T) { + files := TestDir{ + "testfile": TestFile{ + Content: "foo bar test file", + }, + } + tempdir, repo := prepareTempdirRepoSrc(t, files) + + back := rtest.Chdir(t, tempdir) + defer back() + + tempfile := filepath.Join(tempdir, "testfile") + fi := lstat(t, "testfile") + + statfs := &StatFS{ + FS: fs.Local{}, + OverrideLstat: map[string]os.FileInfo{ + tempfile: wrapIrregularFileInfo(fi), + }, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + arch := New(repo, fs.Track{FS: statfs}, Options{}) + _, excluded, err := arch.save(ctx, "/", tempfile, nil) + if err == nil { + t.Fatalf("Save() should have failed") + } + rtest.Assert(t, strings.Contains(err.Error(), "irregular"), "unexpected error %q does not warn about irregular file mode", err) + + if excluded { + t.Errorf("Save() excluded the node, that's unexpected") + } } diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index d91d993dd52..312e2d33e09 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -46,6 +46,16 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { return res } +// wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file +func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo { + // wrap the os.FileInfo so we can return a modified stat_t + return wrappedFileInfo{ + FileInfo: fi, + sys: fi.Sys().(*syscall.Stat_t), + mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, + } +} + func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*restic.Node, *restic.Node) { fi := lstat(t, name) want, err := 
fs.NodeFromFileInfo(name, fi, false) diff --git a/internal/archiver/archiver_windows_test.go b/internal/archiver/archiver_windows_test.go index e1195030f3d..ac8a67f2bd3 100644 --- a/internal/archiver/archiver_windows_test.go +++ b/internal/archiver/archiver_windows_test.go @@ -26,3 +26,11 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { return res } + +// wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file +func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo { + return wrappedFileInfo{ + FileInfo: fi, + mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, + } +} From 012cb06fe9a4db14618fbdbbbf1f97d1debe4354 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 14 Sep 2024 19:09:58 +0200 Subject: [PATCH 709/893] repair snapshots: remove irregular files --- cmd/restic/cmd_repair_snapshots.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index 46ba52dd415..ba952432afa 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -92,6 +92,10 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt // - files whose contents are not fully available (-> file will be modified) rewriter := walker.NewTreeRewriter(walker.RewriteOpts{ RewriteNode: func(node *restic.Node, path string) *restic.Node { + if node.Type == restic.NodeTypeIrregular || node.Type == restic.NodeTypeInvalid { + Verbosef(" file %q: removed node with invalid type %q\n", path, node.Type) + return nil + } if node.Type != restic.NodeTypeFile { return node } From 55c150054dad987f9d61c22182a276a9b48abdbb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 16 Oct 2024 20:46:58 +0200 Subject: [PATCH 710/893] add irregular files bug changelog --- changelog/unreleased/pull-5057 | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 changelog/unreleased/pull-5057 diff --git a/changelog/unreleased/pull-5057 b/changelog/unreleased/pull-5057 new file mode 100644 index 00000000000..c34436044c6 --- /dev/null +++ b/changelog/unreleased/pull-5057 @@ -0,0 +1,21 @@ +Bugfix: Do not include irregular files in backup + +Since restic 0.17.1, files with type `irregular` could incorrectly be included +in snapshots. This is most likely to occur when backing up special file types +on Windows that cannot be handled by restic. + +This has been fixed. + +When running the `check` command this bug resulted in an error like the +following: + +``` + tree 12345678[...]: node "example.zip" with invalid type "irregular" +``` + +Repairing the affected snapshots requires upgrading to restic 0.17.2 and then +manually running `restic repair snapshots --forget`. This will remove the +`irregular` files from the snapshots. 
+ +https://github.com/restic/restic/pull/5057 +https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2 From da3c02405b51d9374fc42574e213988b7e510910 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 16 Oct 2024 21:09:05 +0200 Subject: [PATCH 711/893] dump/zip: only compress regular files --- internal/dump/zip.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/dump/zip.go b/internal/dump/zip.go index 6041c518733..17aeb4829a1 100644 --- a/internal/dump/zip.go +++ b/internal/dump/zip.go @@ -39,7 +39,9 @@ func (d *Dumper) dumpNodeZip(ctx context.Context, node *restic.Node, zw *zip.Wri Modified: node.ModTime, } header.SetMode(node.Mode) - header.Method = zip.Deflate + if node.Type == restic.NodeTypeFile { + header.Method = zip.Deflate + } if node.Type == restic.NodeTypeDir { header.Name += "/" From e29d38f8bfd9e7a1a99c26c1a12e4f475cb1b383 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 16 Oct 2024 21:11:24 +0200 Subject: [PATCH 712/893] dump/zip: test that files are compressed --- internal/dump/zip_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/dump/zip_test.go b/internal/dump/zip_test.go index 6f5f60f54fa..c6eb0420616 100644 --- a/internal/dump/zip_test.go +++ b/internal/dump/zip_test.go @@ -101,6 +101,9 @@ func checkZip(t *testing.T, testDir string, srcZip *bytes.Buffer) error { return fmt.Errorf("symlink target does not match, got %s want %s", string(linkName), target) } default: + if f.Method != zip.Deflate { + return fmt.Errorf("expected compression method got %v want %v", f.Method, zip.Deflate) + } if uint64(match.Size()) != f.UncompressedSize64 { return fmt.Errorf("size does not match got %v want %v", f.UncompressedSize64, match.Size()) } From 75711446e1cd8ff49a767d6014477857cfea1947 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 28 Aug 2024 10:58:07 +0200 Subject: [PATCH 713/893] fs: move NodeFromFileInfo into FS interface --- internal/archiver/archiver.go | 2 +- internal/archiver/archiver_test.go | 11 ++++--- internal/archiver/archiver_unix_test.go | 3 +- internal/archiver/file_saver_test.go | 4 +-- internal/fs/fs_local.go | 6 ++++ internal/fs/fs_reader.go | 12 +++++++ internal/fs/interface.go | 3 ++ internal/fs/node.go | 44 ++++++++++++------------- internal/fs/node_test.go | 11 ++++--- internal/fs/node_unix_test.go | 3 +- internal/fs/node_windows_test.go | 5 +-- internal/restic/tree_test.go | 6 ++-- 12 files changed, 69 insertions(+), 41 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 8b20113b600..d8f0157b1dd 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -256,7 +256,7 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I // nodeFromFileInfo returns the restic node from an os.FileInfo. 
func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - node, err := fs.NodeFromFileInfo(filename, fi, ignoreXattrListError) + node, err := arch.FS.NodeFromFileInfo(filename, fi, ignoreXattrListError) if !arch.WithAtime { node.AccessTime = node.ModTime } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 562f32414bb..d4f15c80b74 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -556,7 +556,7 @@ func rename(t testing.TB, oldname, newname string) { } } -func nodeFromFI(t testing.TB, filename string, fi os.FileInfo) *restic.Node { +func nodeFromFI(t testing.TB, fs fs.FS, filename string, fi os.FileInfo) *restic.Node { node, err := fs.NodeFromFileInfo(filename, fi, false) if err != nil { t.Fatal(err) @@ -688,7 +688,7 @@ func TestFileChanged(t *testing.T) { fs := &fs.Local{} fiBefore := lstat(t, filename) - node := nodeFromFI(t, filename, fiBefore) + node := nodeFromFI(t, fs, filename, fiBefore) if fileChanged(fs, fiBefore, node, 0) { t.Fatalf("unchanged file detected as changed") @@ -729,7 +729,7 @@ func TestFilChangedSpecialCases(t *testing.T) { t.Run("type-change", func(t *testing.T) { fi := lstat(t, filename) - node := nodeFromFI(t, filename, fi) + node := nodeFromFI(t, &fs.Local{}, filename, fi) node.Type = "restic.NodeTypeSymlink" if !fileChanged(&fs.Local{}, fi, node, 0) { t.Fatal("node with changed type detected as unchanged") @@ -2275,13 +2275,14 @@ func TestMetadataChanged(t *testing.T) { // get metadata fi := lstat(t, "testfile") - want, err := fs.NodeFromFileInfo("testfile", fi, false) + localFS := &fs.Local{} + want, err := localFS.NodeFromFileInfo("testfile", fi, false) if err != nil { t.Fatal(err) } fs := &StatFS{ - FS: fs.Local{}, + FS: localFS, OverrideLstat: map[string]os.FileInfo{ "testfile": fi, }, diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 312e2d33e09..621f84826b0 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -58,10 +58,11 @@ func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo { func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*restic.Node, *restic.Node) { fi := lstat(t, name) + fs := &fs.Local{} want, err := fs.NodeFromFileInfo(name, fi, false) rtest.OK(t, err) - _, node := snapshot(t, repo, fs.Local{}, nil, name) + _, node := snapshot(t, repo, fs, nil, name) return want, node } diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index ede616e28d3..5b17eca3797 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -30,7 +30,7 @@ func createTestFiles(t testing.TB, num int) (files []string) { return files } -func startFileSaver(ctx context.Context, t testing.TB) (*fileSaver, context.Context, *errgroup.Group) { +func startFileSaver(ctx context.Context, t testing.TB, fs fs.FS) (*fileSaver, context.Context, *errgroup.Group) { wg, ctx := errgroup.WithContext(ctx) saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *buffer, _ string, cb func(saveBlobResponse)) { @@ -67,7 +67,7 @@ func TestFileSaver(t *testing.T) { completeFn := func(*restic.Node, ItemStats) {} testFs := fs.Local{} - s, ctx, wg := startFileSaver(ctx, t) + s, ctx, wg := startFileSaver(ctx, t, testFs) var results []futureNode diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 034d1aa2477..5fac88dbb8a 100644 --- 
a/internal/fs/fs_local.go +++ b/internal/fs/fs_local.go @@ -3,6 +3,8 @@ package fs import ( "os" "path/filepath" + + "github.com/restic/restic/internal/restic" ) // Local is the local file system. Most methods are just passed on to the stdlib. @@ -57,6 +59,10 @@ func (fs Local) ExtendedStat(fi os.FileInfo) ExtendedFileInfo { return ExtendedStat(fi) } +func (fs Local) NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { + return nodeFromFileInfo(path, fi, ignoreXattrListError) +} + // Join joins any number of path elements into a single path, adding a // Separator if necessary. Join calls Clean on the result; in particular, all // empty strings are ignored. On Windows, the result is a UNC path if and only diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 490a6b68d1d..c2bf23bb78a 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -10,6 +10,7 @@ import ( "time" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" ) // Reader is a file system which provides a directory with a single file. When @@ -132,6 +133,17 @@ func (fs *Reader) ExtendedStat(fi os.FileInfo) ExtendedFileInfo { } } +func (fs *Reader) NodeFromFileInfo(path string, fi os.FileInfo, _ bool) (*restic.Node, error) { + node := buildBasicNode(path, fi) + + // fill minimal info with current values for uid, gid + node.UID = uint32(os.Getuid()) + node.GID = uint32(os.Getgid()) + node.ChangeTime = node.ModTime + + return node, nil +} + // Join joins any number of path elements into a single path, adding a // Separator if necessary. Join calls Clean on the result; in particular, all // empty strings are ignored. On Windows, the result is a UNC path if and only diff --git a/internal/fs/interface.go b/internal/fs/interface.go index dcd16a0b34d..0bb3029dc5a 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -3,6 +3,8 @@ package fs import ( "io" "os" + + "github.com/restic/restic/internal/restic" ) // FS bundles all methods needed for a file system. @@ -12,6 +14,7 @@ type FS interface { Lstat(name string) (os.FileInfo, error) DeviceID(fi os.FileInfo) (deviceID uint64, err error) ExtendedStat(fi os.FileInfo) ExtendedFileInfo + NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) Join(elem ...string) string Separator() string diff --git a/internal/fs/node.go b/internal/fs/node.go index 18e1a2140e1..4be48e06422 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -12,9 +12,25 @@ import ( "github.com/restic/restic/internal/restic" ) -// NodeFromFileInfo returns a new node from the given path and FileInfo. It +// nodeFromFileInfo returns a new node from the given path and FileInfo. It // returns the first error that is encountered, together with a node. -func NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { +func nodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { + node := buildBasicNode(path, fi) + + stat := ExtendedStat(fi) + if err := nodeFillExtendedStat(node, path, &stat); err != nil { + return node, err + } + + allowExtended, err := nodeFillGenericAttributes(node, path, &stat) + if allowExtended { + // Skip processing ExtendedAttributes if allowExtended is false. 
+ err = errors.Join(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) + } + return node, err +} + +func buildBasicNode(path string, fi os.FileInfo) *restic.Node { mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky node := &restic.Node{ Path: path, @@ -27,9 +43,7 @@ func NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (* if node.Type == restic.NodeTypeFile { node.Size = uint64(fi.Size()) } - - err := nodeFillExtra(node, path, fi, ignoreXattrListError) - return node, err + return node } func nodeTypeFromFileInfo(fi os.FileInfo) restic.NodeType { @@ -55,17 +69,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) restic.NodeType { return restic.NodeTypeInvalid } -func nodeFillExtra(node *restic.Node, path string, fi os.FileInfo, ignoreXattrListError bool) error { - if fi.Sys() == nil { - // fill minimal info with current values for uid, gid - node.UID = uint32(os.Getuid()) - node.GID = uint32(os.Getgid()) - node.ChangeTime = node.ModTime - return nil - } - - stat := ExtendedStat(fi) - +func nodeFillExtendedStat(node *restic.Node, path string, stat *ExtendedFileInfo) error { node.Inode = stat.Inode node.DeviceID = stat.DeviceID node.ChangeTime = stat.ChangeTime @@ -99,13 +103,7 @@ func nodeFillExtra(node *restic.Node, path string, fi os.FileInfo, ignoreXattrLi default: return errors.Errorf("unsupported file type %q", node.Type) } - - allowExtended, err := nodeFillGenericAttributes(node, path, &stat) - if allowExtended { - // Skip processing ExtendedAttributes if allowExtended is false. - err = errors.Join(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) - } - return err + return nil } var ( diff --git a/internal/fs/node_test.go b/internal/fs/node_test.go index 2623513a802..58facceb165 100644 --- a/internal/fs/node_test.go +++ b/internal/fs/node_test.go @@ -29,11 +29,12 @@ func BenchmarkNodeFillUser(t *testing.B) { } path := tempfile.Name() + fs := Local{} t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := NodeFromFileInfo(path, fi, false) + _, err := fs.NodeFromFileInfo(path, fi, false) rtest.OK(t, err) } @@ -53,11 +54,12 @@ func BenchmarkNodeFromFileInfo(t *testing.B) { } path := tempfile.Name() + fs := Local{} t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := NodeFromFileInfo(path, fi, false) + _, err := fs.NodeFromFileInfo(path, fi, false) if err != nil { t.Fatal(err) } @@ -250,9 +252,10 @@ func TestNodeRestoreAt(t *testing.T) { fi, err := os.Lstat(nodePath) rtest.OK(t, err) - n2, err := NodeFromFileInfo(nodePath, fi, false) + fs := &Local{} + n2, err := fs.NodeFromFileInfo(nodePath, fi, false) rtest.OK(t, err) - n3, err := NodeFromFileInfo(nodePath, fi, true) + n3, err := fs.NodeFromFileInfo(nodePath, fi, true) rtest.OK(t, err) rtest.Assert(t, n2.Equals(*n3), "unexpected node info mismatch %v", cmp.Diff(n2, n3)) diff --git a/internal/fs/node_unix_test.go b/internal/fs/node_unix_test.go index f38762fc792..6b47eafba2f 100644 --- a/internal/fs/node_unix_test.go +++ b/internal/fs/node_unix_test.go @@ -119,7 +119,8 @@ func TestNodeFromFileInfo(t *testing.T) { return } - node, err := NodeFromFileInfo(test.filename, fi, false) + fs := &Local{} + node, err := fs.NodeFromFileInfo(test.filename, fi, false) if err != nil { t.Fatal(err) } diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index 83ad72d5309..730740fe084 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -221,10 +221,11 @@ func restoreAndGetNode(t *testing.T, tempDir string, 
testNode *restic.Node, warn }) test.OK(t, errors.Wrapf(err, "Failed to restore metadata for: %s", testPath)) - fi, err := os.Lstat(testPath) + fs := &Local{} + fi, err := fs.Lstat(testPath) test.OK(t, errors.Wrapf(err, "Could not Lstat for path: %s", testPath)) - nodeFromFileInfo, err := NodeFromFileInfo(testPath, fi, false) + nodeFromFileInfo, err := fs.NodeFromFileInfo(testPath, fi, false) test.OK(t, errors.Wrapf(err, "Could not get NodeFromFileInfo for path: %s", testPath)) return testPath, nodeFromFileInfo diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go index cdd6b3c18cc..f1979f135de 100644 --- a/internal/restic/tree_test.go +++ b/internal/restic/tree_test.go @@ -84,7 +84,8 @@ func TestNodeMarshal(t *testing.T) { } func TestNodeComparison(t *testing.T) { - fi, err := os.Lstat("tree_test.go") + fs := &fs.Local{} + fi, err := fs.Lstat("tree_test.go") rtest.OK(t, err) node, err := fs.NodeFromFileInfo("tree_test.go", fi, false) @@ -126,7 +127,8 @@ func TestTreeEqualSerialization(t *testing.T) { builder := restic.NewTreeJSONBuilder() for _, fn := range files[:i] { - fi, err := os.Lstat(fn) + fs := &fs.Local{} + fi, err := fs.Lstat(fn) rtest.OK(t, err) node, err := fs.NodeFromFileInfo(fn, fi, false) rtest.OK(t, err) From 58388969627776304667dcfc85a8e0c9b6908e04 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 17 Oct 2024 19:45:03 +0200 Subject: [PATCH 714/893] doc: document safety feature for --target / --delete --- doc/050_restore.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 1a920fad432..9558ab1d4a0 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -132,6 +132,10 @@ options will be deleted. For example, the command ``restic -r /srv/restic-repo restore 79766175:/work --target /tmp/restore-work --include /foo --delete`` would only delete files within ``/tmp/restore-work/foo``. +When using ``--target / --delete`` then the ``restore`` command only works if either an ``--include`` +or ``--exclude`` option is also specified. This ensures that one cannot accidentally delete +the whole system. + Dry run ------- From 2e704c69ac1c05baceb368aa3d701fab480e991b Mon Sep 17 00:00:00 2001 From: Connor Findlay Date: Thu, 17 Oct 2024 20:08:11 +1300 Subject: [PATCH 715/893] backend/azure: Handle Container SAS/SAT Ignore AuthorizationFailure caused by using a container level SAS/SAT token when calling GetProperties during the Create() call. This is because the GetProperties call expects an Account Level token, and the container level token simply lacks the appropriate permissions. Suppressing the Authorization Failure is OK, because if the token is actually invalid, this is caught elsewhere when we try to actually use the token to do work. --- internal/backend/azure/azure.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index e09593fd636..8f5ee9f0096 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -157,6 +157,12 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, er if err != nil { return nil, errors.Wrap(err, "container.Create") } + } else if err != nil && bloberror.HasCode(err, bloberror.AuthorizationFailure) { + // We ignore this Auth. Failure, as the failure is related to the type + // of SAS/SAT, not an actual real failure. If the token is invalid, we + // fail later on anyway. + // For details see Issue #4004.
+ debug.Log("Ignoring AuthorizationFailure when calling GetProperties") } else if err != nil { return be, errors.Wrap(err, "container.GetProperties") } From 7bdfcf13fb5a605775521ebafb944da41644cc1a Mon Sep 17 00:00:00 2001 From: Connor Findlay Date: Thu, 17 Oct 2024 20:23:32 +1300 Subject: [PATCH 716/893] changelog: Add changes in issue-4004 Add changelog entry in the 'unreleased' sub-folder for changes introduced when fixing issue #4004. --- changelog/unreleased/issue-4004 | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 changelog/unreleased/issue-4004 diff --git a/changelog/unreleased/issue-4004 b/changelog/unreleased/issue-4004 new file mode 100644 index 00000000000..ca23af26f1f --- /dev/null +++ b/changelog/unreleased/issue-4004 @@ -0,0 +1,12 @@ +Bugfix: Allow use of container level SAS/SAT tokens with Azure backend + +When using a SAS/SAT token for authentication with Azure, restic was expecting +the provided token to be generated at the account level, granting permissions +to the storage account and all its containers. This caused an error that did +not allow tokens that were generated at the container level to be used to +initialize a repository. +Restic now allows SAS/SAT tokens that were generated at the account or +container level to be used to initialize a repository. + +https://github.com/restic/restic/issues/4004 +https://github.com/restic/restic/pull/5093 From b434f560cc53b074c23f4aad1619014f0f9be465 Mon Sep 17 00:00:00 2001 From: Connor Findlay Date: Thu, 17 Oct 2024 21:26:52 +1300 Subject: [PATCH 717/893] backend/azure: Add tests for both token types Add two new test cases, TestBackendAzureAccountToken and TestBackendAzureContainerToken, that ensure that the authorization using both types of token works. This introduces two new environment variables, RESTIC_TEST_AZURE_ACCOUNT_SAS and RESTIC_TEST_AZURE_CONTAINER_SAS, that contain the tokens to use when testing restic. If an environment variable is missing, the related test is skipped. --- internal/backend/azure/azure_test.go | 85 ++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/internal/backend/azure/azure_test.go b/internal/backend/azure/azure_test.go index 7df27d325a3..adafb6b030a 100644 --- a/internal/backend/azure/azure_test.go +++ b/internal/backend/azure/azure_test.go @@ -80,6 +80,91 @@ func BenchmarkBackendAzure(t *testing.B) { newAzureTestSuite().RunBenchmarks(t) } +// TestBackendAzureAccountToken tests that a Storage Account SAS/SAT token can authorize. +// This test ensures that restic can use a token that was generated using the storage +// account keys can be used to authorize the azure connection. +// Requires the RESTIC_TEST_AZURE_ACCOUNT_NAME, RESTIC_TEST_AZURE_REPOSITORY, and the +// RESTIC_TEST_AZURE_ACCOUNT_SAS environment variables to be set, otherwise this test +// will be skipped.
+func TestBackendAzureAccountToken(t *testing.T) { + vars := []string{ + "RESTIC_TEST_AZURE_ACCOUNT_NAME", + "RESTIC_TEST_AZURE_REPOSITORY", + "RESTIC_TEST_AZURE_ACCOUNT_SAS", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("set %v to test SAS/SAT Token Authentication", v) + return + } + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY")) + if err != nil { + t.Fatal(err) + } + + cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME") + cfg.AccountSAS = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_SAS")) + + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatal(err) + } + + _, err = azure.Create(ctx, *cfg, tr) + if err != nil { + t.Fatal(err) + } +} + +// TestBackendAzureContainerToken tests that a container SAS/SAT token can authorize. +// This test ensures that restic can use a token that was generated using a user +// delegation key against the container we are storing data in can be used to +// authorize the azure connection. +// Requires the RESTIC_TEST_AZURE_ACCOUNT_NAME, RESTIC_TEST_AZURE_REPOSITORY, and the +// RESTIC_TEST_AZURE_CONTAINER_SAS environment variables to be set, otherwise this test +// will be skipped. +func TestBackendAzureContainerToken(t *testing.T) { + vars := []string{ + "RESTIC_TEST_AZURE_ACCOUNT_NAME", + "RESTIC_TEST_AZURE_REPOSITORY", + "RESTIC_TEST_AZURE_CONTAINER_SAS", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("set %v to test SAS/SAT Token Authentication", v) + return + } + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY")) + if err != nil { + t.Fatal(err) + } + + cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME") + cfg.AccountSAS = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_CONTAINER_SAS")) + + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatal(err) + } + + _, err = azure.Create(ctx, *cfg, tr) + if err != nil { + t.Fatal(err) + } +} + func TestUploadLargeFile(t *testing.T) { if os.Getenv("RESTIC_AZURE_TEST_LARGE_UPLOAD") == "" { t.Skip("set RESTIC_AZURE_TEST_LARGE_UPLOAD=1 to test large uploads") From 60960d2405f513b9b7a7abb2c00c535c471524a8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 19:27:42 +0200 Subject: [PATCH 718/893] fs/vss: properly create node from vss path Previously, NodeFromFileInfo used the original file path to create the node, which also meant that extended metadata was read from there instead of within the vss snapshot. --- internal/fs/fs_local_vss.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 908e744eee2..54139ab2e3e 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -10,6 +10,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/options" + "github.com/restic/restic/internal/restic" ) // VSSConfig holds extended options of windows volume shadow copy service. 
@@ -140,6 +141,10 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fs.snapshotPath(name)) } +func (fs *LocalVss) NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { + return fs.FS.NodeFromFileInfo(fs.snapshotPath(path), fi, ignoreXattrListError) +} + // isMountPointIncluded is true if given mountpoint included by user. func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool { if fs.excludeVolumes == nil { From b988754a6dbcd0a3a93c400d86fb9fe4f499e086 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 19:30:05 +0200 Subject: [PATCH 719/893] fs/vss: reuse functions from underlying FS OpenFile, Stat and Lstat should reuse the underlying FS implementation to avoid diverging behavior. --- internal/fs/fs_local_vss.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 54139ab2e3e..1915e2a7ca1 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -128,17 +128,17 @@ func (fs *LocalVss) DeleteSnapshots() { // OpenFile wraps the Open method of the underlying file system. func (fs *LocalVss) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return os.OpenFile(fs.snapshotPath(name), flag, perm) + return fs.FS.OpenFile(fs.snapshotPath(name), flag, perm) } // Stat wraps the Stat method of the underlying file system. func (fs *LocalVss) Stat(name string) (os.FileInfo, error) { - return os.Stat(fs.snapshotPath(name)) + return fs.FS.Stat(fs.snapshotPath(name)) } // Lstat wraps the Lstat method of the underlying file system. func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { - return os.Lstat(fs.snapshotPath(name)) + return fs.FS.Lstat(fs.snapshotPath(name)) } func (fs *LocalVss) NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { From 26b77a543d5855ad0a8c74c0813adda840e78d99 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 21:41:02 +0200 Subject: [PATCH 720/893] archiver: use correct filepath in fileSaver for vss When using the VSS FS, then `f.Name()` contained the filename in the snapshot. This caused a double mapping when calling NodeFromFileInfo. --- internal/archiver/file_saver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index b9d07434a35..dccaa944245 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -156,7 +156,7 @@ func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat debug.Log("%v", snPath) - node, err := s.NodeFromFileInfo(snPath, f.Name(), fi, false) + node, err := s.NodeFromFileInfo(snPath, target, fi, false) if err != nil { _ = f.Close() completeError(err) From 352605d9f0b23ec60bcf53a96ac2a39661f1e48a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 21:43:23 +0200 Subject: [PATCH 721/893] fs: remove file.Name() from interface The only user was archiver.fileSaver. 
--- internal/fs/fs_reader.go | 6 +----- internal/fs/interface.go | 1 - 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index c2bf23bb78a..97d4e1660f4 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -232,7 +232,7 @@ func (r *readerFile) Close() error { var _ File = &readerFile{} // fakeFile implements all File methods, but only returns errors for anything -// except Stat() and Name(). +// except Stat() type fakeFile struct { name string os.FileInfo @@ -257,10 +257,6 @@ func (f fakeFile) Stat() (os.FileInfo, error) { return f.FileInfo, nil } -func (f fakeFile) Name() string { - return f.name -} - // fakeDir implements Readdirnames and Readdir, everything else is delegated to fakeFile. type fakeDir struct { entries []os.FileInfo diff --git a/internal/fs/interface.go b/internal/fs/interface.go index 0bb3029dc5a..2967429c0d5 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -34,5 +34,4 @@ type File interface { Readdirnames(n int) ([]string, error) Stat() (os.FileInfo, error) - Name() string } From ca79cb92e3bce9b0c89b028595490087b000e183 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 19:45:48 +0200 Subject: [PATCH 722/893] fs/vss: test that vss functions actually read from snapshot --- internal/fs/fs_local_vss_test.go | 58 ++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index a59882381d7..f1a0431185d 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -5,13 +5,18 @@ package fs import ( "fmt" + "io" + "os" + "path/filepath" "regexp" + "runtime" "strings" "testing" "time" ole "github.com/go-ole/go-ole" "github.com/restic/restic/internal/options" + rtest "github.com/restic/restic/internal/test" ) func matchStrings(ptrs []string, strs []string) bool { @@ -284,3 +289,56 @@ func TestParseProvider(t *testing.T) { }) } } + +func TestVSSFS(t *testing.T) { + if runtime.GOOS != "windows" || HasSufficientPrivilegesForVSS() != nil { + t.Skip("vss fs test can only be run on windows with admin privileges") + } + + cfg, err := ParseVSSConfig(options.Options{}) + rtest.OK(t, err) + + errorHandler := func(item string, err error) { + t.Fatalf("unexpected error (%v)", err) + } + messageHandler := func(msg string, args ...interface{}) { + if strings.HasPrefix(msg, "creating VSS snapshot for") || strings.HasPrefix(msg, "successfully created snapshot") { + return + } + t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) + } + + localVss := NewLocalVss(errorHandler, messageHandler, cfg) + defer localVss.DeleteSnapshots() + + tempdir := t.TempDir() + tempfile := filepath.Join(tempdir, "file") + rtest.OK(t, os.WriteFile(tempfile, []byte("example"), 0o600)) + + // trigger snapshot creation and + // capture FI while file still exists (should already be within the snapshot) + origFi, err := localVss.Stat(tempfile) + rtest.OK(t, err) + + // remove original file + rtest.OK(t, os.Remove(tempfile)) + + statFi, err := localVss.Stat(tempfile) + rtest.OK(t, err) + rtest.Equals(t, origFi.Mode(), statFi.Mode()) + + lstatFi, err := localVss.Lstat(tempfile) + rtest.OK(t, err) + rtest.Equals(t, origFi.Mode(), lstatFi.Mode()) + + f, err := localVss.OpenFile(tempfile, os.O_RDONLY, 0) + rtest.OK(t, err) + data, err := io.ReadAll(f) + rtest.OK(t, err) + rtest.Equals(t, "example", string(data), "unexpected file content") + rtest.OK(t, f.Close()) + + node, err := 
localVss.NodeFromFileInfo(tempfile, statFi, false) + rtest.OK(t, err) + rtest.Equals(t, node.Mode, statFi.Mode()) +} From 9f206601af85bb9e88ad3acdbf7b766fe5f52fa4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 20:05:32 +0200 Subject: [PATCH 723/893] backup: test that vss backups work if underlying data was removed --- cmd/restic/cmd_backup.go | 5 +++ cmd/restic/cmd_backup_integration_test.go | 47 +++++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 107e8bbe0a2..b7eed13184c 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -97,6 +97,7 @@ type BackupOptions struct { } var backupOptions BackupOptions +var backupFSTestHook func(fs fs.FS) fs.FS // ErrInvalidSourceData is used to report an incomplete backup var ErrInvalidSourceData = errors.New("at least one source file could not be read") @@ -582,6 +583,10 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter targets = []string{filename} } + if backupFSTestHook != nil { + targetFS = backupFSTestHook(targetFS) + } + // rejectFuncs collect functions that can reject items from the backup based on path and file info rejectFuncs, err := collectRejectFuncs(opts, targets, targetFS) if err != nil { diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 5e00b84b0e5..cc6a2ca2233 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -111,6 +111,53 @@ func TestBackupWithRelativePath(t *testing.T) { rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "second snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID) } +type vssDeleteOriginalFS struct { + fs.FS + testdata string + hasRemoved bool +} + +func (f *vssDeleteOriginalFS) Lstat(name string) (os.FileInfo, error) { + if !f.hasRemoved { + // call Lstat to trigger snapshot creation + _, _ = f.FS.Lstat(name) + // nuke testdata + if err := os.RemoveAll(f.testdata); err != nil { + return nil, err + } + f.hasRemoved = true + } + return f.FS.Lstat(name) +} + +func TestBackupVSS(t *testing.T) { + if runtime.GOOS != "windows" || fs.HasSufficientPrivilegesForVSS() != nil { + t.Skip("vss fs test can only be run on windows with admin privileges") + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{UseFsSnapshot: true} + + var testFS *vssDeleteOriginalFS + backupFSTestHook = func(fs fs.FS) fs.FS { + testFS = &vssDeleteOriginalFS{ + FS: fs, + testdata: env.testdata, + } + return testFS + } + defer func() { + backupFSTestHook = nil + }() + + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testListSnapshots(t, env.gopts, 1) + rtest.Equals(t, true, testFS.hasRemoved, "testdata was not removed") +} + func TestBackupParentSelection(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() From fc6f1b4b068fd7392ffc9dd24a3de9dbc414e2b2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 21:18:22 +0200 Subject: [PATCH 724/893] redirect test log output to t.Log() --- cmd/restic/integration_helpers_test.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 978deab3dda..8ae3bb78a61 100644 --- a/cmd/restic/integration_helpers_test.go +++ 
b/cmd/restic/integration_helpers_test.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "runtime" + "strings" "sync" "testing" @@ -168,6 +169,16 @@ type testEnvironment struct { gopts GlobalOptions } +type logOutputter struct { + t testing.TB +} + +func (l *logOutputter) Write(p []byte) (n int, err error) { + l.t.Helper() + l.t.Log(strings.TrimSuffix(string(p), "\n")) + return len(p), nil +} + // withTestEnvironment creates a test environment and returns a cleanup // function which removes it. func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) { @@ -200,8 +211,11 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) { Quiet: true, CacheDir: env.cache, password: rtest.TestPassword, - stdout: os.Stdout, - stderr: os.Stderr, + // stdout and stderr are written to by Warnf etc. That is the written data + // usually consists of one or multiple lines and therefore can be handled well + // by t.Log. + stdout: &logOutputter{t}, + stderr: &logOutputter{t}, extended: make(options.Options), // replace this hook with "nil" if listing a filetype more than once is necessary From 805618130129dd0423a09a17db4ca3dfb8cdb18f Mon Sep 17 00:00:00 2001 From: Git'Fellow <12234510+solracsf@users.noreply.github.com> Date: Wed, 4 Sep 2024 13:21:37 +0200 Subject: [PATCH 725/893] docs: Recommend to setup B2 versions lifecycle rules --- doc/030_preparing_a_new_repo.rst | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index fd5b3112748..832ceae7e72 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -455,9 +455,11 @@ Backblaze B2 than using the Backblaze B2 backend directly. Different from the B2 backend, restic's S3 backend will only hide no longer - necessary files. Thus, make sure to setup lifecycle rules to eventually - delete hidden files. The lifecycle setting "Keep only the last version of the file" - will keep only the most current version of a file. Read the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules). + necessary files. By default, Backblaze B2 retains all of the different versions of the + files and "hides" the older versions. Thus, to make sure to setup B2 lifecycle rules to + delete hidden files, the B2 lifecycle setting "Keep only the last version of the file" + is **recommended**. The previous version of the file is "hidden" for one day and then + deleted automatically by B2. More details at the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules). Restic can backup data to any Backblaze B2 bucket. You need to first setup the following environment variables with the credentials you can find in the From 5b60d49654555bf3785b679fa195f9caefa8f9cd Mon Sep 17 00:00:00 2001 From: Git'Fellow <12234510+solracsf@users.noreply.github.com> Date: Sat, 7 Sep 2024 15:27:15 +0200 Subject: [PATCH 726/893] fix: shorten sentence --- doc/030_preparing_a_new_repo.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 832ceae7e72..5826ffacf38 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -456,10 +456,10 @@ Backblaze B2 Different from the B2 backend, restic's S3 backend will only hide no longer necessary files. By default, Backblaze B2 retains all of the different versions of the - files and "hides" the older versions. 
Thus, to make sure to setup B2 lifecycle rules to - delete hidden files, the B2 lifecycle setting "Keep only the last version of the file" - is **recommended**. The previous version of the file is "hidden" for one day and then - deleted automatically by B2. More details at the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules). + files and "hides" the older versions. Thus, to free space occupied by hidden files, + it is **recommended** to use the B2 lifecycle "Keep only the last version of the file". + The previous version of the file is "hidden" for one day and then deleted automatically + by B2. More details at the [Backblaze documentation](https://www.backblaze.com/docs/cloud-storage-lifecycle-rules). Restic can backup data to any Backblaze B2 bucket. You need to first setup the following environment variables with the credentials you can find in the From 9386acc4a6a7d23da607ecead10441337efd7cc2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 5 Sep 2024 22:33:57 +0200 Subject: [PATCH 727/893] Fix indentation of blockquotes in github release notes --- changelog/changelog-github.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog/changelog-github.tmpl b/changelog/changelog-github.tmpl index d19788daf53..9936da8e609 100644 --- a/changelog/changelog-github.tmpl +++ b/changelog/changelog-github.tmpl @@ -15,7 +15,7 @@ Details {{ range $entry := .Entries }}{{ with $entry }} * {{ .Type }} #{{ .PrimaryID }}: {{ .Title }} {{ range $par := .Paragraphs }} - {{ $par }} +{{ indent 3 $par }} {{ end }} {{ range $id := .Issues -}} {{ ` ` }}[#{{ $id }}](https://github.com/restic/restic/issues/{{ $id -}}) From 8c1d6a50c1a3d851b63dcafc8db2f01898f1ae60 Mon Sep 17 00:00:00 2001 From: Damien Clark Date: Tue, 10 Sep 2024 17:14:07 +1000 Subject: [PATCH 728/893] cache: fix race condition in cache cleanup Fix multiple restic processes executing concurrently and racing to remove obsolete snapshots. Co-authored-by: Michael Eischer --- changelog/unreleased/pull-5047 | 7 +++++++ internal/backend/cache/file.go | 4 ++++ 2 files changed, 11 insertions(+) create mode 100644 changelog/unreleased/pull-5047 diff --git a/changelog/unreleased/pull-5047 b/changelog/unreleased/pull-5047 new file mode 100644 index 00000000000..ee50c6ec752 --- /dev/null +++ b/changelog/unreleased/pull-5047 @@ -0,0 +1,7 @@ +Bugfix: Fix possible error on concurrent cache cleanup + +Fix for multiple restic processes executing concurrently and racing to +remove obsolete snapshots from the local backend cache. Restic now suppresses the `no +such file or directory` error. 
+ +https://github.com/restic/restic/pull/5047 diff --git a/internal/backend/cache/file.go b/internal/backend/cache/file.go index 12f5f23c571..adc39d68796 100644 --- a/internal/backend/cache/file.go +++ b/internal/backend/cache/file.go @@ -211,6 +211,10 @@ func (c *Cache) list(t restic.FileType) (restic.IDSet, error) { dir := filepath.Join(c.path, cacheLayoutPaths[t]) err := filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error { if err != nil { + // ignore ErrNotExist to gracefully handle multiple processes clearing the cache + if errors.Is(err, os.ErrNotExist) { + return nil + } return errors.Wrap(err, "Walk") } From 491cc65e3a8d6cf1b880d569665162122826706c Mon Sep 17 00:00:00 2001 From: Roman Inflianskas Date: Fri, 13 Sep 2024 12:22:53 +0300 Subject: [PATCH 729/893] list: add subcommand completion --- cmd/restic/cmd_list.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index 1a4791e311d..acf964195a6 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -2,6 +2,7 @@ package main import ( "context" + "strings" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository/index" @@ -10,8 +11,11 @@ import ( "github.com/spf13/cobra" ) +var listAllowedArgs = []string{"blobs", "packs", "index", "snapshots", "keys", "locks"} +var listAllowedArgsUseString = strings.Join(listAllowedArgs, "|") + var cmdList = &cobra.Command{ - Use: "list [flags] [blobs|packs|index|snapshots|keys|locks]", + Use: "list [flags] [" + listAllowedArgsUseString + "]", Short: "List objects in the repository", Long: ` The "list" command allows listing objects in the repository based on type. @@ -30,6 +34,7 @@ Exit status is 12 if the password is incorrect. RunE: func(cmd *cobra.Command, args []string) error { return runList(cmd.Context(), globalOptions, args) }, + ValidArgs: listAllowedArgs, } func init() { From 49ccb7734c1c63d8cda7540bd7b7b7e08be3e0bc Mon Sep 17 00:00:00 2001 From: Roman Inflianskas Date: Fri, 13 Sep 2024 12:23:26 +0300 Subject: [PATCH 730/893] list: validate subcommand --- cmd/restic/cmd_list.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index acf964195a6..fcbed444062 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -35,6 +35,7 @@ Exit status is 12 if the password is incorrect. 
return runList(cmd.Context(), globalOptions, args) }, ValidArgs: listAllowedArgs, + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), } func init() { From 0df2fa813513e862ac710034620269a27b832fd1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 7 Sep 2024 16:37:26 +0200 Subject: [PATCH 731/893] fs: retry preallocate on Linux if interrupted by signal --- internal/fs/preallocate_linux.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/internal/fs/preallocate_linux.go b/internal/fs/preallocate_linux.go index 30b9e46441c..7b044950796 100644 --- a/internal/fs/preallocate_linux.go +++ b/internal/fs/preallocate_linux.go @@ -2,6 +2,7 @@ package fs import ( "os" + "syscall" "golang.org/x/sys/unix" ) @@ -12,5 +13,17 @@ func PreallocateFile(wr *os.File, size int64) error { } // int fallocate(int fd, int mode, off_t offset, off_t len) // use mode = 0 to also change the file size - return unix.Fallocate(int(wr.Fd()), 0, 0, size) + return ignoringEINTR(func() error { return unix.Fallocate(int(wr.Fd()), 0, 0, size) }) +} + +// ignoringEINTR makes a function call and repeats it if it returns +// an EINTR error. +// copied from /usr/lib/go/src/internal/poll/fd_posix.go of go 1.23.1 +func ignoringEINTR(fn func() error) error { + for { + err := fn() + if err != syscall.EINTR { + return err + } + } } From 986d981bf6b3d6795b40a6eea738c80f922d921f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 14 Sep 2024 18:25:29 +0200 Subject: [PATCH 732/893] tag: fix swallowed error if repository cannot be opened --- changelog/unreleased/issue-5050 | 7 +++++++ cmd/restic/cmd_tag.go | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-5050 diff --git a/changelog/unreleased/issue-5050 b/changelog/unreleased/issue-5050 new file mode 100644 index 00000000000..9604fc857dd --- /dev/null +++ b/changelog/unreleased/issue-5050 @@ -0,0 +1,7 @@ +Bugfix: Missing error if `tag` fails to lock repository + +Since restic 0.17.0, the `tag` command did not return an error if it failed to +open or lock the repository. This has been fixed. 
+ +https://github.com/restic/restic/issues/5050 +https://github.com/restic/restic/pull/5056 diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index c7bf725e92d..8a2a83678c7 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -110,7 +110,7 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st Verbosef("create exclusive lock for repository\n") ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { - return nil + return err } defer unlock() From 3c82fe6ef54909fd5fb8773e0e557e23ba1e6189 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Thu, 3 Oct 2024 21:17:22 +0200 Subject: [PATCH 733/893] fs: Include filename in mknod errors --- internal/restic/mknod_unix.go | 14 +++++++++++--- internal/restic/node_freebsd.go | 13 ++++++++++--- internal/restic/node_unix_test.go | 11 +++++++++++ 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/internal/restic/mknod_unix.go b/internal/restic/mknod_unix.go index 7dd6c60d0b9..b9a71bdf623 100644 --- a/internal/restic/mknod_unix.go +++ b/internal/restic/mknod_unix.go @@ -3,8 +3,16 @@ package restic -import "golang.org/x/sys/unix" +import ( + "os" -func mknod(path string, mode uint32, dev uint64) (err error) { - return unix.Mknod(path, mode, int(dev)) + "golang.org/x/sys/unix" +) + +func mknod(path string, mode uint32, dev uint64) error { + err := unix.Mknod(path, mode, int(dev)) + if err != nil { + err = &os.PathError{Op: "mknod", Path: path, Err: err} + } + return err } diff --git a/internal/restic/node_freebsd.go b/internal/restic/node_freebsd.go index 34d5b272c01..6a2d04f3648 100644 --- a/internal/restic/node_freebsd.go +++ b/internal/restic/node_freebsd.go @@ -3,14 +3,21 @@ package restic -import "syscall" +import ( + "os" + "syscall" +) func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { return nil } -func mknod(path string, mode uint32, dev uint64) (err error) { - return syscall.Mknod(path, mode, dev) +func mknod(path string, mode uint32, dev uint64) error { + err := syscall.Mknod(path, mode, dev) + if err != nil { + err = &os.PathError{Op: "mknod", Path: path, Err: err} + } + return err } func (s statT) atim() syscall.Timespec { return s.Atimespec } diff --git a/internal/restic/node_unix_test.go b/internal/restic/node_unix_test.go index 9ea7b172595..b3927de2297 100644 --- a/internal/restic/node_unix_test.go +++ b/internal/restic/node_unix_test.go @@ -7,10 +7,12 @@ import ( "os" "path/filepath" "runtime" + "strings" "syscall" "testing" "time" + "github.com/restic/restic/internal/errors" rtest "github.com/restic/restic/internal/test" ) @@ -145,3 +147,12 @@ func TestNodeFromFileInfo(t *testing.T) { }) } } + +func TestMknodError(t *testing.T) { + d := t.TempDir() + // Call mkfifo, which calls mknod, as mknod may give + // "operation not permitted" on Mac. + err := mkfifo(d, 0) + rtest.Assert(t, errors.Is(err, os.ErrExist), "want ErrExist, got %q", err) + rtest.Assert(t, strings.Contains(err.Error(), d), "filename not in %q", err) +} From fc1fc00aa4f88810ca936bf6152c35a83e6ede79 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 14 Sep 2024 18:59:59 +0200 Subject: [PATCH 734/893] backup: exclude irregular files from backup restic cannot backup irregular files as those don't behave like normal files. Thus skip them with an error. 
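The check added below keys off Go's `os.ModeIrregular` file mode bit, which restic surfaces as the node type `"irregular"`. As a minimal standalone sketch of how that bit is detected — an illustration only, not restic code; the path argument is a placeholder:

```
// Report whether a path is an "irregular" file, i.e. a non-regular file
// whose type Go cannot classify as directory, symlink, device, socket or
// named pipe (os.ModeIrregular).
package main

import (
	"fmt"
	"os"
)

func main() {
	fi, err := os.Lstat(os.Args[1]) // placeholder: pass any path as argument
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if fi.Mode()&os.ModeIrregular != 0 {
		fmt.Printf("%s is irregular and would be skipped by backup\n", os.Args[1])
	} else {
		fmt.Printf("%s has mode %v\n", os.Args[1], fi.Mode())
	}
}
```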
--- internal/archiver/archiver.go | 3 +- internal/archiver/archiver_test.go | 43 ++++++++++++++++++++++ internal/archiver/archiver_unix_test.go | 10 +++++ internal/archiver/archiver_windows_test.go | 8 ++++ 4 files changed, 63 insertions(+), 1 deletion(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index e7c346d3a43..8393208164e 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -262,7 +262,8 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, } // overwrite name to match that within the snapshot node.Name = path.Base(snPath) - if err != nil { + // do not filter error for nodes of irregular or invalid type + if node.Type != "irregular" && node.Type != "" && err != nil { err = fmt.Errorf("incomplete metadata for %v: %w", filename, err) return node, arch.error(filename, err) } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index c54f9ea3311..5ecfd4bc4f2 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -2423,4 +2423,47 @@ func TestMetadataBackupErrorFiltering(t *testing.T) { rtest.Assert(t, node != nil, "node is missing") rtest.Assert(t, err == replacementErr, "expected %v got %v", replacementErr, err) rtest.Assert(t, filteredErr != nil, "missing inner error") + + // check that errors from reading irregular file are not filtered + filteredErr = nil + node, err = arch.nodeFromFileInfo("file", filename, wrapIrregularFileInfo(fi), false) + rtest.Assert(t, node != nil, "node is missing") + rtest.Assert(t, filteredErr == nil, "error for irregular node should not have been filtered") + rtest.Assert(t, strings.Contains(err.Error(), "irregular"), "unexpected error %q does not warn about irregular file mode", err) +} + +func TestIrregularFile(t *testing.T) { + files := TestDir{ + "testfile": TestFile{ + Content: "foo bar test file", + }, + } + tempdir, repo := prepareTempdirRepoSrc(t, files) + + back := rtest.Chdir(t, tempdir) + defer back() + + tempfile := filepath.Join(tempdir, "testfile") + fi := lstat(t, "testfile") + + statfs := &StatFS{ + FS: fs.Local{}, + OverrideLstat: map[string]os.FileInfo{ + tempfile: wrapIrregularFileInfo(fi), + }, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + arch := New(repo, fs.Track{FS: statfs}, Options{}) + _, excluded, err := arch.save(ctx, "/", tempfile, nil) + if err == nil { + t.Fatalf("Save() should have failed") + } + rtest.Assert(t, strings.Contains(err.Error(), "irregular"), "unexpected error %q does not warn about irregular file mode", err) + + if excluded { + t.Errorf("Save() excluded the node, that's unexpected") + } } diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 4a380dff8d2..bc64a1047ae 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -46,6 +46,16 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { return res } +// wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file +func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo { + // wrap the os.FileInfo so we can return a modified stat_t + return wrappedFileInfo{ + FileInfo: fi, + sys: fi.Sys().(*syscall.Stat_t), + mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, + } +} + func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*restic.Node, *restic.Node) { fi := lstat(t, name) want, err := restic.NodeFromFileInfo(name, fi, false) diff --git 
a/internal/archiver/archiver_windows_test.go b/internal/archiver/archiver_windows_test.go index e1195030f3d..ac8a67f2bd3 100644 --- a/internal/archiver/archiver_windows_test.go +++ b/internal/archiver/archiver_windows_test.go @@ -26,3 +26,11 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { return res } + +// wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file +func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo { + return wrappedFileInfo{ + FileInfo: fi, + mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, + } +} From fe269c752afc29eff7a555d57f975ffe46aee213 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 14 Sep 2024 19:09:58 +0200 Subject: [PATCH 735/893] repair snapshots: remove irregular files --- cmd/restic/cmd_repair_snapshots.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index 385854312ad..01281cf3eb2 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -92,6 +92,10 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt // - files whose contents are not fully available (-> file will be modified) rewriter := walker.NewTreeRewriter(walker.RewriteOpts{ RewriteNode: func(node *restic.Node, path string) *restic.Node { + if node.Type == "irregular" || node.Type == "" { + Verbosef(" file %q: removed node with invalid type %q\n", path, node.Type) + return nil + } if node.Type != "file" { return node } From e3a022f9b54f81b75fe3afd9d68be6e93bd9ca15 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 16 Oct 2024 20:46:58 +0200 Subject: [PATCH 736/893] add irregular files bug changelog --- changelog/unreleased/pull-5057 | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 changelog/unreleased/pull-5057 diff --git a/changelog/unreleased/pull-5057 b/changelog/unreleased/pull-5057 new file mode 100644 index 00000000000..c34436044c6 --- /dev/null +++ b/changelog/unreleased/pull-5057 @@ -0,0 +1,21 @@ +Bugfix: Do not include irregular files in backup + +Since restic 0.17.1, files with type `irregular` could incorrectly be included +in snapshots. This is most likely to occur when backing up special file types +on Windows that cannot be handled by restic. + +This has been fixed. + +When running the `check` command this bug resulted in an error like the +following: + +``` + tree 12345678[...]: node "example.zip" with invalid type "irregular" +``` + +Repairing the affected snapshots requires upgrading to restic 0.17.2 and then +manually running `restic repair snapshots --forget`. This will remove the +`irregular` files from the snapshots. + +https://github.com/restic/restic/pull/5057 +https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2 From d6e76a22a8cfd151238f191a42baa2696c3b7359 Mon Sep 17 00:00:00 2001 From: Connor Findlay Date: Thu, 17 Oct 2024 20:08:11 +1300 Subject: [PATCH 737/893] backend/azure: Handle Container SAS/SAT Ignore AuthorizationFailure caused by using a container level SAS/SAT token when calling GetProperties during the Create() call. This is because the GetProperties call expects an Account Level token, and the container level token simply lacks the appropriate permissions. Supressing the Authorization Failure is OK, because if the token is actually invalid, this is caught elsewhere when we try to actually use the token to do work. 
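The tolerant handling added below relies on the error-code helpers from the Azure SDK's `bloberror` package (the same calls the diff uses). A simplified, standalone sketch of the classification — an illustration only, not the exact restic logic:

```
// Decide whether an error returned by the container GetProperties call
// should abort repository creation. A container-level SAS token lacks the
// account-level permission that GetProperties needs, so an
// AuthorizationFailure is tolerated; any other error remains fatal.
package azuresketch

import (
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
)

func isFatalGetPropertiesError(err error) bool {
	if err == nil {
		return false
	}
	if bloberror.HasCode(err, bloberror.AuthorizationFailure) {
		// Expected with container-scoped SAS/SAT tokens; a truly invalid
		// token will fail later when the backend is actually used.
		return false
	}
	return true
}
```

If the token really is invalid, the very first read or write against the container fails anyway, so nothing is silently lost by skipping the check at this point.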
--- internal/backend/azure/azure.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 737cf0e1479..76c8d755a59 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -160,6 +160,12 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, er if err != nil { return nil, errors.Wrap(err, "container.Create") } + } else if err != nil && bloberror.HasCode(err, bloberror.AuthorizationFailure) { + // We ignore this Auth. Failure, as the failure is related to the type + // of SAS/SAT, not an actual real failure. If the token is invalid, we + // fail later on anyway. + // For details see Issue #4004. + debug.Log("Ignoring AuthorizationFailure when calling GetProperties") } else if err != nil { return be, errors.Wrap(err, "container.GetProperties") } From 048c3bb240d810ffe398ad25d224b8abccd4d94b Mon Sep 17 00:00:00 2001 From: Connor Findlay Date: Thu, 17 Oct 2024 20:23:32 +1300 Subject: [PATCH 738/893] changelog: Add changes in issue-4004 Add changelog entry in the 'unreleased' sub-folder for changes introduced when fixing issue #4004. --- changelog/unreleased/issue-4004 | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 changelog/unreleased/issue-4004 diff --git a/changelog/unreleased/issue-4004 b/changelog/unreleased/issue-4004 new file mode 100644 index 00000000000..ca23af26f1f --- /dev/null +++ b/changelog/unreleased/issue-4004 @@ -0,0 +1,12 @@ +Bugfix: Allow use of container level SAS/SAT tokens with Azure backend + +When using a SAS/SAT token for authentication with Azure, restic was expecting +the provided token to be generated at the account level, granting permissions +to the storage account and all its containers. This caused an error that did +not allow tokens that were generated at the container level to be used to +initalize a repository. +Restic now allows SAS/SAT tokens that were generated at the account or +container level to be used to initalize a repository. + +https://github.com/restic/restic/issues/4004 +https://github.com/restic/restic/pull/5093 From 9553d873ff59f3c3cd4e3d4a6ac0f79d33527c2a Mon Sep 17 00:00:00 2001 From: Connor Findlay Date: Thu, 17 Oct 2024 21:26:52 +1300 Subject: [PATCH 739/893] backend/azure: Add tests for both token types Add two new test cases, TestBackendAzureAccountToken and TestBackendAzureContainerToken, that ensure that the authorization using both types of token works. This introduces two new environment variables, RESTIC_TEST_AZURE_ACCOUNT_SAS and RESTIC_TEST_AZURE_CONTAINER_SAS, that contain the tokens to use when testing restic. If an environment variable is missing, the related test is skipped. --- internal/backend/azure/azure_test.go | 85 ++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/internal/backend/azure/azure_test.go b/internal/backend/azure/azure_test.go index 7df27d325a3..adafb6b030a 100644 --- a/internal/backend/azure/azure_test.go +++ b/internal/backend/azure/azure_test.go @@ -80,6 +80,91 @@ func BenchmarkBackendAzure(t *testing.B) { newAzureTestSuite().RunBenchmarks(t) } +// TestBackendAzureAccountToken tests that a Storage Account SAS/SAT token can authorize. +// This test ensures that restic can use a token that was generated using the storage +// account keys can be used to authorize the azure connection. 
+// Requires the RESTIC_TEST_AZURE_ACCOUNT_NAME, RESTIC_TEST_AZURE_REPOSITORY, and the +// RESTIC_TEST_AZURE_ACCOUNT_SAS environment variables to be set, otherwise this test +// will be skipped. +func TestBackendAzureAccountToken(t *testing.T) { + vars := []string{ + "RESTIC_TEST_AZURE_ACCOUNT_NAME", + "RESTIC_TEST_AZURE_REPOSITORY", + "RESTIC_TEST_AZURE_ACCOUNT_SAS", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("set %v to test SAS/SAT Token Authentication", v) + return + } + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY")) + if err != nil { + t.Fatal(err) + } + + cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME") + cfg.AccountSAS = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_SAS")) + + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatal(err) + } + + _, err = azure.Create(ctx, *cfg, tr) + if err != nil { + t.Fatal(err) + } +} + +// TestBackendAzureContainerToken tests that a container SAS/SAT token can authorize. +// This test ensures that restic can use a token that was generated using a user +// delegation key against the container we are storing data in can be used to +// authorize the azure connection. +// Requires the RESTIC_TEST_AZURE_ACCOUNT_NAME, RESTIC_TEST_AZURE_REPOSITORY, and the +// RESTIC_TEST_AZURE_CONTAINER_SAS environment variables to be set, otherwise this test +// will be skipped. +func TestBackendAzureContainerToken(t *testing.T) { + vars := []string{ + "RESTIC_TEST_AZURE_ACCOUNT_NAME", + "RESTIC_TEST_AZURE_REPOSITORY", + "RESTIC_TEST_AZURE_CONTAINER_SAS", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("set %v to test SAS/SAT Token Authentication", v) + return + } + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY")) + if err != nil { + t.Fatal(err) + } + + cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME") + cfg.AccountSAS = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_CONTAINER_SAS")) + + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatal(err) + } + + _, err = azure.Create(ctx, *cfg, tr) + if err != nil { + t.Fatal(err) + } +} + func TestUploadLargeFile(t *testing.T) { if os.Getenv("RESTIC_AZURE_TEST_LARGE_UPLOAD") == "" { t.Skip("set RESTIC_AZURE_TEST_LARGE_UPLOAD=1 to test large uploads") From e1faf7b18cedbe9fb1832017a064a1649fdea7f3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 22:08:10 +0200 Subject: [PATCH 740/893] backup: work around file deletion error in test --- cmd/restic/cmd_backup_integration_test.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index cc6a2ca2233..5926fdd5400 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -8,6 +8,7 @@ import ( "path/filepath" "runtime" "testing" + "time" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" @@ -122,7 +123,17 @@ func (f *vssDeleteOriginalFS) Lstat(name string) (os.FileInfo, error) { // call Lstat to trigger snapshot creation _, _ = f.FS.Lstat(name) // nuke testdata - if err := os.RemoveAll(f.testdata); err != nil { + var err error + for i := 0; i < 3; i++ { + // The CI sometimes runs into "The process cannot access the file 
because it is being used by another process" errors + // thus try a few times to remove the data + err = os.RemoveAll(f.testdata) + if err == nil { + break + } + time.Sleep(10 * time.Millisecond) + } + if err != nil { return nil, err } f.hasRemoved = true From 4df2e33568426da1fe5991d4bb14949146a9a8d1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 22:26:18 +0200 Subject: [PATCH 741/893] archiver: properly create node for vss backups Previously, NodeFromFileInfo used the original file path to create the node, which also meant that extended metadata was read from there instead of within the vss snapshot. This change is a temporary solution for restic 0.17.2 and will be replaced with a clean fix in restic 0.18.0. --- internal/archiver/archiver.go | 3 ++- internal/fs/fs_local.go | 6 ++++++ internal/fs/fs_local_vss.go | 6 ++++++ internal/fs/fs_reader.go | 6 ++++++ internal/fs/interface.go | 1 + 5 files changed, 21 insertions(+), 1 deletion(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 8393208164e..03b3b99864c 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -248,7 +248,8 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I // nodeFromFileInfo returns the restic node from an os.FileInfo. func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - node, err := restic.NodeFromFileInfo(filename, fi, ignoreXattrListError) + mappedFilename := arch.FS.MapFilename(filename) + node, err := restic.NodeFromFileInfo(mappedFilename, fi, ignoreXattrListError) if !arch.WithAtime { node.AccessTime = node.ModTime } diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 48c40dc9098..06dbae9a06b 100644 --- a/internal/fs/fs_local.go +++ b/internal/fs/fs_local.go @@ -18,6 +18,12 @@ func (fs Local) VolumeName(path string) string { return filepath.VolumeName(path) } +// MapFilename is a temporary hack to prepare a filename for usage with +// NodeFromFileInfo. This is only relevant for LocalVss. +func (fs Local) MapFilename(filename string) string { + return filename +} + // Open opens a file for reading. func (fs Local) Open(name string) (File, error) { f, err := os.Open(fixpath(name)) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 718dfc46d9f..db6c95155e7 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -145,6 +145,12 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fs.snapshotPath(name)) } +// MapFilename is a temporary hack to prepare a filename for usage with +// NodeFromFileInfo. This is only relevant for LocalVss. +func (fs *LocalVss) MapFilename(filename string) string { + return fs.snapshotPath(filename) +} + // isMountPointIncluded is true if given mountpoint included by user. func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool { if fs.excludeVolumes == nil { diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 47af74245be..a39b4dad299 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -39,6 +39,12 @@ func (fs *Reader) VolumeName(_ string) string { return "" } +// MapFilename is a temporary hack to prepare a filename for usage with +// NodeFromFileInfo. This is only relevant for LocalVss. +func (fs *Reader) MapFilename(filename string) string { + return filename +} + // Open opens a file for reading. 
func (fs *Reader) Open(name string) (f File, err error) { switch name { diff --git a/internal/fs/interface.go b/internal/fs/interface.go index b26c56944bc..0fd84715db5 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -11,6 +11,7 @@ type FS interface { OpenFile(name string, flag int, perm os.FileMode) (File, error) Stat(name string) (os.FileInfo, error) Lstat(name string) (os.FileInfo, error) + MapFilename(filename string) string Join(elem ...string) string Separator() string From 0c711f5605e42c12db5812bb47d51e1d7b57974e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 21:41:02 +0200 Subject: [PATCH 742/893] archiver: use correct filepath in fileSaver for vss When using the VSS FS, then `f.Name()` contained the filename in the snapshot. This caused a double mapping when calling NodeFromFileInfo. --- internal/archiver/file_saver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index d10334301ac..70666506d08 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -156,7 +156,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat debug.Log("%v", snPath) - node, err := s.NodeFromFileInfo(snPath, f.Name(), fi, false) + node, err := s.NodeFromFileInfo(snPath, target, fi, false) if err != nil { _ = f.Close() completeError(err) From a7b13bd603eece2f8509dfb4fcfb74b9af398832 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 21:43:23 +0200 Subject: [PATCH 743/893] fs: remove file.Name() from interface The only user was archiver.fileSaver. --- internal/fs/fs_reader.go | 6 +----- internal/fs/interface.go | 1 - 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index a39b4dad299..57864c87b35 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -229,7 +229,7 @@ func (r *readerFile) Close() error { var _ File = &readerFile{} // fakeFile implements all File methods, but only returns errors for anything -// except Stat() and Name(). +// except Stat() type fakeFile struct { name string os.FileInfo @@ -266,10 +266,6 @@ func (f fakeFile) Stat() (os.FileInfo, error) { return f.FileInfo, nil } -func (f fakeFile) Name() string { - return f.name -} - // fakeDir implements Readdirnames and Readdir, everything else is delegated to fakeFile. type fakeDir struct { entries []os.FileInfo diff --git a/internal/fs/interface.go b/internal/fs/interface.go index 0fd84715db5..147773e2dde 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -34,5 +34,4 @@ type File interface { Readdir(int) ([]os.FileInfo, error) Seek(int64, int) (int64, error) Stat() (os.FileInfo, error) - Name() string } From ec43594003ec4c04a25847cea399fe54386bcc5c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 22:36:03 +0200 Subject: [PATCH 744/893] add vss metadata changelog --- changelog/unreleased/issue-5063 | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 changelog/unreleased/issue-5063 diff --git a/changelog/unreleased/issue-5063 b/changelog/unreleased/issue-5063 new file mode 100644 index 00000000000..95048ec58a4 --- /dev/null +++ b/changelog/unreleased/issue-5063 @@ -0,0 +1,10 @@ +Bugfix: Correctly `backup` extended metadata when using VSS on Windows + +On Windows, when creating a backup using the `--use-fs-snapshot` option, +then the extended metadata was not read from the filesystem snapshot. 
This +could result in errors when files have been removed in the meantime. + +This issue has been resolved. + +https://github.com/restic/restic/issues/5063 +https://github.com/restic/restic/pull/5097 From 1f5791222a3ab3833b541e93f069170146340141 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 20:05:32 +0200 Subject: [PATCH 745/893] backup: test that vss backups work if underlying data was removed --- cmd/restic/cmd_backup.go | 5 +++ cmd/restic/cmd_backup_integration_test.go | 47 +++++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 562108a3370..c7c0bcc5032 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -95,6 +95,7 @@ type BackupOptions struct { } var backupOptions BackupOptions +var backupFSTestHook func(fs fs.FS) fs.FS // ErrInvalidSourceData is used to report an incomplete backup var ErrInvalidSourceData = errors.New("at least one source file could not be read") @@ -598,6 +599,10 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter targets = []string{filename} } + if backupFSTestHook != nil { + targetFS = backupFSTestHook(targetFS) + } + wg, wgCtx := errgroup.WithContext(ctx) cancelCtx, cancel := context.WithCancel(wgCtx) defer cancel() diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 5e00b84b0e5..cc6a2ca2233 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -111,6 +111,53 @@ func TestBackupWithRelativePath(t *testing.T) { rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "second snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID) } +type vssDeleteOriginalFS struct { + fs.FS + testdata string + hasRemoved bool +} + +func (f *vssDeleteOriginalFS) Lstat(name string) (os.FileInfo, error) { + if !f.hasRemoved { + // call Lstat to trigger snapshot creation + _, _ = f.FS.Lstat(name) + // nuke testdata + if err := os.RemoveAll(f.testdata); err != nil { + return nil, err + } + f.hasRemoved = true + } + return f.FS.Lstat(name) +} + +func TestBackupVSS(t *testing.T) { + if runtime.GOOS != "windows" || fs.HasSufficientPrivilegesForVSS() != nil { + t.Skip("vss fs test can only be run on windows with admin privileges") + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{UseFsSnapshot: true} + + var testFS *vssDeleteOriginalFS + backupFSTestHook = func(fs fs.FS) fs.FS { + testFS = &vssDeleteOriginalFS{ + FS: fs, + testdata: env.testdata, + } + return testFS + } + defer func() { + backupFSTestHook = nil + }() + + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testListSnapshots(t, env.gopts, 1) + rtest.Equals(t, true, testFS.hasRemoved, "testdata was not removed") +} + func TestBackupParentSelection(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() From 841f8bfef025dff55443664ea744a740bf4a14df Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 21:18:22 +0200 Subject: [PATCH 746/893] redirect test log output to t.Log() --- cmd/restic/integration_helpers_test.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 978deab3dda..8ae3bb78a61 100644 --- a/cmd/restic/integration_helpers_test.go +++ 
b/cmd/restic/integration_helpers_test.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "runtime" + "strings" "sync" "testing" @@ -168,6 +169,16 @@ type testEnvironment struct { gopts GlobalOptions } +type logOutputter struct { + t testing.TB +} + +func (l *logOutputter) Write(p []byte) (n int, err error) { + l.t.Helper() + l.t.Log(strings.TrimSuffix(string(p), "\n")) + return len(p), nil +} + // withTestEnvironment creates a test environment and returns a cleanup // function which removes it. func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) { @@ -200,8 +211,11 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) { Quiet: true, CacheDir: env.cache, password: rtest.TestPassword, - stdout: os.Stdout, - stderr: os.Stderr, + // stdout and stderr are written to by Warnf etc. That is the written data + // usually consists of one or multiple lines and therefore can be handled well + // by t.Log. + stdout: &logOutputter{t}, + stderr: &logOutputter{t}, extended: make(options.Options), // replace this hook with "nil" if listing a filetype more than once is necessary From 46dce1f4faad5f5801b32a0864fc6a3c140f8b60 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 22:08:10 +0200 Subject: [PATCH 747/893] backup: work around file deletion error in test --- cmd/restic/cmd_backup_integration_test.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index cc6a2ca2233..5926fdd5400 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -8,6 +8,7 @@ import ( "path/filepath" "runtime" "testing" + "time" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" @@ -122,7 +123,17 @@ func (f *vssDeleteOriginalFS) Lstat(name string) (os.FileInfo, error) { // call Lstat to trigger snapshot creation _, _ = f.FS.Lstat(name) // nuke testdata - if err := os.RemoveAll(f.testdata); err != nil { + var err error + for i := 0; i < 3; i++ { + // The CI sometimes runs into "The process cannot access the file because it is being used by another process" errors + // thus try a few times to remove the data + err = os.RemoveAll(f.testdata) + if err == nil { + break + } + time.Sleep(10 * time.Millisecond) + } + if err != nil { return nil, err } f.hasRemoved = true From ee9a5cdf70fdc787ae764abec74efadf59fc318d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 18 Oct 2024 22:36:03 +0200 Subject: [PATCH 748/893] add vss metadata changelog --- changelog/unreleased/issue-5063 | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 changelog/unreleased/issue-5063 diff --git a/changelog/unreleased/issue-5063 b/changelog/unreleased/issue-5063 new file mode 100644 index 00000000000..65aa379e4d4 --- /dev/null +++ b/changelog/unreleased/issue-5063 @@ -0,0 +1,11 @@ +Bugfix: Correctly `backup` extended metadata when using VSS on Windows + +On Windows, when creating a backup using the `--use-fs-snapshot` option, +then the extended metadata was not read from the filesystem snapshot. This +could result in errors when files have been removed in the meantime. + +This issue has been resolved. 
+ +https://github.com/restic/restic/issues/5063 +https://github.com/restic/restic/pull/5097 +https://github.com/restic/restic/pull/5099 From f967a33ccc34ce20fd42e0ef3ca4086337a11e09 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:06:26 +0200 Subject: [PATCH 749/893] fs: Use AT_FDCWD in Linux nodeRestoreSymlinkTimestamps There's no need to open the containing directory. This is exactly what syscall.UtimesNano does, except for the AT_SYMLINK_NOFOLLOW flag. --- internal/fs/node_linux.go | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/internal/fs/node_linux.go b/internal/fs/node_linux.go index 91ef4f90747..55b4b05d7ae 100644 --- a/internal/fs/node_linux.go +++ b/internal/fs/node_linux.go @@ -1,33 +1,18 @@ package fs import ( - "os" - "path/filepath" "syscall" - "golang.org/x/sys/unix" - "github.com/restic/restic/internal/errors" + "golang.org/x/sys/unix" ) func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - dir, err := os.Open(fixpath(filepath.Dir(path))) - if err != nil { - return errors.WithStack(err) - } - times := []unix.Timespec{ {Sec: utimes[0].Sec, Nsec: utimes[0].Nsec}, {Sec: utimes[1].Sec, Nsec: utimes[1].Nsec}, } - err = unix.UtimesNanoAt(int(dir.Fd()), filepath.Base(path), times, unix.AT_SYMLINK_NOFOLLOW) - - if err != nil { - // ignore subsequent errors - _ = dir.Close() - return errors.Wrap(err, "UtimesNanoAt") - } - - return dir.Close() + err := unix.UtimesNanoAt(unix.AT_FDCWD, path, times, unix.AT_SYMLINK_NOFOLLOW) + return errors.Wrap(err, "UtimesNanoAt") } From 8f20d5dcd520ca1dae777c1a6831e752daf6cdc1 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Fri, 4 Oct 2024 10:06:18 +0200 Subject: [PATCH 750/893] fs: Refactor UtimesNano replacements Previously, nodeRestoreTimestamps would do something like if node.Type == restic.NodeTypeSymlink { return nodeRestoreSymlinkTimestamps(...) } return syscall.UtimesNano(...) where nodeRestoreSymlinkTimestamps was either a no-op or a reimplementation of syscall.UtimesNano that handles symlinks, with some repeated converting between timestamp types. The Linux implementation was a bit clumsy, requiring three syscalls to set the timestamps. In this new setup, there is a function utimesNano that has three implementations: * on Linux, it's a modified syscall.UtimesNano that uses AT_SYMLINK_NOFOLLOW and AT_FDCWD so it can handle any type in a single call; * on other Unix platforms, it just calls the syscall function after skipping symlinks; * on Windows, it's the modified UtimesNano that was previously called nodeRestoreSymlinkTimestamps, except with different arguments. 
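The Linux implementation that follows needs only a single syscall because `unix.UtimesNanoAt` accepts `AT_FDCWD` (resolve the path relative to the current directory) together with `AT_SYMLINK_NOFOLLOW` (operate on a symlink itself rather than its target). A small standalone sketch of that call — illustration only; the path is a placeholder:

```
// Set atime and mtime of a path on Linux without following symlinks,
// using a single utimensat(2) call via golang.org/x/sys/unix.
package main

import (
	"log"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	now := time.Now().UnixNano()
	times := []unix.Timespec{
		unix.NsecToTimespec(now), // access time
		unix.NsecToTimespec(now), // modification time
	}
	// "example-symlink" is a placeholder; with AT_SYMLINK_NOFOLLOW the
	// timestamps of the link itself are changed, not those of its target.
	err := unix.UtimesNanoAt(unix.AT_FDCWD, "example-symlink", times, unix.AT_SYMLINK_NOFOLLOW)
	if err != nil {
		log.Fatal(err)
	}
}
```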
--- internal/fs/node.go | 15 ++++----------- internal/fs/node_aix.go | 10 +--------- internal/fs/node_darwin.go | 7 ------- internal/fs/node_linux.go | 15 ++++++--------- internal/fs/node_linux_test.go | 19 +++++++++++++++++++ internal/fs/node_netbsd.go | 10 +--------- internal/fs/node_openbsd.go | 10 +--------- internal/fs/node_solaris.go | 7 ------- internal/fs/node_unix_notlinux.go | 21 +++++++++++++++++++++ internal/fs/node_windows.go | 8 ++++---- 10 files changed, 57 insertions(+), 65 deletions(-) delete mode 100644 internal/fs/node_darwin.go create mode 100644 internal/fs/node_linux_test.go delete mode 100644 internal/fs/node_solaris.go create mode 100644 internal/fs/node_unix_notlinux.go diff --git a/internal/fs/node.go b/internal/fs/node.go index 4be48e06422..d3619432212 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -292,18 +292,11 @@ func nodeRestoreMetadata(node *restic.Node, path string, warn func(msg string)) } func nodeRestoreTimestamps(node *restic.Node, path string) error { - var utimes = [...]syscall.Timespec{ - syscall.NsecToTimespec(node.AccessTime.UnixNano()), - syscall.NsecToTimespec(node.ModTime.UnixNano()), - } - - if node.Type == restic.NodeTypeSymlink { - return nodeRestoreSymlinkTimestamps(path, utimes) - } + atime := node.AccessTime.UnixNano() + mtime := node.ModTime.UnixNano() - if err := syscall.UtimesNano(fixpath(path), utimes[:]); err != nil { - return errors.Wrap(err, "UtimesNano") + if err := utimesNano(fixpath(path), atime, mtime, node.Type); err != nil { + return &os.PathError{Op: "UtimesNano", Path: path, Err: err} } - return nil } diff --git a/internal/fs/node_aix.go b/internal/fs/node_aix.go index 463ed1c33e1..fd185724f3b 100644 --- a/internal/fs/node_aix.go +++ b/internal/fs/node_aix.go @@ -3,15 +3,7 @@ package fs -import ( - "syscall" - - "github.com/restic/restic/internal/restic" -) - -func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { - return nil -} +import "github.com/restic/restic/internal/restic" // nodeRestoreExtendedAttributes is a no-op on AIX. func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { diff --git a/internal/fs/node_darwin.go b/internal/fs/node_darwin.go deleted file mode 100644 index f4c843498ce..00000000000 --- a/internal/fs/node_darwin.go +++ /dev/null @@ -1,7 +0,0 @@ -package fs - -import "syscall" - -func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - return nil -} diff --git a/internal/fs/node_linux.go b/internal/fs/node_linux.go index 55b4b05d7ae..ee13e0a9ec0 100644 --- a/internal/fs/node_linux.go +++ b/internal/fs/node_linux.go @@ -1,18 +1,15 @@ package fs import ( - "syscall" - - "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" "golang.org/x/sys/unix" ) -func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { +// utimesNano is like syscall.UtimesNano, except that it does not follow symlinks. 
+func utimesNano(path string, atime, mtime int64, _ restic.NodeType) error { times := []unix.Timespec{ - {Sec: utimes[0].Sec, Nsec: utimes[0].Nsec}, - {Sec: utimes[1].Sec, Nsec: utimes[1].Nsec}, + unix.NsecToTimespec(atime), + unix.NsecToTimespec(mtime), } - - err := unix.UtimesNanoAt(unix.AT_FDCWD, path, times, unix.AT_SYMLINK_NOFOLLOW) - return errors.Wrap(err, "UtimesNanoAt") + return unix.UtimesNanoAt(unix.AT_FDCWD, path, times, unix.AT_SYMLINK_NOFOLLOW) } diff --git a/internal/fs/node_linux_test.go b/internal/fs/node_linux_test.go new file mode 100644 index 00000000000..e9f1cf86083 --- /dev/null +++ b/internal/fs/node_linux_test.go @@ -0,0 +1,19 @@ +package fs + +import ( + "io/fs" + "strings" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestRestoreSymlinkTimestampsError(t *testing.T) { + d := t.TempDir() + node := restic.Node{Type: restic.NodeTypeSymlink} + err := nodeRestoreTimestamps(&node, d+"/nosuchfile") + rtest.Assert(t, errors.Is(err, fs.ErrNotExist), "want ErrNotExist, got %q", err) + rtest.Assert(t, strings.Contains(err.Error(), d), "filename not in %q", err) +} diff --git a/internal/fs/node_netbsd.go b/internal/fs/node_netbsd.go index 182050da072..d295bf57948 100644 --- a/internal/fs/node_netbsd.go +++ b/internal/fs/node_netbsd.go @@ -1,14 +1,6 @@ package fs -import ( - "syscall" - - "github.com/restic/restic/internal/restic" -) - -func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { - return nil -} +import "github.com/restic/restic/internal/restic" // nodeRestoreExtendedAttributes is a no-op on netbsd. func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { diff --git a/internal/fs/node_openbsd.go b/internal/fs/node_openbsd.go index 2a7a410dd75..712b144b46a 100644 --- a/internal/fs/node_openbsd.go +++ b/internal/fs/node_openbsd.go @@ -1,14 +1,6 @@ package fs -import ( - "syscall" - - "github.com/restic/restic/internal/restic" -) - -func nodeRestoreSymlinkTimestamps(_ string, _ [2]syscall.Timespec) error { - return nil -} +import "github.com/restic/restic/internal/restic" // nodeRestoreExtendedAttributes is a no-op on openbsd. func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { diff --git a/internal/fs/node_solaris.go b/internal/fs/node_solaris.go deleted file mode 100644 index f4c843498ce..00000000000 --- a/internal/fs/node_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package fs - -import "syscall" - -func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { - return nil -} diff --git a/internal/fs/node_unix_notlinux.go b/internal/fs/node_unix_notlinux.go new file mode 100644 index 00000000000..f8846638c32 --- /dev/null +++ b/internal/fs/node_unix_notlinux.go @@ -0,0 +1,21 @@ +//go:build !linux && unix + +package fs + +import ( + "syscall" + + "github.com/restic/restic/internal/restic" +) + +// utimesNano is like syscall.UtimesNano, except that it skips symlinks. 
+func utimesNano(path string, atime, mtime int64, typ restic.NodeType) error { + if typ == restic.NodeTypeSymlink { + return nil + } + + return syscall.UtimesNano(path, []syscall.Timespec{ + syscall.NsecToTimespec(atime), + syscall.NsecToTimespec(mtime), + }) +} diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index 6f473375cfe..9ea813eb180 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -42,8 +42,8 @@ func lchown(_ string, _ int, _ int) (err error) { return nil } -// restoreSymlinkTimestamps restores timestamps for symlinks -func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { +// utimesNano is like syscall.UtimesNano, except that it sets FILE_FLAG_OPEN_REPARSE_POINT. +func utimesNano(path string, atime, mtime int64, _ restic.NodeType) error { // tweaked version of UtimesNano from go/src/syscall/syscall_windows.go pathp, e := syscall.UTF16PtrFromString(fixpath(path)) if e != nil { @@ -63,8 +63,8 @@ func nodeRestoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error } }() - a := syscall.NsecToFiletime(syscall.TimespecToNsec(utimes[0])) - w := syscall.NsecToFiletime(syscall.TimespecToNsec(utimes[1])) + a := syscall.NsecToFiletime(atime) + w := syscall.NsecToFiletime(mtime) return syscall.SetFileTime(h, nil, &a, &w) } From bff3341d1025630d1f1523e30c91e6203dc42f3f Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 20 Oct 2024 15:27:21 +0530 Subject: [PATCH 751/893] azure: add support for hot, cool, or cool access tiers --- changelog/unreleased/issue-4521 | 21 ++++++++++++++ doc/030_preparing_a_new_repo.rst | 4 +++ internal/backend/azure/azure.go | 47 ++++++++++++++++++++++++++++---- internal/backend/azure/config.go | 3 +- 4 files changed, 68 insertions(+), 7 deletions(-) create mode 100644 changelog/unreleased/issue-4521 diff --git a/changelog/unreleased/issue-4521 b/changelog/unreleased/issue-4521 new file mode 100644 index 00000000000..709741d1193 --- /dev/null +++ b/changelog/unreleased/issue-4521 @@ -0,0 +1,21 @@ +Enhancement: Add config option to set Microsoft Blob Storage Access Tier + +The `azure.access-tier` option can be passed to Restic (using `-o`) to +specify the access tier for Microsoft Blob Storage objects created by Restic. + +The access tier is passed as-is to Microsoft Blob Storage, so it needs to be +understood by the API. The allowed values are `Hot`, `Cool`, or `Cold`. + +If unspecified, the default is inferred from the default configured on the +storage account. + +You can mix access tiers in the same container, and the setting isn't +stored in the restic repository, so be sure to specify it with each +command that writes to Microsoft Blob Storage. + +There is no official `Archive` storage support in restic, use this option at +your own risk. To restore any data, it is still necessary to manually warm up +the required data in the `Archive` tier. + +https://github.com/restic/restic/issues/4521 +https://github.com/restic/restic/pull/5046 \ No newline at end of file diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 0b35d1a1ee6..720bfc11d53 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -568,6 +568,10 @@ The number of concurrent connections to the Azure Blob Storage service can be se ``-o azure.connections=10`` switch. By default, at most five parallel connections are established. 
+The access tier of the blobs uploaded to the Azure Blob Storage service can be set with the +``-o azure.access-tier=Cool`` switch. The allowed values are ``Hot``, ``Cool`` or ``Cold``. +If unspecified, the default is inferred from the default configured on the storage account. + Google Cloud Storage ******************** diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 8f5ee9f0096..c7aa80185f1 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -37,6 +37,8 @@ type Backend struct { prefix string listMaxItems int layout.Layout + + accessTier blob.AccessTier } const saveLargeSize = 256 * 1024 * 1024 @@ -124,17 +126,32 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { } } + var accessTier blob.AccessTier + // if the access tier is not supported, then we will not set the access tier; during the upload process, + // the value will be inferred from the default configured on the storage account. + for _, tier := range supportedAccessTiers() { + if strings.EqualFold(string(tier), cfg.AccessTier) { + accessTier = tier + break + } + } + be := &Backend{ container: client, cfg: cfg, connections: cfg.Connections, Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, + accessTier: accessTier, } return be, nil } +func supportedAccessTiers() []blob.AccessTier { + return []blob.AccessTier{blob.AccessTierHot, blob.AccessTierCool, blob.AccessTierCold, blob.AccessTierArchive} +} + // Open opens the Azure backend at specified container. func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) { return open(cfg, rt) @@ -213,25 +230,39 @@ func (be *Backend) Path() string { return be.prefix } +// useAccessTier determines whether to apply the configured access tier to a given file. +// For archive access tier, only data files are stored using that class; metadata +// must remain instantly accessible. +func (be *Backend) useAccessTier(h backend.Handle) bool { + notArchiveClass := !strings.EqualFold(be.cfg.AccessTier, "archive") + isDataFile := h.Type == backend.PackFile && !h.IsMetadata + return isDataFile || notArchiveClass +} + // Save stores data in the backend at the handle. 
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { objName := be.Filename(h) debug.Log("InsertObject(%v, %v)", be.cfg.AccountName, objName) + var accessTier blob.AccessTier + if be.useAccessTier(h) { + accessTier = be.accessTier + } + var err error if rd.Length() < saveLargeSize { // if it's smaller than 256miB, then just create the file directly from the reader - err = be.saveSmall(ctx, objName, rd) + err = be.saveSmall(ctx, objName, rd, accessTier) } else { // otherwise use the more complicated method - err = be.saveLarge(ctx, objName, rd) + err = be.saveLarge(ctx, objName, rd, accessTier) } return err } -func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader) error { +func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error { blockBlobClient := be.container.NewBlockBlobClient(objName) // upload it as a new "block", use the base64 hash for the ID @@ -252,11 +283,13 @@ func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.Rew } blocks := []string{id} - _, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{}) + _, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{ + Tier: &accessTier, + }) return errors.Wrap(err, "CommitBlockList") } -func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader) error { +func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error { blockBlobClient := be.container.NewBlockBlobClient(objName) buf := make([]byte, 100*1024*1024) @@ -303,7 +336,9 @@ func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.Rew return errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length()) } - _, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{}) + _, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{ + Tier: &accessTier, + }) debug.Log("uploaded %d parts: %v", len(blocks), blocks) return errors.Wrap(err, "CommitBlockList") diff --git a/internal/backend/azure/config.go b/internal/backend/azure/config.go index 7d69719eff3..ee7ac51d8f0 100644 --- a/internal/backend/azure/config.go +++ b/internal/backend/azure/config.go @@ -22,7 +22,8 @@ type Config struct { Container string Prefix string - Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` + AccessTier string `option:"access-tier" help:"set the access tier for the blob storage (default: inferred from the storage account defaults)"` } // NewConfig returns a new Config with the default values filled in. 
From db686592a1ba00f105214fb14e6009a2ad5c0265 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 20 Oct 2024 20:24:49 +0530 Subject: [PATCH 752/893] debug: azure add debug log to show access-tier --- internal/backend/azure/azure.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index c7aa80185f1..c1c049a948f 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -132,6 +132,7 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { for _, tier := range supportedAccessTiers() { if strings.EqualFold(string(tier), cfg.AccessTier) { accessTier = tier + debug.Log(" - using access tier %v", accessTier) break } } From 2ce485063fb03a41ffc3abb0091bb2ac8444af07 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 22 Oct 2024 19:47:44 +0200 Subject: [PATCH 753/893] polish changelogs --- changelog/unreleased/issue-4004 | 16 ++++++++-------- changelog/unreleased/issue-5050 | 6 +++--- changelog/unreleased/issue-5063 | 7 ++++--- changelog/unreleased/pull-5047 | 9 +++++---- changelog/unreleased/pull-5057 | 24 +++++++++++++----------- 5 files changed, 33 insertions(+), 29 deletions(-) diff --git a/changelog/unreleased/issue-4004 b/changelog/unreleased/issue-4004 index ca23af26f1f..d95ad02e9ce 100644 --- a/changelog/unreleased/issue-4004 +++ b/changelog/unreleased/issue-4004 @@ -1,12 +1,12 @@ -Bugfix: Allow use of container level SAS/SAT tokens with Azure backend +Bugfix: Support container-level SAS/SAT tokens for Azure backend -When using a SAS/SAT token for authentication with Azure, restic was expecting -the provided token to be generated at the account level, granting permissions -to the storage account and all its containers. This caused an error that did -not allow tokens that were generated at the container level to be used to -initalize a repository. -Restic now allows SAS/SAT tokens that were generated at the account or -container level to be used to initalize a repository. +Restic previously expected SAS/SAT tokens to be generated at the account level, +which prevented tokens created at the container level from being used to +initialize a repository. This caused an error when attempting to initialize a +repository with container-level tokens. + +Restic now supports both account-level and container-level SAS/SAT tokens for +initializing a repository. https://github.com/restic/restic/issues/4004 https://github.com/restic/restic/pull/5093 diff --git a/changelog/unreleased/issue-5050 b/changelog/unreleased/issue-5050 index 9604fc857dd..ba736e4dbc5 100644 --- a/changelog/unreleased/issue-5050 +++ b/changelog/unreleased/issue-5050 @@ -1,7 +1,7 @@ -Bugfix: Missing error if `tag` fails to lock repository +Bugfix: Return error if `tag` fails to lock repository -Since restic 0.17.0, the `tag` command did not return an error if it failed to -open or lock the repository. This has been fixed. +Since restic 0.17.0, the `tag` command did not return an error when it failed +to open or lock the repository. This issue has been fixed. 
https://github.com/restic/restic/issues/5050 https://github.com/restic/restic/pull/5056 diff --git a/changelog/unreleased/issue-5063 b/changelog/unreleased/issue-5063 index 65aa379e4d4..63e26fe610c 100644 --- a/changelog/unreleased/issue-5063 +++ b/changelog/unreleased/issue-5063 @@ -1,8 +1,9 @@ Bugfix: Correctly `backup` extended metadata when using VSS on Windows -On Windows, when creating a backup using the `--use-fs-snapshot` option, -then the extended metadata was not read from the filesystem snapshot. This -could result in errors when files have been removed in the meantime. +On Windows, when creating a backup with the `--use-fs-snapshot` option, restic +read extended metadata from the original filesystem path instead of from the +snapshot. This could result in errors if files were removed during the backup +process. This issue has been resolved. diff --git a/changelog/unreleased/pull-5047 b/changelog/unreleased/pull-5047 index ee50c6ec752..ace02c3b47a 100644 --- a/changelog/unreleased/pull-5047 +++ b/changelog/unreleased/pull-5047 @@ -1,7 +1,8 @@ -Bugfix: Fix possible error on concurrent cache cleanup +Bugfix: Resolve potential error during concurrent cache cleanup -Fix for multiple restic processes executing concurrently and racing to -remove obsolete snapshots from the local backend cache. Restic now suppresses the `no -such file or directory` error. +When multiple restic processes ran concurrently, they could compete to remove +obsolete snapshots from the local backend cache, sometimes leading to a "no +such file or directory" error. Restic now suppresses this error to prevent +issues during cache cleanup. https://github.com/restic/restic/pull/5047 diff --git a/changelog/unreleased/pull-5057 b/changelog/unreleased/pull-5057 index c34436044c6..99df31d2be5 100644 --- a/changelog/unreleased/pull-5057 +++ b/changelog/unreleased/pull-5057 @@ -1,21 +1,23 @@ -Bugfix: Do not include irregular files in backup +Bugfix: Exclude irregular files from backups -Since restic 0.17.1, files with type `irregular` could incorrectly be included -in snapshots. This is most likely to occur when backing up special file types -on Windows that cannot be handled by restic. +Since restic 0.17.1, files with the type irregular could mistakenly be included +in snapshots, especially when backing up special file types on Windows that +restic cannot process. This issue has been fixed. -This has been fixed. - -When running the `check` command this bug resulted in an error like the -following: +Previously, this bug caused the `check` command to report errors like the +following one: ``` tree 12345678[...]: node "example.zip" with invalid type "irregular" ``` -Repairing the affected snapshots requires upgrading to restic 0.17.2 and then -manually running `restic repair snapshots --forget`. This will remove the -`irregular` files from the snapshots. +To repair affected snapshots, upgrade to restic 0.17.2 and run: + +``` +restic repair snapshots --forget +``` + +This will remove the `irregular` files from the snapshots. 
https://github.com/restic/restic/pull/5057 https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2 From d8d955e0aa2885cbb634288db1b89c691785e71e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 22 Oct 2024 20:00:39 +0200 Subject: [PATCH 754/893] Tweak wording Co-authored-by: rawtaz --- changelog/unreleased/issue-5050 | 2 +- changelog/unreleased/issue-5063 | 2 +- changelog/unreleased/pull-5057 | 7 ++++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/changelog/unreleased/issue-5050 b/changelog/unreleased/issue-5050 index ba736e4dbc5..34536f6dae3 100644 --- a/changelog/unreleased/issue-5050 +++ b/changelog/unreleased/issue-5050 @@ -1,7 +1,7 @@ Bugfix: Return error if `tag` fails to lock repository Since restic 0.17.0, the `tag` command did not return an error when it failed -to open or lock the repository. This issue has been fixed. +to open or lock the repository. This issue has now been fixed. https://github.com/restic/restic/issues/5050 https://github.com/restic/restic/pull/5056 diff --git a/changelog/unreleased/issue-5063 b/changelog/unreleased/issue-5063 index 63e26fe610c..54f97f0af9b 100644 --- a/changelog/unreleased/issue-5063 +++ b/changelog/unreleased/issue-5063 @@ -5,7 +5,7 @@ read extended metadata from the original filesystem path instead of from the snapshot. This could result in errors if files were removed during the backup process. -This issue has been resolved. +This issue has now been resolved. https://github.com/restic/restic/issues/5063 https://github.com/restic/restic/pull/5097 diff --git a/changelog/unreleased/pull-5057 b/changelog/unreleased/pull-5057 index 99df31d2be5..aba2992b756 100644 --- a/changelog/unreleased/pull-5057 +++ b/changelog/unreleased/pull-5057 @@ -1,8 +1,8 @@ Bugfix: Exclude irregular files from backups -Since restic 0.17.1, files with the type irregular could mistakenly be included +Since restic 0.17.1, files with the type `irregular` could mistakenly be included in snapshots, especially when backing up special file types on Windows that -restic cannot process. This issue has been fixed. +restic cannot process. This issue has now been fixed. Previously, this bug caused the `check` command to report errors like the following one: @@ -17,7 +17,8 @@ To repair affected snapshots, upgrade to restic 0.17.2 and run: restic repair snapshots --forget ``` -This will remove the `irregular` files from the snapshots. +This will remove the `irregular` files from the snapshots (creating +a new snapshot ID for each of the affected snapshots). 
https://github.com/restic/restic/pull/5057 https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2 From dbb5fb9fbd1c7e67e9fcf99bd6015774895b25e4 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 27 Oct 2024 16:37:08 +0100 Subject: [PATCH 755/893] Prepare changelog for 0.17.2 --- changelog/{unreleased => 0.17.2_2024-10-27}/issue-4004 | 0 changelog/{unreleased => 0.17.2_2024-10-27}/issue-5050 | 0 changelog/{unreleased => 0.17.2_2024-10-27}/issue-5063 | 0 changelog/{unreleased => 0.17.2_2024-10-27}/pull-5047 | 0 changelog/{unreleased => 0.17.2_2024-10-27}/pull-5057 | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename changelog/{unreleased => 0.17.2_2024-10-27}/issue-4004 (100%) rename changelog/{unreleased => 0.17.2_2024-10-27}/issue-5050 (100%) rename changelog/{unreleased => 0.17.2_2024-10-27}/issue-5063 (100%) rename changelog/{unreleased => 0.17.2_2024-10-27}/pull-5047 (100%) rename changelog/{unreleased => 0.17.2_2024-10-27}/pull-5057 (100%) diff --git a/changelog/unreleased/issue-4004 b/changelog/0.17.2_2024-10-27/issue-4004 similarity index 100% rename from changelog/unreleased/issue-4004 rename to changelog/0.17.2_2024-10-27/issue-4004 diff --git a/changelog/unreleased/issue-5050 b/changelog/0.17.2_2024-10-27/issue-5050 similarity index 100% rename from changelog/unreleased/issue-5050 rename to changelog/0.17.2_2024-10-27/issue-5050 diff --git a/changelog/unreleased/issue-5063 b/changelog/0.17.2_2024-10-27/issue-5063 similarity index 100% rename from changelog/unreleased/issue-5063 rename to changelog/0.17.2_2024-10-27/issue-5063 diff --git a/changelog/unreleased/pull-5047 b/changelog/0.17.2_2024-10-27/pull-5047 similarity index 100% rename from changelog/unreleased/pull-5047 rename to changelog/0.17.2_2024-10-27/pull-5047 diff --git a/changelog/unreleased/pull-5057 b/changelog/0.17.2_2024-10-27/pull-5057 similarity index 100% rename from changelog/unreleased/pull-5057 rename to changelog/0.17.2_2024-10-27/pull-5057 From 44968c7d43b7fd53d7d7e62483b82304441e077f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 27 Oct 2024 16:37:08 +0100 Subject: [PATCH 756/893] Generate CHANGELOG.md for 0.17.2 --- CHANGELOG.md | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a53939156a..c5e638c51a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ # Table of Contents +* [Changelog for 0.17.2](#changelog-for-restic-0172-2024-10-27) * [Changelog for 0.17.1](#changelog-for-restic-0171-2024-09-05) * [Changelog for 0.17.0](#changelog-for-restic-0170-2024-07-26) * [Changelog for 0.16.5](#changelog-for-restic-0165-2024-07-01) @@ -36,6 +37,89 @@ * [Changelog for 0.6.0](#changelog-for-restic-060-2017-05-29) +# Changelog for restic 0.17.2 (2024-10-27) +The following sections list the changes in restic 0.17.2 relevant to +restic users. The changes are ordered by importance. 
+ +## Summary + + * Fix #4004: Support container-level SAS/SAT tokens for Azure backend + * Fix #5047: Resolve potential error during concurrent cache cleanup + * Fix #5050: Return error if `tag` fails to lock repository + * Fix #5057: Exclude irregular files from backups + * Fix #5063: Correctly `backup` extended metadata when using VSS on Windows + +## Details + + * Bugfix #4004: Support container-level SAS/SAT tokens for Azure backend + + Restic previously expected SAS/SAT tokens to be generated at the account level, + which prevented tokens created at the container level from being used to + initialize a repository. This caused an error when attempting to initialize a + repository with container-level tokens. + + Restic now supports both account-level and container-level SAS/SAT tokens for + initializing a repository. + + https://github.com/restic/restic/issues/4004 + https://github.com/restic/restic/pull/5093 + + * Bugfix #5047: Resolve potential error during concurrent cache cleanup + + When multiple restic processes ran concurrently, they could compete to remove + obsolete snapshots from the local backend cache, sometimes leading to a "no such + file or directory" error. Restic now suppresses this error to prevent issues + during cache cleanup. + + https://github.com/restic/restic/pull/5047 + + * Bugfix #5050: Return error if `tag` fails to lock repository + + Since restic 0.17.0, the `tag` command did not return an error when it failed to + open or lock the repository. This issue has now been fixed. + + https://github.com/restic/restic/issues/5050 + https://github.com/restic/restic/pull/5056 + + * Bugfix #5057: Exclude irregular files from backups + + Since restic 0.17.1, files with the type `irregular` could mistakenly be + included in snapshots, especially when backing up special file types on Windows + that restic cannot process. This issue has now been fixed. + + Previously, this bug caused the `check` command to report errors like the + following one: + + ``` + tree 12345678[...]: node "example.zip" with invalid type "irregular" + ``` + + To repair affected snapshots, upgrade to restic 0.17.2 and run: + + ``` + restic repair snapshots --forget + ``` + + This will remove the `irregular` files from the snapshots (creating a new + snapshot ID for each of the affected snapshots). + + https://github.com/restic/restic/pull/5057 + https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2 + + * Bugfix #5063: Correctly `backup` extended metadata when using VSS on Windows + + On Windows, when creating a backup with the `--use-fs-snapshot` option, restic + read extended metadata from the original filesystem path instead of from the + snapshot. This could result in errors if files were removed during the backup + process. + + This issue has now been resolved. + + https://github.com/restic/restic/issues/5063 + https://github.com/restic/restic/pull/5097 + https://github.com/restic/restic/pull/5099 + + # Changelog for restic 0.17.1 (2024-09-05) The following sections list the changes in restic 0.17.1 relevant to restic users. The changes are ordered by importance. 
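As a side note on the #5047 entry in the 0.17.2 changelog above: the fix is described as suppressing the "no such file or directory" error when concurrent restic processes race to delete obsolete entries from the local cache. A minimal, hypothetical sketch of that error-handling pattern is shown below; the helper name and signature are illustrative only and are not restic's actual implementation.

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// removeCachedFile is an illustrative helper (not restic's code). It deletes
// an obsolete cache entry and treats "file does not exist" as success, since
// another process may already have removed the same entry and the desired
// end state (file gone) has been reached either way.
func removeCachedFile(path string) error {
	if err := os.Remove(path); err != nil && !errors.Is(err, fs.ErrNotExist) {
		return err
	}
	return nil
}

func main() {
	// Removing the same path twice: the second call is a no-op, not an error.
	fmt.Println(removeCachedFile("obsolete-cache-entry"))
	fmt.Println(removeCachedFile("obsolete-cache-entry"))
}
```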
From 5dcee7f0a32d7a3a79659be52e37cc0f21740b4f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 27 Oct 2024 16:37:19 +0100 Subject: [PATCH 757/893] Update manpages and auto-completion --- doc/bash-completion.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/bash-completion.sh b/doc/bash-completion.sh index 0517fdf7c7d..985d0e369bc 100644 --- a/doc/bash-completion.sh +++ b/doc/bash-completion.sh @@ -2177,6 +2177,12 @@ _restic_list() must_have_one_flag=() must_have_one_noun=() + must_have_one_noun+=("blobs") + must_have_one_noun+=("index") + must_have_one_noun+=("keys") + must_have_one_noun+=("locks") + must_have_one_noun+=("packs") + must_have_one_noun+=("snapshots") noun_aliases=() } From 2fb07dcdb190dcc2a0f960567debb9d5b5963ef6 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 27 Oct 2024 16:37:19 +0100 Subject: [PATCH 758/893] Add version for 0.17.2 --- VERSION | 2 +- cmd/restic/global.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index 21997e69ac4..c3d16c1646b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.17.1-dev +0.17.2 diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 99f9df8cf5a..4ecb9f9e0b9 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -47,7 +47,7 @@ import ( // to a missing backend storage location or config file var ErrNoRepository = errors.New("repository does not exist") -var version = "0.17.1-dev (compiled manually)" +var version = "0.17.2" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" From 7eec85b4eb074da62cbfbbddfe04c8a79f99dbe3 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 27 Oct 2024 16:37:28 +0100 Subject: [PATCH 759/893] Set development version for 0.17.2 --- VERSION | 2 +- cmd/restic/global.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index c3d16c1646b..a799160352f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.17.2 +0.17.2-dev diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 4ecb9f9e0b9..2b67708a86b 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -47,7 +47,7 @@ import ( // to a missing backend storage location or config file var ErrNoRepository = errors.New("repository does not exist") -var version = "0.17.2" +var version = "0.17.2-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" From cd44b2bf8bc6cc828e3a4f3d620f3e6414306575 Mon Sep 17 00:00:00 2001 From: Joram Berger Date: Sun, 27 Oct 2024 19:22:34 +0100 Subject: [PATCH 760/893] doc: Clarify number of blobs are added The numbers reported as `data_blobs` and `tree_blobs` are not total numbers of blobs but numbers of blobs added with the given snapshot. --- doc/075_scripting.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index c619ead7ceb..de7b9d1cd4c 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -214,9 +214,9 @@ Summary is the last output line in a successful backup. 
+---------------------------+---------------------------------------------------------+ | ``dirs_unmodified`` | Number of directories that did not change | +---------------------------+---------------------------------------------------------+ -| ``data_blobs`` | Number of data blobs | +| ``data_blobs`` | Number of data blobs added | +---------------------------+---------------------------------------------------------+ -| ``tree_blobs`` | Number of tree blobs | +| ``tree_blobs`` | Number of tree blobs added | +---------------------------+---------------------------------------------------------+ | ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ @@ -676,9 +676,9 @@ was created. +---------------------------+---------------------------------------------------------+ | ``dirs_unmodified`` | Number of directories that did not change | +---------------------------+---------------------------------------------------------+ -| ``data_blobs`` | Number of data blobs | +| ``data_blobs`` | Number of data blobs added | +---------------------------+---------------------------------------------------------+ -| ``tree_blobs`` | Number of tree blobs | +| ``tree_blobs`` | Number of tree blobs added | +---------------------------+---------------------------------------------------------+ | ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ From 15b7fb784f487f4c373b58d308fb896eaf828d5f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 21:13:58 +0100 Subject: [PATCH 761/893] fs: cleanup fixpath --- internal/fs/file_windows.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go index 3d011f719cb..7fb53e65851 100644 --- a/internal/fs/file_windows.go +++ b/internal/fs/file_windows.go @@ -18,19 +18,19 @@ func fixpath(name string) string { abspath, err := filepath.Abs(name) if err == nil { // Check if \\?\UNC\ already exist - if strings.HasPrefix(abspath, `\\?\UNC\`) { + if strings.HasPrefix(abspath, uncPathPrefix) { return abspath } // Check if \\?\ already exist - if strings.HasPrefix(abspath, `\\?\`) { + if strings.HasPrefix(abspath, extendedPathPrefix) { return abspath } // Check if path starts with \\ if strings.HasPrefix(abspath, `\\`) { - return strings.Replace(abspath, `\\`, `\\?\UNC\`, 1) + return strings.Replace(abspath, `\\`, uncPathPrefix, 1) } // Normal path - return `\\?\` + abspath + return extendedPathPrefix + abspath } return name } From 03aad742d3f4d46e5e6245e9ff765b7e53e1b2f9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 21:44:34 +0100 Subject: [PATCH 762/893] fs: add correct vss support to fixpath Paths that only contain the volume shadow copy snapshot name require special treatment. These paths must end with a slash for regular file operations to work. 
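The rule described above boils down to one special case: a path that names only a volume shadow copy snapshot must get a trailing separator before it is used. The following self-contained sketch is simplified from the patch below; the prefix and backslash-count condition are taken from the diff, while the function name `ensureVolumeSlash` is hypothetical.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// ensureVolumeSlash appends a separator when the path names only a volume
// shadow copy snapshot, e.g. `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX`.
// Without the trailing slash, access to the volume itself fails; paths that
// point inside the snapshot are returned unchanged. Simplified sketch only.
func ensureVolumeSlash(abspath string) string {
	if strings.HasPrefix(abspath, `\\?\GLOBALROOT\`) && strings.Count(abspath, `\`) == 5 {
		return abspath + string(filepath.Separator)
	}
	return abspath
}

func main() {
	fmt.Println(ensureVolumeSlash(`\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`))
	fmt.Println(ensureVolumeSlash(`\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1\Users\test`))
}
```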
--- internal/fs/file_windows.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go index 7fb53e65851..d7aabf360c3 100644 --- a/internal/fs/file_windows.go +++ b/internal/fs/file_windows.go @@ -21,6 +21,15 @@ func fixpath(name string) string { if strings.HasPrefix(abspath, uncPathPrefix) { return abspath } + // Check if \\?\GLOBALROOT exists which marks volume shadow copy snapshots + if strings.HasPrefix(abspath, globalRootPrefix) { + if strings.Count(abspath, `\`) == 5 { + // Append slash if this just a volume name, e.g. `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX` + // Without the trailing slash any access to the volume itself will fail. + return abspath + string(filepath.Separator) + } + return abspath + } // Check if \\?\ already exist if strings.HasPrefix(abspath, extendedPathPrefix) { return abspath From 1d0d5d87bc726a12ff28fe785d0904b66d179d55 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 19:10:01 +0100 Subject: [PATCH 763/893] fs: fix error in fillGenericAttributes for vss volumes Extended attributes and security descriptors apparently cannot be retrieved from a vss volume. Fix the volume check to correctly detect vss volumes and just completely disable extended attributes for volumes. --- internal/fs/node_windows.go | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index 9ea813eb180..d20d16642b1 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -325,8 +325,11 @@ func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFil return false, nil } - if strings.HasSuffix(filepath.Clean(path), `\`) { - // filepath.Clean(path) ends with '\' for Windows root volume paths only + isVolume, err := isVolumePath(path) + if err != nil { + return false, err + } + if isVolume { // Do not process file attributes, created time and sd for windows root volume paths // Security descriptors are not supported for root volume paths. 
// Though file attributes and created time are supported for root volume paths, @@ -335,7 +338,7 @@ func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFil if err != nil { return false, err } - return allowExtended, nil + return allowExtended, err } var sd *[]byte @@ -420,6 +423,18 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { return isEASupportedVolume, err } +// isVolumePath returns whether a path refers to a volume +func isVolumePath(path string) (bool, error) { + volName, err := prepareVolumeName(path) + if err != nil { + return false, err + } + + cleanPath := filepath.Clean(path) + cleanVolume := filepath.Clean(volName + `\`) + return cleanPath == cleanVolume, nil +} + // prepareVolumeName prepares the volume name for different cases in Windows func prepareVolumeName(path string) (volumeName string, err error) { // Check if it's an extended length path From cb9247530e9fa8623d5682a3aa678191c18899a9 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 19:30:21 +0100 Subject: [PATCH 764/893] backup: run test with absolute path --- cmd/restic/cmd_backup_integration_test.go | 24 +++++++++++++++++----- cmd/restic/cmd_copy_integration_test.go | 4 ++-- cmd/restic/cmd_restore_integration_test.go | 12 +++++------ cmd/restic/integration_test.go | 2 +- 4 files changed, 28 insertions(+), 14 deletions(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 5926fdd5400..b692cad87a5 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -52,14 +52,14 @@ func testBackup(t *testing.T, useFsSnapshot bool) { opts := BackupOptions{UseFsSnapshot: useFsSnapshot} // first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testListSnapshots(t, env.gopts, 1) testRunCheck(t, env.gopts) stat1 := dirStats(env.repo) // second backup, implicit incremental - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) snapshotIDs := testListSnapshots(t, env.gopts, 2) stat2 := dirStats(env.repo) @@ -71,7 +71,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) { testRunCheck(t, env.gopts) // third backup, explicit incremental opts.Parent = snapshotIDs[0].String() - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) snapshotIDs = testListSnapshots(t, env.gopts, 3) stat3 := dirStats(env.repo) @@ -84,7 +84,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) - testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()+":"+toPathInSnapshot(filepath.Dir(env.testdata))) diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) rtest.Assert(t, diff == "", "directories are not equal: %v", diff) } @@ -92,6 +92,20 @@ func testBackup(t *testing.T, useFsSnapshot bool) { testRunCheck(t, env.gopts) } +func toPathInSnapshot(path string) string { + // use path as is on most platforms, but convert it on windows + if runtime.GOOS == "windows" { + // the path generate by the test is always local so take the 
shortcut + vol := filepath.VolumeName(path) + if vol[len(vol)-1] != ':' { + panic(fmt.Sprintf("unexpected path: %q", path)) + } + path = vol[:len(vol)-1] + string(filepath.Separator) + path[len(vol)+1:] + path = filepath.ToSlash(path) + } + return path +} + func TestBackupWithRelativePath(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -557,7 +571,7 @@ func TestHardLink(t *testing.T) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) - testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()) diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) rtest.Assert(t, diff == "", "directories are not equal %v", diff) diff --git a/cmd/restic/cmd_copy_integration_test.go b/cmd/restic/cmd_copy_integration_test.go index 70461587034..9ae78ba5090 100644 --- a/cmd/restic/cmd_copy_integration_test.go +++ b/cmd/restic/cmd_copy_integration_test.go @@ -62,11 +62,11 @@ func TestCopy(t *testing.T) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) origRestores[restoredir] = struct{}{} - testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()) } for i, snapshotID := range copiedSnapshotIDs { restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i)) - testRunRestore(t, env2.gopts, restoredir, snapshotID) + testRunRestore(t, env2.gopts, restoredir, snapshotID.String()) foundMatch := false for cmpdir := range origRestores { diff := directoriesContentsDiff(restoredir, cmpdir) diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 42cd1f87dca..945c24a37e5 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -17,17 +17,17 @@ import ( "github.com/restic/restic/internal/ui/termstatus" ) -func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) { +func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID string) { testRunRestoreExcludes(t, opts, dir, snapshotID, nil) } -func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) { +func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID string, excludes []string) { opts := RestoreOptions{ Target: dir, } opts.Excludes = excludes - rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) + rtest.OK(t, testRunRestoreAssumeFailure(snapshotID, opts, gopts)) } func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error { @@ -197,7 +197,7 @@ func TestRestoreFilter(t *testing.T) { snapshotID := testListSnapshots(t, env.gopts, 1)[0] // no restore filter should restore all files - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID.String()) for _, testFile := range testfiles { rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size))) } @@ -219,7 +219,7 @@ func TestRestoreFilter(t *testing.T) { // restore with excludes restoredir := filepath.Join(env.base, "restore-with-excludes") - testRunRestoreExcludes(t, env.gopts, restoredir, snapshotID, excludePatterns) + 
testRunRestoreExcludes(t, env.gopts, restoredir, snapshotID.String(), excludePatterns) testRestoredFileExclusions(t, restoredir) // Create an exclude file with some patterns @@ -339,7 +339,7 @@ func TestRestoreWithPermissionFailure(t *testing.T) { _ = withRestoreGlobalOptions(func() error { globalOptions.stderr = io.Discard - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0]) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0].String()) return nil }) diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index df95031dca5..d39ea6980ca 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -35,7 +35,7 @@ func TestCheckRestoreNoLock(t *testing.T) { testRunCheck(t, env.gopts) snapshotIDs := testListSnapshots(t, env.gopts, 4) - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0]) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0].String()) } // a listOnceBackend only allows listing once per filetype From 62af5f0b4a01a8b2d373c5d03e1b0c7f3770033d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 19:32:51 +0100 Subject: [PATCH 765/893] restic: test path handling of volume shadow copy root path --- internal/fs/node_windows_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index 730740fe084..b40483f3167 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -451,6 +451,13 @@ func TestPrepareVolumeName(t *testing.T) { expectError: false, expectedEASupported: false, }, + { + name: "Volume Shadow Copy root", + path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`, + expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`, + expectError: false, + expectedEASupported: false, + }, { name: "Volume Shadow Copy path", path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1\Users\test`, From 14370fbf9e3b0f9b15ce96e0352b35efd64a24df Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 20:01:52 +0100 Subject: [PATCH 766/893] add vss metadata changelog --- changelog/unreleased/issue-5107 | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 changelog/unreleased/issue-5107 diff --git a/changelog/unreleased/issue-5107 b/changelog/unreleased/issue-5107 new file mode 100644 index 00000000000..13bb380e4ef --- /dev/null +++ b/changelog/unreleased/issue-5107 @@ -0,0 +1,15 @@ +Bugfix: Fix metadata error on Windows for backups using VSS + +Since restic 0.17.2, when creating a backup on Windows using `--use-fs-snapshot`, +restic would report an error like the following: + +``` +error: incomplete metadata for C:\: get EA failed while opening file handle for path \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX\, with: The process cannot access the file because it is being used by another process. +``` + +This has now been fixed by correctly handling paths that refer to volume +shadow copy snapshots. 
+ +https://github.com/restic/restic/issues/5107 +https://github.com/restic/restic/pull/5110 +https://github.com/restic/restic/pull/5112 From 51cd1c847b1f5a006d6e8fbcd4656a2989f34266 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 21:59:53 +0100 Subject: [PATCH 767/893] backup: log error if test backup fails --- cmd/restic/cmd_backup_integration_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index b692cad87a5..71367b23617 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -31,7 +31,7 @@ func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) { err := testRunBackupAssumeFailure(t, dir, target, opts, gopts) - rtest.Assert(t, err == nil, "Error while backing up") + rtest.Assert(t, err == nil, "Error while backing up: %v", err) } func TestBackup(t *testing.T) { @@ -95,7 +95,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) { func toPathInSnapshot(path string) string { // use path as is on most platforms, but convert it on windows if runtime.GOOS == "windows" { - // the path generate by the test is always local so take the shortcut + // the path generated by the test is always local so take the shortcut vol := filepath.VolumeName(path) if vol[len(vol)-1] != ':' { panic(fmt.Sprintf("unexpected path: %q", path)) From f77e67086c1e47a2b99dbc81651624039875dcd0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 21:53:08 +0100 Subject: [PATCH 768/893] fs: add correct vss support to fixpath Paths that only contain the volume shadow copy snapshot name require special treatment. These paths must end with a slash for regular file operations to work. --- internal/fs/file_windows.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go index 50c7e9938f6..31d495509ea 100644 --- a/internal/fs/file_windows.go +++ b/internal/fs/file_windows.go @@ -20,6 +20,15 @@ func fixpath(name string) string { if strings.HasPrefix(abspath, `\\?\UNC\`) { return abspath } + // Check if \\?\GLOBALROOT exists which marks volume shadow copy snapshots + if strings.HasPrefix(abspath, `\\?\GLOBALROOT\`) { + if strings.Count(abspath, `\`) == 5 { + // Append slash if this just a volume name, e.g. `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX` + // Without the trailing slash any access to the volume itself will fail. + return abspath + string(filepath.Separator) + } + return abspath + } // Check if \\?\ already exist if strings.HasPrefix(abspath, `\\?\`) { return abspath From e38f6794cde01f2950d900122048f4218eaedd33 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 19:10:01 +0100 Subject: [PATCH 769/893] restic: fix error in fillGenericAttributes for vss volumes Extended attributes and security descriptors apparently cannot be retrieved from a vss volume. Fix the volume check to correctly detect vss volumes and just completely disable extended attributes for volumes. 
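The volume check referred to here compares the cleaned path against the cleaned volume name, as shown in the diff below. The sketch that follows mirrors that comparison in isolation: the `isVolumeRoot` helper is hypothetical and takes the volume name directly instead of deriving it via restic's `prepareVolumeName`.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// isVolumeRoot reports whether path refers to the volume itself rather than
// to a file or directory inside it. volName is the volume name for the path,
// as a prepareVolumeName-style helper would return it. Illustrative only.
func isVolumeRoot(path, volName string) bool {
	cleanPath := filepath.Clean(path)
	cleanVolume := filepath.Clean(volName + `\`)
	return cleanPath == cleanVolume
}

func main() {
	// On Windows, `C:\` is the volume itself while `C:\Users` is not.
	fmt.Println(isVolumeRoot(`C:\`, `C:`))      // true
	fmt.Println(isVolumeRoot(`C:\Users`, `C:`)) // false
}
```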
--- internal/restic/node_windows.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index bce01ccad04..722ef09db9e 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -372,8 +372,11 @@ func (node *Node) fillGenericAttributes(path string, fi os.FileInfo, stat *statT return false, nil } - if strings.HasSuffix(filepath.Clean(path), `\`) { - // filepath.Clean(path) ends with '\' for Windows root volume paths only + isVolume, err := isVolumePath(path) + if err != nil { + return false, err + } + if isVolume { // Do not process file attributes, created time and sd for windows root volume paths // Security descriptors are not supported for root volume paths. // Though file attributes and created time are supported for root volume paths, @@ -464,6 +467,18 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { return isEASupportedVolume, err } +// isVolumePath returns whether a path refers to a volume +func isVolumePath(path string) (bool, error) { + volName, err := prepareVolumeName(path) + if err != nil { + return false, err + } + + cleanPath := filepath.Clean(path) + cleanVolume := filepath.Clean(volName + `\`) + return cleanPath == cleanVolume, nil +} + // prepareVolumeName prepares the volume name for different cases in Windows func prepareVolumeName(path string) (volumeName string, err error) { // Check if it's an extended length path From 4380627cb7fe72fe3f909d62e8d11406ee147425 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 19:30:21 +0100 Subject: [PATCH 770/893] backup: run test with absolute path --- cmd/restic/cmd_backup_integration_test.go | 24 +++++++++++++++++----- cmd/restic/cmd_copy_integration_test.go | 4 ++-- cmd/restic/cmd_restore_integration_test.go | 12 +++++------ cmd/restic/integration_test.go | 2 +- 4 files changed, 28 insertions(+), 14 deletions(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 5926fdd5400..0cdf8e5b42e 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -52,14 +52,14 @@ func testBackup(t *testing.T, useFsSnapshot bool) { opts := BackupOptions{UseFsSnapshot: useFsSnapshot} // first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testListSnapshots(t, env.gopts, 1) testRunCheck(t, env.gopts) stat1 := dirStats(env.repo) // second backup, implicit incremental - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) snapshotIDs := testListSnapshots(t, env.gopts, 2) stat2 := dirStats(env.repo) @@ -71,7 +71,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) { testRunCheck(t, env.gopts) // third backup, explicit incremental opts.Parent = snapshotIDs[0].String() - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) snapshotIDs = testListSnapshots(t, env.gopts, 3) stat3 := dirStats(env.repo) @@ -84,7 +84,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) - testRunRestore(t, env.gopts, restoredir, 
snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()+":"+toPathInSnapshot(filepath.Dir(env.testdata))) diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) rtest.Assert(t, diff == "", "directories are not equal: %v", diff) } @@ -92,6 +92,20 @@ func testBackup(t *testing.T, useFsSnapshot bool) { testRunCheck(t, env.gopts) } +func toPathInSnapshot(path string) string { + // use path as is on most platforms, but convert it on windows + if runtime.GOOS == "windows" { + // the path generated by the test is always local so take the shortcut + vol := filepath.VolumeName(path) + if vol[len(vol)-1] != ':' { + panic(fmt.Sprintf("unexpected path: %q", path)) + } + path = vol[:len(vol)-1] + string(filepath.Separator) + path[len(vol)+1:] + path = filepath.ToSlash(path) + } + return path +} + func TestBackupWithRelativePath(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -557,7 +571,7 @@ func TestHardLink(t *testing.T) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) - testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()) diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) rtest.Assert(t, diff == "", "directories are not equal %v", diff) diff --git a/cmd/restic/cmd_copy_integration_test.go b/cmd/restic/cmd_copy_integration_test.go index 70461587034..9ae78ba5090 100644 --- a/cmd/restic/cmd_copy_integration_test.go +++ b/cmd/restic/cmd_copy_integration_test.go @@ -62,11 +62,11 @@ func TestCopy(t *testing.T) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) origRestores[restoredir] = struct{}{} - testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()) } for i, snapshotID := range copiedSnapshotIDs { restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i)) - testRunRestore(t, env2.gopts, restoredir, snapshotID) + testRunRestore(t, env2.gopts, restoredir, snapshotID.String()) foundMatch := false for cmpdir := range origRestores { diff := directoriesContentsDiff(restoredir, cmpdir) diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index b0543850bbb..f876bfae1f7 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -18,17 +18,17 @@ import ( "github.com/restic/restic/internal/ui/termstatus" ) -func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) { +func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID string) { testRunRestoreExcludes(t, opts, dir, snapshotID, nil) } -func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) { +func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID string, excludes []string) { opts := RestoreOptions{ Target: dir, } opts.Excludes = excludes - rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) + rtest.OK(t, testRunRestoreAssumeFailure(snapshotID, opts, gopts)) } func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error { @@ -198,7 +198,7 @@ func TestRestoreFilter(t *testing.T) { snapshotID := testListSnapshots(t, env.gopts, 1)[0] // no 
restore filter should restore all files - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID.String()) for _, testFile := range testfiles { rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size))) } @@ -220,7 +220,7 @@ func TestRestoreFilter(t *testing.T) { // restore with excludes restoredir := filepath.Join(env.base, "restore-with-excludes") - testRunRestoreExcludes(t, env.gopts, restoredir, snapshotID, excludePatterns) + testRunRestoreExcludes(t, env.gopts, restoredir, snapshotID.String(), excludePatterns) testRestoredFileExclusions(t, restoredir) // Create an exclude file with some patterns @@ -340,7 +340,7 @@ func TestRestoreWithPermissionFailure(t *testing.T) { _ = withRestoreGlobalOptions(func() error { globalOptions.stderr = io.Discard - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0]) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0].String()) return nil }) diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index df95031dca5..d39ea6980ca 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -35,7 +35,7 @@ func TestCheckRestoreNoLock(t *testing.T) { testRunCheck(t, env.gopts) snapshotIDs := testListSnapshots(t, env.gopts, 4) - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0]) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0].String()) } // a listOnceBackend only allows listing once per filetype From 0aee70b496dc9979902363189763330263b6dd30 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 19:32:51 +0100 Subject: [PATCH 771/893] restic: test path handling of volume shadow copy root path --- internal/restic/node_windows_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go index 6ba25559b7a..c3936cfc868 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/restic/node_windows_test.go @@ -450,6 +450,13 @@ func TestPrepareVolumeName(t *testing.T) { expectError: false, expectedEASupported: false, }, + { + name: "Volume Shadow Copy root", + path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`, + expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`, + expectError: false, + expectedEASupported: false, + }, { name: "Volume Shadow Copy path", path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1\Users\test`, From 962279479d85174e92698a144f30879d1c7f07d2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 31 Oct 2024 20:01:52 +0100 Subject: [PATCH 772/893] add vss metadata changelog --- changelog/unreleased/issue-5107 | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 changelog/unreleased/issue-5107 diff --git a/changelog/unreleased/issue-5107 b/changelog/unreleased/issue-5107 new file mode 100644 index 00000000000..13bb380e4ef --- /dev/null +++ b/changelog/unreleased/issue-5107 @@ -0,0 +1,15 @@ +Bugfix: Fix metadata error on Windows for backups using VSS + +Since restic 0.17.2, when creating a backup on Windows using `--use-fs-snapshot`, +restic would report an error like the following: + +``` +error: incomplete metadata for C:\: get EA failed while opening file handle for path \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX\, with: The process cannot access the file because it is being used by 
another process. +``` + +This has now been fixed by correctly handling paths that refer to volume +shadow copy snapshots. + +https://github.com/restic/restic/issues/5107 +https://github.com/restic/restic/pull/5110 +https://github.com/restic/restic/pull/5112 From a494bf661d7255c2b5fbb90d3b75cc74ed3a4b34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 01:43:08 +0000 Subject: [PATCH 773/893] build(deps): bump golang.org/x/time from 0.6.0 to 0.7.0 Bumps [golang.org/x/time](https://github.com/golang/time) from 0.6.0 to 0.7.0. - [Commits](https://github.com/golang/time/compare/v0.6.0...v0.7.0) --- updated-dependencies: - dependency-name: golang.org/x/time dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ae00c92bf5e..ad0dbf15cdc 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( golang.org/x/sys v0.25.0 golang.org/x/term v0.24.0 golang.org/x/text v0.18.0 - golang.org/x/time v0.6.0 + golang.org/x/time v0.7.0 google.golang.org/api v0.199.0 ) diff --git a/go.sum b/go.sum index fdbb820ea99..bc0e8f3e133 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= From ae9268dadf7666b41377e79c388fd5c37af65858 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 01:43:12 +0000 Subject: [PATCH 774/893] build(deps): bump golang.org/x/sys from 0.25.0 to 0.26.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.25.0 to 0.26.0. - [Commits](https://github.com/golang/sys/compare/v0.25.0...v0.26.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ae00c92bf5e..416c4b24ce9 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( golang.org/x/net v0.29.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.25.0 + golang.org/x/sys v0.26.0 golang.org/x/term v0.24.0 golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 diff --git a/go.sum b/go.sum index fdbb820ea99..5f4d709842e 100644 --- a/go.sum +++ b/go.sum @@ -276,8 +276,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 53cb6200fa1b5c5c5756abd339190eeefc56d1cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 01:43:23 +0000 Subject: [PATCH 775/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore Bumps [github.com/Azure/azure-sdk-for-go/sdk/azcore](https://github.com/Azure/azure-sdk-for-go) from 1.14.0 to 1.16.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.14.0...sdk/azcore/v1.16.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azcore dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ae00c92bf5e..9626d1c6ae1 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.43.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 github.com/Backblaze/blazer v0.6.1 diff --git a/go.sum b/go.sum index fdbb820ea99..60b1bf732bd 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,8 @@ cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkB cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= From de847a48bfaa115a52e8b405ddfd6271b04f94cc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 13:52:51 +0000 Subject: [PATCH 776/893] build(deps): bump google.golang.org/api from 0.199.0 to 0.204.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.199.0 to 0.204.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.199.0...v0.204.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 28 ++++++++++++------------- go.sum | 64 +++++++++++++++++++++++++++++----------------------------- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/go.mod b/go.mod index 4809a60157a..413e8e8fc10 100644 --- a/go.mod +++ b/go.mod @@ -25,23 +25,23 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.6.0 - golang.org/x/crypto v0.27.0 - golang.org/x/net v0.29.0 + golang.org/x/crypto v0.28.0 + golang.org/x/net v0.30.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.26.0 - golang.org/x/term v0.24.0 - golang.org/x/text v0.18.0 + golang.org/x/term v0.25.0 + golang.org/x/text v0.19.0 golang.org/x/time v0.7.0 - google.golang.org/api v0.199.0 + google.golang.org/api v0.204.0 ) require ( - cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/auth v0.9.5 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect - cloud.google.com/go/iam v1.2.0 // indirect + cloud.google.com/go/iam v1.2.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect @@ -73,11 +73,11 @@ require ( go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect go.opentelemetry.io/otel/trace v1.29.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/grpc v1.67.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 1c887ced21d..9898756048b 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= -cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= -cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= 
+cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= -cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= -cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= -cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= -cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= @@ -212,8 +212,8 @@ go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -223,8 +223,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= @@ -247,8 +247,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod 
h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -282,15 +282,15 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -306,26 +306,26 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= -google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= -google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -335,8 +335,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From d8bf327d8b2c39630afe958beab63258a3bbbb9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 13:54:05 +0000 Subject: [PATCH 777/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.7.0 to 1.8.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.7.0...sdk/azcore/v1.8.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3045dc0854e..e9302a159bb 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.43.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.3.1 diff --git a/go.sum b/go.sum index 3770ca180e6..1b670bfb6dc 100644 --- a/go.sum +++ b/go.sum @@ -15,14 +15,18 @@ cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyX cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 h1:Be6KInmFEKV81c0pOAEbRYehLMwmmGI1exuFj248AMk= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod 
h1:WCPBHsOXfBVnivScjs2ypRfimjEW0qPVLGgJkZlrIOA= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= @@ -54,6 +58,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvyukov/go-fuzz v0.0.0-20220726122315-1d375ef9f9f6/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= @@ -126,6 +132,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -171,6 +179,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/restic/chunker v0.4.0 h1:YUPYCUn70MYP7VO4yllypp2SjmsRhRJaad3xKu1QFRw= github.com/restic/chunker v0.4.0/go.mod h1:z0cH2BejpW636LXw0R/BGyv+Ey8+m9QGiOanDHItzyw= github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s= From 74c783b850db9036f984a2fda82febe4669b9334 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 17 Oct 2024 20:21:54 +0200 Subject: [PATCH 778/893] retry load or creating repository config By now missing files are not endlessly retried 
by the retry backend such that it can be enabled right from the start. In addition, this change also enables the retry backend for the `init` command. --- cmd/restic/global.go | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 36f6e9b0c6f..fc52882d780 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -439,26 +439,6 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi return nil, err } - report := func(msg string, err error, d time.Duration) { - if d >= 0 { - Warnf("%v returned error, retrying after %v: %v\n", msg, d, err) - } else { - Warnf("%v failed: %v\n", msg, err) - } - } - success := func(msg string, retries int) { - Warnf("%v operation successful after %d retries\n", msg, retries) - } - be = retry.New(be, 15*time.Minute, report, success) - - // wrap backend if a test specified a hook - if opts.backendTestHook != nil { - be, err = opts.backendTestHook(be) - if err != nil { - return nil, err - } - } - s, err := repository.New(be, repository.Options{ Compression: opts.Compression, PackSize: opts.PackSize * 1024 * 1024, @@ -629,12 +609,31 @@ func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options. } } + report := func(msg string, err error, d time.Duration) { + if d >= 0 { + Warnf("%v returned error, retrying after %v: %v\n", msg, d, err) + } else { + Warnf("%v failed: %v\n", msg, err) + } + } + success := func(msg string, retries int) { + Warnf("%v operation successful after %d retries\n", msg, retries) + } + be = retry.New(be, 15*time.Minute, report, success) + + // wrap backend if a test specified a hook + if gopts.backendTestHook != nil { + be, err = gopts.backendTestHook(be) + if err != nil { + return nil, err + } + } + return be, nil } // Open the backend specified by a location config. func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { - be, err := innerOpen(ctx, s, gopts, opts, false) if err != nil { return nil, err From 58dc4a6892e80c2768c82961af1cd6656fcd01d0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 17 Oct 2024 20:35:26 +0200 Subject: [PATCH 779/893] backend/retry: hide final log for `stat()` method stat is only used to check the config file's existence. We don't want log output in this case. --- internal/backend/retry/backend_retry.go | 9 ++++++++- internal/backend/retry/backend_retry_test.go | 6 +++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index 92c285c4b36..de8a520ec9e 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -221,12 +221,19 @@ func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offse // Stat returns information about the File identified by h. func (be *Backend) Stat(ctx context.Context, h backend.Handle) (fi backend.FileInfo, err error) { - err = be.retry(ctx, fmt.Sprintf("Stat(%v)", h), + // see the call to `cancel()` below for why this context exists + statCtx, cancel := context.WithCancel(ctx) + defer cancel() + + err = be.retry(statCtx, fmt.Sprintf("Stat(%v)", h), func() error { var innerError error fi, innerError = be.Backend.Stat(ctx, h) if be.Backend.IsNotExist(innerError) { + // stat is only used to check the existence of the config file. + // cancel the context to suppress the final error message if the file is not found. 
+ cancel() // do not retry if file is not found, as stat is usually used to check whether a file exists return backoff.Permanent(innerError) } diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go index ffb8ae18699..9259144d483 100644 --- a/internal/backend/retry/backend_retry_test.go +++ b/internal/backend/retry/backend_retry_test.go @@ -400,7 +400,11 @@ func TestBackendStatNotExists(t *testing.T) { } TestFastRetries(t) - retryBackend := New(be, 10, nil, nil) + retryBackend := New(be, 10, func(s string, err error, d time.Duration) { + t.Fatalf("unexpected error output %v", s) + }, func(s string, i int) { + t.Fatalf("unexpected log output %v", s) + }) _, err := retryBackend.Stat(context.TODO(), backend.Handle{}) test.Assert(t, be.IsNotExistFn(err), "unexpected error %v", err) From b5bc76cdc77931d616909ad6531f07192682e200 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 15:16:24 +0100 Subject: [PATCH 780/893] test retry on repo opening --- cmd/restic/integration_test.go | 44 ++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index df95031dca5..cb4ccba4118 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -177,3 +177,47 @@ func TestFindListOnce(t *testing.T) { // the snapshots can only be listed once, if both lists match then the there has been only a single List() call rtest.Equals(t, thirdSnapshot, snapshotIDs) } + +type failConfigOnceBackend struct { + backend.Backend + failedOnce bool +} + +func (be *failConfigOnceBackend) Load(ctx context.Context, h backend.Handle, + length int, offset int64, fn func(rd io.Reader) error) error { + + if !be.failedOnce && h.Type == restic.ConfigFile { + be.failedOnce = true + return fmt.Errorf("oops") + } + return be.Backend.Load(ctx, h, length, offset, fn) +} + +func (be *failConfigOnceBackend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) { + if !be.failedOnce && h.Type == restic.ConfigFile { + be.failedOnce = true + return backend.FileInfo{}, fmt.Errorf("oops") + } + return be.Backend.Stat(ctx, h) +} + +func TestBackendRetryConfig(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + var wrappedBackend *failConfigOnceBackend + // cause config loading to fail once + env.gopts.backendInnerTestHook = func(r backend.Backend) (backend.Backend, error) { + wrappedBackend = &failConfigOnceBackend{Backend: r} + return wrappedBackend, nil + } + + testSetupBackupData(t, env) + rtest.Assert(t, wrappedBackend != nil, "backend not wrapped on init") + rtest.Assert(t, wrappedBackend != nil && wrappedBackend.failedOnce, "config loading was not retried on init") + wrappedBackend = nil + + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, BackupOptions{}, env.gopts) + rtest.Assert(t, wrappedBackend != nil, "backend not wrapped on backup") + rtest.Assert(t, wrappedBackend != nil && wrappedBackend.failedOnce, "config loading was not retried on init") +} From 3be2b8a54b48ba855a49d99286c546d8486c0502 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 15:22:55 +0100 Subject: [PATCH 781/893] add config retry changelog --- changelog/unreleased/issue-5081 | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 changelog/unreleased/issue-5081 diff --git a/changelog/unreleased/issue-5081 b/changelog/unreleased/issue-5081 new file mode 100644 index 00000000000..6cf1bf5929f --- /dev/null 
+++ b/changelog/unreleased/issue-5081 @@ -0,0 +1,7 @@ +Enhancement: Retry loading repository config + +Restic now retries loading the repository config file when opening a repository. +In addition, the `init` command now also retries backend operations. + +https://github.com/restic/restic/issues/5081 +https://github.com/restic/restic/pull/5095 From 868efe4968351e34d40d951f1acf98f1c0bf804a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 17 Oct 2024 20:52:14 +0200 Subject: [PATCH 782/893] prune: allow dry-run without taking a lock --- changelog/unreleased/pull-5096 | 7 +++++++ cmd/restic/cmd_prune.go | 6 +++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/pull-5096 diff --git a/changelog/unreleased/pull-5096 b/changelog/unreleased/pull-5096 new file mode 100644 index 00000000000..d1e1d09b26b --- /dev/null +++ b/changelog/unreleased/pull-5096 @@ -0,0 +1,7 @@ +Enhancement: Allow prune dry-run without lock + +The `prune --dry-run --no-lock` command now allows performing a dry-run without +taking a lock. If the repository is modified concurrently, `prune` may +return inaccurate statistics or errors. + +https://github.com/restic/restic/pull/5096 diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index e8473bd6f2f..a74ba23f786 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -149,7 +149,11 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive") } - ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) + if gopts.NoLock && !opts.DryRun { + return errors.Fatal("--no-lock is only applicable in combination with --dry-run for prune command") + } + + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock) if err != nil { return err } From 71c185313ecdb3d2b261841160bf1b1841facc12 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 21 Oct 2024 21:41:56 +0200 Subject: [PATCH 783/893] sftp: check for broken connection in Load/List operation --- changelog/unreleased/pull-5101 | 9 +++++++++ internal/backend/sftp/sftp.go | 8 ++++++++ 2 files changed, 17 insertions(+) create mode 100644 changelog/unreleased/pull-5101 diff --git a/changelog/unreleased/pull-5101 b/changelog/unreleased/pull-5101 new file mode 100644 index 00000000000..f784d0c4721 --- /dev/null +++ b/changelog/unreleased/pull-5101 @@ -0,0 +1,9 @@ +Bugfix: Do not retry load/list operation if the SFTP connection is broken + +When using restic with the SFTP backend, backend operations that load +a file or list files were retried even if the SFTP connection was broken. + +This has been fixed now. + +https://github.com/restic/restic/pull/5101 +https://forum.restic.net/t/restic-hanging-on-backup/8559/2 diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index 0ecf7ae6283..14819a2df6f 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -391,6 +391,10 @@ func (r *SFTP) checkNoSpace(dir string, size int64, origErr error) error { // Load runs fn with a reader that yields the contents of the file at h at the // given offset.
func (r *SFTP) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + if err := r.clientError(); err != nil { + return err + } + return util.DefaultLoad(ctx, h, length, offset, r.openReader, func(rd io.Reader) error { if length == 0 || !feature.Flag.Enabled(feature.BackendErrorRedesign) { return fn(rd) @@ -460,6 +464,10 @@ func (r *SFTP) Remove(_ context.Context, h backend.Handle) error { // List runs fn for each file in the backend which has the type t. When an // error occurs (or fn returns an error), List stops and returns it. func (r *SFTP) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error { + if err := r.clientError(); err != nil { + return err + } + basedir, subdirs := r.Basedir(t) walker := r.c.Walk(basedir) for { From b3b173a47cd8b01e97714892d2abd1aa3f91fa4f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 15:38:03 +0100 Subject: [PATCH 784/893] fs: use non-existing vss path to avoid flaky test The test used \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1, which, if it exists and supports extended attributes, can cause the test to fail. --- internal/fs/node_windows_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index b40483f3167..94052662f99 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -453,15 +453,15 @@ func TestPrepareVolumeName(t *testing.T) { }, { name: "Volume Shadow Copy root", - path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`, - expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`, + path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555`, + expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555`, expectError: false, expectedEASupported: false, }, { name: "Volume Shadow Copy path", - path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1\Users\test`, - expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`, + path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555\Users\test`, + expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555`, expectError: false, expectedEASupported: false, }, From bcf5fbe4987ad633bbfcc52d0b72307ec24d1b65 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 16:22:32 +0100 Subject: [PATCH 785/893] prepare-release: improve handling of release from non-master branch The final push command now states the correct branch to push.
--- helpers/prepare-release/main.go | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go index ba3de38a521..607d16936eb 100644 --- a/helpers/prepare-release/main.go +++ b/helpers/prepare-release/main.go @@ -31,7 +31,7 @@ var opts = struct { var versionRegex = regexp.MustCompile(`^\d+\.\d+\.\d+$`) func init() { - pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches as 'master'") + pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches than 'master'") pflag.BoolVar(&opts.IgnoreUncommittedChanges, "ignore-uncommitted-changes", false, "allow uncommitted changes") pflag.BoolVar(&opts.IgnoreChangelogVersion, "ignore-changelog-version", false, "ignore missing entry in CHANGELOG.md") pflag.BoolVar(&opts.IgnoreChangelogReleaseDate, "ignore-changelog-release-date", false, "ignore missing subdir with date in changelog/") @@ -128,17 +128,22 @@ func uncommittedChanges(dirs ...string) string { return string(changes) } -func preCheckBranchMaster() { - if opts.IgnoreBranchName { - return - } - +func getBranchName() string { branch, err := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output() if err != nil { die("error running 'git': %v", err) } - if strings.TrimSpace(string(branch)) != "master" { + return strings.TrimSpace(string(branch)) +} + +func preCheckBranchMaster() { + if opts.IgnoreBranchName { + return + } + + branch := getBranchName() + if branch != "master" { die("wrong branch: %s", branch) } } @@ -449,6 +454,7 @@ func main() { } preCheckBranchMaster() + branch := getBranchName() preCheckUncommittedChanges() preCheckVersionExists() preCheckDockerBuilderGoVersion() @@ -485,5 +491,5 @@ func main() { msg("done, output dir is %v", opts.OutputDir) - msg("now run:\n\ngit push --tags origin master\n%s\n\nrm -rf %q", dockerCmds, sourceDir) + msg("now run:\n\ngit push --tags origin %s\n%s\n\nrm -rf %q", branch, dockerCmds, sourceDir) } From e65f4e2231b0ac83f920b4d3b4f3830ecabc75ea Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 15:50:09 +0100 Subject: [PATCH 786/893] backup: include start and end time in json output The timestamps were already stored in the created snapshot. --- changelog/unreleased/pull-5119 | 6 +++++ doc/075_scripting.rst | 4 +++ internal/archiver/archiver.go | 11 +++++--- internal/archiver/archiver_test.go | 19 ++++++++------ internal/ui/backup/json.go | 40 ++++++++++++++++------------- internal/ui/backup/progress.go | 4 +-- internal/ui/backup/progress_test.go | 2 +- internal/ui/backup/text.go | 4 +-- 8 files changed, 56 insertions(+), 34 deletions(-) create mode 100644 changelog/unreleased/pull-5119 diff --git a/changelog/unreleased/pull-5119 b/changelog/unreleased/pull-5119 new file mode 100644 index 00000000000..731e3ecd769 --- /dev/null +++ b/changelog/unreleased/pull-5119 @@ -0,0 +1,6 @@ +Enhancement: Include backup start and end in JSON output + +The JSON output of the backup command now also includes the timestamps +of the `backup_start` and `backup_end` times. + +https://github.com/restic/restic/pull/5119 diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index de7b9d1cd4c..88fc1f35b0f 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -226,6 +226,10 @@ Summary is the last output line in a successful backup. 
+---------------------------+---------------------------------------------------------+ | ``total_bytes_processed`` | Total number of bytes processed | +---------------------------+---------------------------------------------------------+ +| ``backup_start`` | Time at which the backup was started | ++---------------------------+---------------------------------------------------------+ +| ``backup_end`` | Time at which the backup was completed | ++---------------------------+---------------------------------------------------------+ | ``total_duration`` | Total time it took for the operation to complete | +---------------------------+---------------------------------------------------------+ | ``snapshot_id`` | ID of the new snapshot. Field is omitted if snapshot | diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index d8f0157b1dd..f7a9f275009 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -49,6 +49,8 @@ type ChangeStats struct { } type Summary struct { + BackupStart time.Time + BackupEnd time.Time Files, Dirs ChangeStats ProcessedBytes uint64 ItemStats @@ -811,7 +813,9 @@ func (arch *Archiver) stopWorkers() { // Snapshot saves several targets and returns a snapshot. func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, *Summary, error) { - arch.summary = &Summary{} + arch.summary = &Summary{ + BackupStart: opts.BackupStart, + } cleanTargets, err := resolveRelativeTargets(arch.FS, targets) if err != nil { @@ -894,9 +898,10 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps sn.Parent = opts.ParentSnapshot.ID() } sn.Tree = &rootTreeID + arch.summary.BackupEnd = time.Now() sn.Summary = &restic.SnapshotSummary{ - BackupStart: opts.BackupStart, - BackupEnd: time.Now(), + BackupStart: arch.summary.BackupStart, + BackupEnd: arch.summary.BackupEnd, FilesNew: arch.summary.Files.New, FilesChanged: arch.summary.Files.Changed, diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index d4f15c80b74..97d27dc649f 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1692,14 +1692,17 @@ func (f MockFile) Read(p []byte) (int, error) { } func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) { - rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew) - rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged) - rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified) - rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew) - rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged) - rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified) - rtest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed) - rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed) + t.Helper() + rtest.Equals(t, stat.BackupStart, sn.Summary.BackupStart, "BackupStart") + // BackupEnd is set to time.Now() and can't be compared to a fixed value + rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew, "FilesNew") + rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged, "FilesChanged") + rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified, "FilesUnmodified") + rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew, "DirsNew") + rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged, "DirsChanged") + rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified, "DirsUnmodified") + rtest.Equals(t, 
stat.ProcessedBytes, sn.Summary.TotalBytesProcessed, "TotalBytesProcessed") + rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed, "TotalFilesProcessed") bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs)) bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs)) bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded)) diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index f4a76afd7a6..79da353eb82 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -162,7 +162,7 @@ func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) { } // Finish prints the finishing messages. -func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { +func (b *JSONProgress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryRun bool) { id := "" // empty if snapshot creation was skipped if !snapshotID.IsNull() { @@ -182,7 +182,9 @@ func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *ar DataAddedPacked: summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo, TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged, TotalBytesProcessed: summary.ProcessedBytes, - TotalDuration: time.Since(start).Seconds(), + BackupStart: summary.BackupStart, + BackupEnd: summary.BackupEnd, + TotalDuration: summary.BackupEnd.Sub(summary.BackupStart).Seconds(), SnapshotID: id, DryRun: dryRun, }) @@ -229,20 +231,22 @@ type verboseUpdate struct { } type summaryOutput struct { - MessageType string `json:"message_type"` // "summary" - FilesNew uint `json:"files_new"` - FilesChanged uint `json:"files_changed"` - FilesUnmodified uint `json:"files_unmodified"` - DirsNew uint `json:"dirs_new"` - DirsChanged uint `json:"dirs_changed"` - DirsUnmodified uint `json:"dirs_unmodified"` - DataBlobs int `json:"data_blobs"` - TreeBlobs int `json:"tree_blobs"` - DataAdded uint64 `json:"data_added"` - DataAddedPacked uint64 `json:"data_added_packed"` - TotalFilesProcessed uint `json:"total_files_processed"` - TotalBytesProcessed uint64 `json:"total_bytes_processed"` - TotalDuration float64 `json:"total_duration"` // in seconds - SnapshotID string `json:"snapshot_id,omitempty"` - DryRun bool `json:"dry_run,omitempty"` + MessageType string `json:"message_type"` // "summary" + FilesNew uint `json:"files_new"` + FilesChanged uint `json:"files_changed"` + FilesUnmodified uint `json:"files_unmodified"` + DirsNew uint `json:"dirs_new"` + DirsChanged uint `json:"dirs_changed"` + DirsUnmodified uint `json:"dirs_unmodified"` + DataBlobs int `json:"data_blobs"` + TreeBlobs int `json:"tree_blobs"` + DataAdded uint64 `json:"data_added"` + DataAddedPacked uint64 `json:"data_added_packed"` + TotalFilesProcessed uint `json:"total_files_processed"` + TotalBytesProcessed uint64 `json:"total_bytes_processed"` + TotalDuration float64 `json:"total_duration"` // in seconds + BackupStart time.Time `json:"backup_start"` + BackupEnd time.Time `json:"backup_end"` + SnapshotID string `json:"snapshot_id,omitempty"` + DryRun bool `json:"dry_run,omitempty"` } diff --git a/internal/ui/backup/progress.go b/internal/ui/backup/progress.go index 24640d71f06..318d304357f 100644 --- a/internal/ui/backup/progress.go +++ b/internal/ui/backup/progress.go @@ -17,7 +17,7 @@ type ProgressPrinter interface { ScannerError(item string, err error) error CompleteItem(messageType string, item 
string, s archiver.ItemStats, d time.Duration) ReportTotal(start time.Time, s archiver.ScanStats) - Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) + Finish(snapshotID restic.ID, summary *archiver.Summary, dryRun bool) Reset() P(msg string, args ...interface{}) @@ -173,5 +173,5 @@ func (p *Progress) ReportTotal(item string, s archiver.ScanStats) { func (p *Progress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryrun bool) { // wait for the status update goroutine to shut down p.Updater.Done() - p.printer.Finish(snapshotID, p.start, summary, dryrun) + p.printer.Finish(snapshotID, summary, dryrun) } diff --git a/internal/ui/backup/progress_test.go b/internal/ui/backup/progress_test.go index 512fbab26be..60e754b4a4d 100644 --- a/internal/ui/backup/progress_test.go +++ b/internal/ui/backup/progress_test.go @@ -33,7 +33,7 @@ func (p *mockPrinter) CompleteItem(messageType string, _ string, _ archiver.Item } func (p *mockPrinter) ReportTotal(_ time.Time, _ archiver.ScanStats) {} -func (p *mockPrinter) Finish(id restic.ID, _ time.Time, _ *archiver.Summary, _ bool) { +func (p *mockPrinter) Finish(id restic.ID, _ *archiver.Summary, _ bool) { p.Lock() defer p.Unlock() diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go index 097f0d0d818..efd7ffdfee6 100644 --- a/internal/ui/backup/text.go +++ b/internal/ui/backup/text.go @@ -130,7 +130,7 @@ func (b *TextProgress) Reset() { } // Finish prints the finishing messages. -func (b *TextProgress) Finish(id restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { +func (b *TextProgress) Finish(id restic.ID, summary *archiver.Summary, dryRun bool) { b.P("\n") b.P("Files: %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged) b.P("Dirs: %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged) @@ -147,7 +147,7 @@ func (b *TextProgress) Finish(id restic.ID, start time.Time, summary *archiver.S b.P("processed %v files, %v in %s", summary.Files.New+summary.Files.Changed+summary.Files.Unchanged, ui.FormatBytes(summary.ProcessedBytes), - ui.FormatDuration(time.Since(start)), + ui.FormatDuration(summary.BackupEnd.Sub(summary.BackupStart)), ) if !dryRun { From f5b1f9c8b1be10d60897d4843805df415dacfc5c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 16:33:47 +0100 Subject: [PATCH 787/893] CI: bump golangci-lint to latest version --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bf4e63bb7d6..6e84bec4332 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -254,7 +254,7 @@ jobs: uses: golangci/golangci-lint-action@v6 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. 
- version: v1.57.1 + version: v1.61.0 args: --verbose --timeout 5m # only run golangci-lint for pull requests, otherwise ALL hints get From 3eb9556f6a570660f8afe477107faf51063e84d6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 16:34:00 +0100 Subject: [PATCH 788/893] CI: add go 1.23 --- .github/workflows/tests.yml | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6e84bec4332..5a41723bbd3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -13,7 +13,7 @@ permissions: contents: read env: - latest_go: "1.22.x" + latest_go: "1.23.x" GO111MODULE: on jobs: @@ -23,27 +23,32 @@ jobs: # list of jobs to run: include: - job_name: Windows - go: 1.22.x + go: 1.23.x os: windows-latest - job_name: macOS - go: 1.22.x + go: 1.23.x os: macOS-latest test_fuse: false - job_name: Linux - go: 1.22.x + go: 1.23.x os: ubuntu-latest test_cloud_backends: true test_fuse: true check_changelog: true - job_name: Linux (race) - go: 1.22.x + go: 1.23.x os: ubuntu-latest test_fuse: true test_opts: "-race" + - job_name: Linux + go: 1.22.x + os: ubuntu-latest + test_fuse: true + - job_name: Linux go: 1.21.x os: ubuntu-latest From 41fa41b28bcdbef71d99111dfc7f467182c56d14 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 16:36:23 +0100 Subject: [PATCH 789/893] fix double printf usage --- cmd/restic/cmd_rewrite.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index a9f664110da..b62d1ed9564 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -2,7 +2,6 @@ package main import ( "context" - "fmt" "time" "github.com/spf13/cobra" @@ -141,7 +140,7 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti if selectByName(path) { return node } - Verbosef(fmt.Sprintf("excluding %s\n", path)) + Verbosef("excluding %s\n", path) return nil } From 569a117a1d1e38e42a4d50cd355d0c261fe522c1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 17:04:49 +0100 Subject: [PATCH 790/893] improve fprintf related error handling --- cmd/restic/cmd_backup_integration_test.go | 7 +------ cmd/restic/cmd_backup_test.go | 9 ++++++--- cmd/restic/cmd_snapshots.go | 12 ++++++++---- cmd/restic/global.go | 14 ++++++++++---- cmd/restic/integration_helpers_unix_test.go | 12 ++++++------ cmd/restic/main.go | 6 +++--- internal/backend/rest/rest_test.go | 2 +- internal/debug/debug.go | 2 +- internal/debug/round_tripper.go | 2 +- internal/ui/termstatus/status.go | 10 ++++++---- 10 files changed, 43 insertions(+), 33 deletions(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 5926fdd5400..4081ef9369d 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -351,12 +351,7 @@ func TestBackupExclude(t *testing.T) { for _, filename := range backupExcludeFilenames { fp := filepath.Join(datadir, filename) rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755)) - - f, err := os.Create(fp) - rtest.OK(t, err) - - fmt.Fprint(f, filename) - rtest.OK(t, f.Close()) + rtest.OK(t, os.WriteFile(fp, []byte(filename), 0o666)) } snapshots := make(map[string]struct{}) diff --git a/cmd/restic/cmd_backup_test.go b/cmd/restic/cmd_backup_test.go index 5cbc42436c8..44e08ff9630 100644 --- a/cmd/restic/cmd_backup_test.go +++ b/cmd/restic/cmd_backup_test.go @@ -39,21 +39,24 @@ func 
TestCollectTargets(t *testing.T) { f1, err := os.Create(filepath.Join(dir, "fromfile")) rtest.OK(t, err) // Empty lines should be ignored. A line starting with '#' is a comment. - fmt.Fprintf(f1, "\n%s*\n # here's a comment\n", f1.Name()) + _, err = fmt.Fprintf(f1, "\n%s*\n # here's a comment\n", f1.Name()) + rtest.OK(t, err) rtest.OK(t, f1.Close()) f2, err := os.Create(filepath.Join(dir, "fromfile-verbatim")) rtest.OK(t, err) for _, filename := range []string{fooSpace, barStar} { // Empty lines should be ignored. CR+LF is allowed. - fmt.Fprintf(f2, "%s\r\n\n", filepath.Join(dir, filename)) + _, err = fmt.Fprintf(f2, "%s\r\n\n", filepath.Join(dir, filename)) + rtest.OK(t, err) } rtest.OK(t, f2.Close()) f3, err := os.Create(filepath.Join(dir, "fromfile-raw")) rtest.OK(t, err) for _, filename := range []string{"baz", "quux"} { - fmt.Fprintf(f3, "%s\x00", filepath.Join(dir, filename)) + _, err = fmt.Fprintf(f3, "%s\x00", filepath.Join(dir, filename)) + rtest.OK(t, err) } rtest.OK(t, err) rtest.OK(t, f3.Close()) diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 466f536e061..f935cec86c7 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -296,7 +296,9 @@ func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error { } // Info - fmt.Fprintf(stdout, "snapshots") + if _, err := fmt.Fprintf(stdout, "snapshots"); err != nil { + return err + } var infoStrings []string if key.Hostname != "" { infoStrings = append(infoStrings, "host ["+key.Hostname+"]") @@ -308,11 +310,13 @@ func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error { infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]") } if infoStrings != nil { - fmt.Fprintf(stdout, " for (%s)", strings.Join(infoStrings, ", ")) + if _, err := fmt.Fprintf(stdout, " for (%s)", strings.Join(infoStrings, ", ")); err != nil { + return err + } } - fmt.Fprintf(stdout, ":\n") + _, err = fmt.Fprintf(stdout, ":\n") - return nil + return err } // Snapshot helps to print Snapshots as JSON with their ID included. 
diff --git a/cmd/restic/global.go b/cmd/restic/global.go index fc52882d780..b824b7449bc 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -308,7 +308,7 @@ func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt fd := int(out.Fd()) state, err := term.GetState(fd) if err != nil { - fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) + _, _ = fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) return "", err } @@ -317,16 +317,22 @@ func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt go func() { defer close(done) - fmt.Fprint(out, prompt) + _, err = fmt.Fprint(out, prompt) + if err != nil { + return + } buf, err = term.ReadPassword(int(in.Fd())) - fmt.Fprintln(out) + if err != nil { + return + } + _, err = fmt.Fprintln(out) }() select { case <-ctx.Done(): err := term.Restore(fd, state) if err != nil { - fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) + _, _ = fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) } return "", ctx.Err() case <-done: diff --git a/cmd/restic/integration_helpers_unix_test.go b/cmd/restic/integration_helpers_unix_test.go index df0c4fe63f1..30852a75341 100644 --- a/cmd/restic/integration_helpers_unix_test.go +++ b/cmd/restic/integration_helpers_unix_test.go @@ -13,17 +13,17 @@ import ( func (e *dirEntry) equals(out io.Writer, other *dirEntry) bool { if e.path != other.path { - fmt.Fprintf(out, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path) + _, _ = fmt.Fprintf(out, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path) return false } if e.fi.Mode() != other.fi.Mode() { - fmt.Fprintf(out, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode()) + _, _ = fmt.Fprintf(out, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode()) return false } if !sameModTime(e.fi, other.fi) { - fmt.Fprintf(out, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime()) + _, _ = fmt.Fprintf(out, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime()) return false } @@ -31,17 +31,17 @@ func (e *dirEntry) equals(out io.Writer, other *dirEntry) bool { stat2, _ := other.fi.Sys().(*syscall.Stat_t) if stat.Uid != stat2.Uid { - fmt.Fprintf(out, "%v: UID does not match (%v != %v)\n", e.path, stat.Uid, stat2.Uid) + _, _ = fmt.Fprintf(out, "%v: UID does not match (%v != %v)\n", e.path, stat.Uid, stat2.Uid) return false } if stat.Gid != stat2.Gid { - fmt.Fprintf(out, "%v: GID does not match (%v != %v)\n", e.path, stat.Gid, stat2.Gid) + _, _ = fmt.Fprintf(out, "%v: GID does not match (%v != %v)\n", e.path, stat.Gid, stat2.Gid) return false } if stat.Nlink != stat2.Nlink { - fmt.Fprintf(out, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink) + _, _ = fmt.Fprintf(out, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink) return false } diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 4cb135c4840..096c5695c15 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -140,7 +140,7 @@ func printExitError(code int, message string) { return } } else { - fmt.Fprintf(globalOptions.stderr, "%v\n", message) + _, _ = fmt.Fprintf(globalOptions.stderr, "%v\n", message) } } @@ -152,10 +152,10 @@ func main() { log.SetOutput(logBuffer) err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) { - fmt.Fprintln(os.Stderr, s) + _, _ = fmt.Fprintln(os.Stderr, s) }) if err 
!= nil { - fmt.Fprintln(os.Stderr, err) + _, _ = fmt.Fprintln(os.Stderr, err) Exit(1) } diff --git a/internal/backend/rest/rest_test.go b/internal/backend/rest/rest_test.go index 891f60a874a..50560f66d49 100644 --- a/internal/backend/rest/rest_test.go +++ b/internal/backend/rest/rest_test.go @@ -106,7 +106,7 @@ func runRESTServer(ctx context.Context, t testing.TB, dir, reqListenAddr string) matched = true } } - fmt.Fprintln(os.Stdout, line) // print all output to console + _, _ = fmt.Fprintln(os.Stdout, line) // print all output to console } }() diff --git a/internal/debug/debug.go b/internal/debug/debug.go index 7bc3291d1f1..a09d6e74a3a 100644 --- a/internal/debug/debug.go +++ b/internal/debug/debug.go @@ -120,7 +120,7 @@ func goroutineNum() int { runtime.Stack(b, false) var num int - fmt.Sscanf(string(b), "goroutine %d ", &num) + _, _ = fmt.Sscanf(string(b), "goroutine %d ", &num) return num } diff --git a/internal/debug/round_tripper.go b/internal/debug/round_tripper.go index 9dced95c62d..4afab729897 100644 --- a/internal/debug/round_tripper.go +++ b/internal/debug/round_tripper.go @@ -42,7 +42,7 @@ func (rd *eofDetectReader) Close() error { msg += fmt.Sprintf(", body: %q", buf) } - fmt.Fprintln(os.Stderr, msg) + _, _ = fmt.Fprintln(os.Stderr, msg) Log("%s: %+v", msg, errors.New("Close()")) } return rd.rd.Close() diff --git a/internal/ui/termstatus/status.go b/internal/ui/termstatus/status.go index 39654cc8cdc..e6533095871 100644 --- a/internal/ui/termstatus/status.go +++ b/internal/ui/termstatus/status.go @@ -212,7 +212,7 @@ func (t *Terminal) runWithoutStatus(ctx context.Context) { } if _, err := io.WriteString(dst, msg.line); err != nil { - fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + _, _ = fmt.Fprintf(os.Stderr, "write failed: %v\n", err) } if flush == nil { @@ -220,16 +220,18 @@ func (t *Terminal) runWithoutStatus(ctx context.Context) { } if err := flush(); err != nil { - fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + _, _ = fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) } case stat := <-t.status: for _, line := range stat.lines { // Ensure that each message ends with exactly one newline. 
- fmt.Fprintln(t.wr, strings.TrimRight(line, "\n")) + if _, err := fmt.Fprintln(t.wr, strings.TrimRight(line, "\n")); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + } } if err := t.wr.Flush(); err != nil { - fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + _, _ = fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) } } } From d3c3390a5131822275dac208c9959514ab76930a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 17:05:53 +0100 Subject: [PATCH 791/893] ls: proper error handling if output is not possible --- cmd/restic/cmd_ls.go | 90 +++++++++++++++++++--------------- cmd/restic/cmd_ls_test.go | 20 ++++---- internal/walker/walker.go | 4 +- internal/walker/walker_test.go | 16 +++--- 4 files changed, 70 insertions(+), 60 deletions(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 2213d8e7ae5..06ae6cc2054 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -75,17 +75,17 @@ func init() { } type lsPrinter interface { - Snapshot(sn *restic.Snapshot) - Node(path string, node *restic.Node, isPrefixDirectory bool) - LeaveDir(path string) - Close() + Snapshot(sn *restic.Snapshot) error + Node(path string, node *restic.Node, isPrefixDirectory bool) error + LeaveDir(path string) error + Close() error } type jsonLsPrinter struct { enc *json.Encoder } -func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) { +func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) error { type lsSnapshot struct { *restic.Snapshot ID *restic.ID `json:"id"` @@ -94,27 +94,21 @@ func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) { StructType string `json:"struct_type"` // "snapshot", deprecated } - err := p.enc.Encode(lsSnapshot{ + return p.enc.Encode(lsSnapshot{ Snapshot: sn, ID: sn.ID(), ShortID: sn.ID().Str(), MessageType: "snapshot", StructType: "snapshot", }) - if err != nil { - Warnf("JSON encode failed: %v\n", err) - } } // Print node in our custom JSON format, followed by a newline. -func (p *jsonLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) { +func (p *jsonLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) error { if isPrefixDirectory { - return - } - err := lsNodeJSON(p.enc, path, node) - if err != nil { - Warnf("JSON encode failed: %v\n", err) + return nil } + return lsNodeJSON(p.enc, path, node) } func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { @@ -160,8 +154,8 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { return enc.Encode(n) } -func (p *jsonLsPrinter) LeaveDir(_ string) {} -func (p *jsonLsPrinter) Close() {} +func (p *jsonLsPrinter) LeaveDir(_ string) error { return nil } +func (p *jsonLsPrinter) Close() error { return nil } type ncduLsPrinter struct { out io.Writer @@ -171,16 +165,17 @@ type ncduLsPrinter struct { // lsSnapshotNcdu prints a restic snapshot in Ncdu save format. // It opens the JSON list. Nodes are added with lsNodeNcdu and the list is closed by lsCloseNcdu. 
// Format documentation: https://dev.yorhel.nl/ncdu/jsonfmt -func (p *ncduLsPrinter) Snapshot(sn *restic.Snapshot) { +func (p *ncduLsPrinter) Snapshot(sn *restic.Snapshot) error { const NcduMajorVer = 1 const NcduMinorVer = 2 snapshotBytes, err := json.Marshal(sn) if err != nil { - Warnf("JSON encode failed: %v\n", err) + return err } p.depth++ - fmt.Fprintf(p.out, "[%d, %d, %s, [{\"name\":\"/\"}", NcduMajorVer, NcduMinorVer, string(snapshotBytes)) + _, err = fmt.Fprintf(p.out, "[%d, %d, %s, [{\"name\":\"/\"}", NcduMajorVer, NcduMinorVer, string(snapshotBytes)) + return err } func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { @@ -232,27 +227,30 @@ func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { return json.Marshal(outNode) } -func (p *ncduLsPrinter) Node(path string, node *restic.Node, _ bool) { +func (p *ncduLsPrinter) Node(path string, node *restic.Node, _ bool) error { out, err := lsNcduNode(path, node) if err != nil { - Warnf("JSON encode failed: %v\n", err) + return err } if node.Type == restic.NodeTypeDir { - fmt.Fprintf(p.out, ",\n%s[\n%s%s", strings.Repeat(" ", p.depth), strings.Repeat(" ", p.depth+1), string(out)) + _, err = fmt.Fprintf(p.out, ",\n%s[\n%s%s", strings.Repeat(" ", p.depth), strings.Repeat(" ", p.depth+1), string(out)) p.depth++ } else { - fmt.Fprintf(p.out, ",\n%s%s", strings.Repeat(" ", p.depth), string(out)) + _, err = fmt.Fprintf(p.out, ",\n%s%s", strings.Repeat(" ", p.depth), string(out)) } + return err } -func (p *ncduLsPrinter) LeaveDir(_ string) { +func (p *ncduLsPrinter) LeaveDir(_ string) error { p.depth-- - fmt.Fprintf(p.out, "\n%s]", strings.Repeat(" ", p.depth)) + _, err := fmt.Fprintf(p.out, "\n%s]", strings.Repeat(" ", p.depth)) + return err } -func (p *ncduLsPrinter) Close() { - fmt.Fprint(p.out, "\n]\n]\n") +func (p *ncduLsPrinter) Close() error { + _, err := fmt.Fprint(p.out, "\n]\n]\n") + return err } type textLsPrinter struct { @@ -261,17 +259,23 @@ type textLsPrinter struct { HumanReadable bool } -func (p *textLsPrinter) Snapshot(sn *restic.Snapshot) { +func (p *textLsPrinter) Snapshot(sn *restic.Snapshot) error { Verbosef("%v filtered by %v:\n", sn, p.dirs) + return nil } -func (p *textLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) { +func (p *textLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) error { if !isPrefixDirectory { Printf("%s\n", formatNode(path, node, p.ListLong, p.HumanReadable)) } + return nil } -func (p *textLsPrinter) LeaveDir(_ string) {} -func (p *textLsPrinter) Close() {} +func (p *textLsPrinter) LeaveDir(_ string) error { + return nil +} +func (p *textLsPrinter) Close() error { + return nil +} func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []string) error { if len(args) == 0 { @@ -374,7 +378,9 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return err } - printer.Snapshot(sn) + if err := printer.Snapshot(sn); err != nil { + return err + } processNode := func(_ restic.ID, nodepath string, node *restic.Node, err error) error { if err != nil { @@ -387,7 +393,9 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri printedDir := false if withinDir(nodepath) { // if we're within a target path, print the node - printer.Node(nodepath, node, false) + if err := printer.Node(nodepath, node, false); err != nil { + return err + } printedDir = true // if recursive listing is requested, signal the walker that it @@ -402,7 +410,7 @@ func runLs(ctx context.Context, opts 
LsOptions, gopts GlobalOptions, args []stri if approachingMatchingTree(nodepath) { // print node leading up to the target paths if !printedDir { - printer.Node(nodepath, node, true) + return printer.Node(nodepath, node, true) } return nil } @@ -412,7 +420,9 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri if node.Type == restic.NodeTypeDir { // immediately generate leaveDir if the directory is skipped if printedDir { - printer.LeaveDir(nodepath) + if err := printer.LeaveDir(nodepath); err != nil { + return err + } } return walker.ErrSkipNode } @@ -421,11 +431,12 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri err = walker.Walk(ctx, repo, *sn.Tree, walker.WalkVisitor{ ProcessNode: processNode, - LeaveDir: func(path string) { + LeaveDir: func(path string) error { // the root path `/` has no corresponding node and is thus also skipped by processNode if path != "/" { - printer.LeaveDir(path) + return printer.LeaveDir(path) } + return nil }, }) @@ -433,6 +444,5 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return err } - printer.Close() - return nil + return printer.Close() } diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index b8b07424245..3d4e1dbc7a2 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -134,29 +134,29 @@ func TestLsNcdu(t *testing.T) { } modTime := time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC) - printer.Snapshot(&restic.Snapshot{ + rtest.OK(t, printer.Snapshot(&restic.Snapshot{ Hostname: "host", Paths: []string{"/example"}, - }) - printer.Node("/directory", &restic.Node{ + })) + rtest.OK(t, printer.Node("/directory", &restic.Node{ Type: restic.NodeTypeDir, Name: "directory", ModTime: modTime, - }, false) - printer.Node("/directory/data", &restic.Node{ + }, false)) + rtest.OK(t, printer.Node("/directory/data", &restic.Node{ Type: restic.NodeTypeFile, Name: "data", Size: 42, ModTime: modTime, - }, false) - printer.LeaveDir("/directory") - printer.Node("/file", &restic.Node{ + }, false)) + rtest.OK(t, printer.LeaveDir("/directory")) + rtest.OK(t, printer.Node("/file", &restic.Node{ Type: restic.NodeTypeFile, Name: "file", Size: 12345, ModTime: modTime, - }, false) - printer.Close() + }, false)) + rtest.OK(t, printer.Close()) rtest.Equals(t, `[1, 2, {"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, [{"name":"/"}, [ diff --git a/internal/walker/walker.go b/internal/walker/walker.go index 8acfed2f2b8..252bc3530e0 100644 --- a/internal/walker/walker.go +++ b/internal/walker/walker.go @@ -28,7 +28,7 @@ type WalkVisitor struct { // was returned. This function is mandatory ProcessNode WalkFunc // Optional callback - LeaveDir func(path string) + LeaveDir func(path string) error } // Walk calls walkFn recursively for each node in root. If walkFn returns an @@ -100,7 +100,7 @@ func walk(ctx context.Context, repo restic.BlobLoader, prefix string, parentTree } if visitor.LeaveDir != nil { - visitor.LeaveDir(prefix) + return visitor.LeaveDir(prefix) } return nil diff --git a/internal/walker/walker_test.go b/internal/walker/walker_test.go index fa377bb8fa6..3614a2397e4 100644 --- a/internal/walker/walker_test.go +++ b/internal/walker/walker_test.go @@ -93,12 +93,12 @@ func (t TreeMap) Connections() uint { // checkFunc returns a function suitable for walking the tree to check // something, and a function which will check the final result. 
-type checkFunc func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB)) +type checkFunc func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB)) // checkItemOrder ensures that the order of the 'path' arguments is the one passed in as 'want'. func checkItemOrder(want []string) checkFunc { pos := 0 - return func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB)) { + return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB)) { walker = func(treeID restic.ID, path string, node *restic.Node, err error) error { if err != nil { t.Errorf("error walking %v: %v", path, err) @@ -117,8 +117,8 @@ func checkItemOrder(want []string) checkFunc { return nil } - leaveDir = func(path string) { - _ = walker(restic.ID{}, "leave: "+path, nil, nil) + leaveDir = func(path string) error { + return walker(restic.ID{}, "leave: "+path, nil, nil) } final = func(t testing.TB) { @@ -134,7 +134,7 @@ func checkItemOrder(want []string) checkFunc { // checkParentTreeOrder ensures that the order of the 'parentID' arguments is the one passed in as 'want'. func checkParentTreeOrder(want []string) checkFunc { pos := 0 - return func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB)) { + return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB)) { walker = func(treeID restic.ID, path string, node *restic.Node, err error) error { if err != nil { t.Errorf("error walking %v: %v", path, err) @@ -168,7 +168,7 @@ func checkParentTreeOrder(want []string) checkFunc { func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc { var pos int - return func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB)) { + return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB)) { walker = func(treeID restic.ID, path string, node *restic.Node, err error) error { if err != nil { t.Errorf("error walking %v: %v", path, err) @@ -192,8 +192,8 @@ func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc { return nil } - leaveDir = func(path string) { - _ = walker(restic.ID{}, "leave: "+path, nil, nil) + leaveDir = func(path string) error { + return walker(restic.ID{}, "leave: "+path, nil, nil) } final = func(t testing.TB) { From 4052a5927cd956fd837f098d096b4110c7a8dad5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 15:39:42 +0100 Subject: [PATCH 792/893] fs: move getVolumePathName function --- internal/fs/ea_windows.go | 18 ------------- internal/fs/ea_windows_test.go | 44 -------------------------------- internal/fs/node_windows.go | 17 ++++++++++++ internal/fs/node_windows_test.go | 43 +++++++++++++++++++++++++++++++ 4 files changed, 60 insertions(+), 62 deletions(-) diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go index 6bfe2020906..fe9a3c42a63 100644 --- a/internal/fs/ea_windows.go +++ b/internal/fs/ea_windows.go @@ -8,7 +8,6 @@ import ( "encoding/binary" "errors" "fmt" - "strings" "syscall" "unsafe" @@ -299,20 +298,3 @@ func pathSupportsExtendedAttributes(path string) (supported bool, err error) { supported = (fileSystemFlags & windows.FILE_SUPPORTS_EXTENDED_ATTRIBUTES) != 0 return supported, nil } - -// getVolumePathName returns the volume path name for the given path. 
-func getVolumePathName(path string) (volumeName string, err error) { - utf16Path, err := windows.UTF16PtrFromString(path) - if err != nil { - return "", err - } - // Get the volume path (e.g., "D:") - var volumePath [windows.MAX_PATH + 1]uint16 - err = windows.GetVolumePathName(utf16Path, &volumePath[0], windows.MAX_PATH+1) - if err != nil { - return "", err - } - // Trim any trailing backslashes - volumeName = strings.TrimRight(windows.UTF16ToString(volumePath[:]), "\\") - return volumeName, nil -} diff --git a/internal/fs/ea_windows_test.go b/internal/fs/ea_windows_test.go index 64bc7f7b6aa..00cbe97f816 100644 --- a/internal/fs/ea_windows_test.go +++ b/internal/fs/ea_windows_test.go @@ -10,7 +10,6 @@ import ( "os" "path/filepath" "reflect" - "strings" "syscall" "testing" "unsafe" @@ -278,46 +277,3 @@ func TestPathSupportsExtendedAttributes(t *testing.T) { t.Error("Expected an error for non-existent path, but got nil") } } - -func TestGetVolumePathName(t *testing.T) { - tempDirVolume := filepath.VolumeName(os.TempDir()) - testCases := []struct { - name string - path string - expectedPrefix string - }{ - { - name: "Root directory", - path: os.Getenv("SystemDrive") + `\`, - expectedPrefix: os.Getenv("SystemDrive"), - }, - { - name: "Nested directory", - path: os.Getenv("SystemDrive") + `\Windows\System32`, - expectedPrefix: os.Getenv("SystemDrive"), - }, - { - name: "Temp directory", - path: os.TempDir() + `\`, - expectedPrefix: tempDirVolume, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - volumeName, err := getVolumePathName(tc.path) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if !strings.HasPrefix(volumeName, tc.expectedPrefix) { - t.Errorf("Expected volume name to start with %s, but got %s", tc.expectedPrefix, volumeName) - } - }) - } - - // Test with an invalid path - _, err := getVolumePathName("Z:\\NonExistentPath") - if err == nil { - t.Error("Expected an error for non-existent path, but got nil") - } -} diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index d20d16642b1..837d4642805 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -423,6 +423,23 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { return isEASupportedVolume, err } +// getVolumePathName returns the volume path name for the given path. 
+func getVolumePathName(path string) (volumeName string, err error) { + utf16Path, err := windows.UTF16PtrFromString(path) + if err != nil { + return "", err + } + // Get the volume path (e.g., "D:") + var volumePath [windows.MAX_PATH + 1]uint16 + err = windows.GetVolumePathName(utf16Path, &volumePath[0], windows.MAX_PATH+1) + if err != nil { + return "", err + } + // Trim any trailing backslashes + volumeName = strings.TrimRight(windows.UTF16ToString(volumePath[:]), "\\") + return volumeName, nil +} + // isVolumePath returns whether a path refers to a volume func isVolumePath(path string) (bool, error) { volName, err := prepareVolumeName(path) diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index 94052662f99..1bb76b20455 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -533,3 +533,46 @@ func getOSVolumeGUIDPath(t *testing.T) string { return windows.UTF16ToString(volumeGUID[:]) } + +func TestGetVolumePathName(t *testing.T) { + tempDirVolume := filepath.VolumeName(os.TempDir()) + testCases := []struct { + name string + path string + expectedPrefix string + }{ + { + name: "Root directory", + path: os.Getenv("SystemDrive") + `\`, + expectedPrefix: os.Getenv("SystemDrive"), + }, + { + name: "Nested directory", + path: os.Getenv("SystemDrive") + `\Windows\System32`, + expectedPrefix: os.Getenv("SystemDrive"), + }, + { + name: "Temp directory", + path: os.TempDir() + `\`, + expectedPrefix: tempDirVolume, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + volumeName, err := getVolumePathName(tc.path) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if !strings.HasPrefix(volumeName, tc.expectedPrefix) { + t.Errorf("Expected volume name to start with %s, but got %s", tc.expectedPrefix, volumeName) + } + }) + } + + // Test with an invalid path + _, err := getVolumePathName("Z:\\NonExistentPath") + if err == nil { + t.Error("Expected an error for non-existent path, but got nil") + } +} From 289159beaf639d09a4a41878665340d7ac645f11 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 15:39:58 +0100 Subject: [PATCH 793/893] fs: remove redundant fixpath in vss code --- internal/fs/fs_local_vss.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 1915e2a7ca1..dcbda2a847f 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -176,7 +176,7 @@ func (fs *LocalVss) snapshotPath(path string) string { return path } - fixPath = strings.TrimPrefix(fixpath(path), `\\?\`) + fixPath = strings.TrimPrefix(fixPath, `\\?\`) fixPathLower := strings.ToLower(fixPath) volumeName := filepath.VolumeName(fixPath) volumeNameLower := strings.ToLower(volumeName) From f9a90aae894632704d5c7703ba23a1d7bc065eeb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 18:56:11 +0100 Subject: [PATCH 794/893] fs: fallback to low privilege security descriptors on access denied --- changelog/unreleased/issue-5003 | 14 ++++++++++++++ internal/fs/sd_windows.go | 22 ++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 changelog/unreleased/issue-5003 diff --git a/changelog/unreleased/issue-5003 b/changelog/unreleased/issue-5003 new file mode 100644 index 00000000000..d02b06bc71e --- /dev/null +++ b/changelog/unreleased/issue-5003 @@ -0,0 +1,14 @@ +Bugfix: fix metadata errors during backup of removable disks on Windows + +Since restic 0.17.0, backups of removable disks on 
Windows could report +errors with retrieving metadata like shown below. + +``` +error: incomplete metadata for d:\filename: get named security info failed with: Access is denied. +``` + +This has now been fixed. + +https://github.com/restic/restic/issues/5003 +https://github.com/restic/restic/pull/5123 +https://forum.restic.net/t/backing-up-a-folder-from-a-veracrypt-volume-brings-up-errors-since-restic-v17-0/8444 diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 66d9bcb5444..6bffa4fe26e 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -54,6 +54,15 @@ func getSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err sd, err = getNamedSecurityInfoLow(filePath) } else { sd, err = getNamedSecurityInfoHigh(filePath) + // Fallback to the low privilege version when receiving an access denied error. + // For some reason the ERROR_PRIVILEGE_NOT_HELD error is not returned for removable media + // but instead an access denied error is returned. Workaround that by just retrying with + // the low privilege version, but don't switch privileges as we cannot distinguish this + // case from actual access denied errors. + // see https://github.com/restic/restic/issues/5003#issuecomment-2452314191 for details + if err != nil && isAccessDeniedError(err) { + sd, err = getNamedSecurityInfoLow(filePath) + } } if err != nil { if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { @@ -114,6 +123,10 @@ func setSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { err = setNamedSecurityInfoLow(filePath, dacl) } else { err = setNamedSecurityInfoHigh(filePath, owner, group, dacl, sacl) + // See corresponding fallback in getSecurityDescriptor for an explanation + if err != nil && isAccessDeniedError(err) { + err = setNamedSecurityInfoLow(filePath, dacl) + } } if err != nil { @@ -174,6 +187,15 @@ func isHandlePrivilegeNotHeldError(err error) bool { return false } +// isAccessDeniedError checks if the error is ERROR_ACCESS_DENIED +func isAccessDeniedError(err error) bool { + if errno, ok := err.(syscall.Errno); ok { + // Compare the error code to the expected value + return errno == windows.ERROR_ACCESS_DENIED + } + return false +} + // securityDescriptorBytesToStruct converts the security descriptor bytes representation // into a pointer to windows SECURITY_DESCRIPTOR. func securityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) { From d8e03849408c73518187f90652bb3970fc0cb3ad Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 17 Oct 2024 19:45:03 +0200 Subject: [PATCH 795/893] doc: document safety feature for --target / --delete --- doc/050_restore.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 1a920fad432..9558ab1d4a0 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -132,6 +132,10 @@ options will be deleted. For example, the command ``restic -r /srv/restic-repo restore 79766175:/work --target /tmp/restore-work --include /foo --delete`` would only delete files within ``/tmp/restore-work/foo``. +When using ``--target / --delete`` then the ``restore`` command only works if either an ``--include`` +or ``--exclude`` option is also specified. This ensures that one cannot accidentaly delete +the whole system. 
+ Dry run ------- From 75ec7d32690234271375004cf76a6c75050a0a2f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 9 Sep 2024 22:15:30 +0200 Subject: [PATCH 796/893] fuse: cache fs.Node instances A particular node should always be represented by a single instance. This is necessary to allow the fuse library to assign a stable nodeId to a node. macOS Sonoma trips over the previous, unstable behavior when using fuse-t. --- internal/fuse/dir.go | 43 +++++++++++++++++++--------------- internal/fuse/snapshots_dir.go | 14 +++++++---- internal/fuse/tree_cache.go | 38 ++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 24 deletions(-) create mode 100644 internal/fuse/tree_cache.go diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go index fd030295b6d..e87e0124781 100644 --- a/internal/fuse/dir.go +++ b/internal/fuse/dir.go @@ -29,6 +29,7 @@ type dir struct { parentInode uint64 node *restic.Node m sync.Mutex + cache treeCache } func cleanupNodeName(name string) string { @@ -43,6 +44,7 @@ func newDir(root *Root, inode, parentInode uint64, node *restic.Node) (*dir, err node: node, inode: inode, parentInode: parentInode, + cache: *newTreeCache(), }, nil } @@ -87,6 +89,7 @@ func newDirFromSnapshot(root *Root, inode uint64, snapshot *restic.Snapshot) (*d Subtree: snapshot.Tree, }, inode: inode, + cache: *newTreeCache(), }, nil } @@ -208,25 +211,27 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { return nil, err } - node, ok := d.items[name] - if !ok { - debug.Log(" Lookup(%v) -> not found", name) - return nil, syscall.ENOENT - } - inode := inodeFromNode(d.inode, node) - switch node.Type { - case "dir": - return newDir(d.root, inode, d.inode, node) - case "file": - return newFile(d.root, inode, node) - case "symlink": - return newLink(d.root, inode, node) - case "dev", "chardev", "fifo", "socket": - return newOther(d.root, inode, node) - default: - debug.Log(" node %v has unknown type %v", name, node.Type) - return nil, syscall.ENOENT - } + return d.cache.lookupOrCreate(name, func() (fs.Node, error) { + node, ok := d.items[name] + if !ok { + debug.Log(" Lookup(%v) -> not found", name) + return nil, syscall.ENOENT + } + inode := inodeFromNode(d.inode, node) + switch node.Type { + case "dir": + return newDir(d.root, inode, d.inode, node) + case "file": + return newFile(d.root, inode, node) + case "symlink": + return newLink(d.root, inode, node) + case "dev", "chardev", "fifo", "socket": + return newOther(d.root, inode, node) + default: + debug.Log(" node %v has unknown type %v", name, node.Type) + return nil, syscall.ENOENT + } + }) } func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { diff --git a/internal/fuse/snapshots_dir.go b/internal/fuse/snapshots_dir.go index 4cae7106c47..cfe1f782a26 100644 --- a/internal/fuse/snapshots_dir.go +++ b/internal/fuse/snapshots_dir.go @@ -23,6 +23,7 @@ type SnapshotsDir struct { parentInode uint64 dirStruct *SnapshotsDirStructure prefix string + cache treeCache } // ensure that *SnapshotsDir implements these interfaces @@ -38,6 +39,7 @@ func NewSnapshotsDir(root *Root, inode, parentInode uint64, dirStruct *Snapshots parentInode: parentInode, dirStruct: dirStruct, prefix: prefix, + cache: *newTreeCache(), } } @@ -107,8 +109,12 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) return nil, syscall.ENOENT } - entry := meta.names[name] - if entry != nil { + return d.cache.lookupOrCreate(name, func() (fs.Node, error) { + entry := 
meta.names[name] + if entry == nil { + return nil, syscall.ENOENT + } + inode := inodeFromName(d.inode, name) if entry.linkTarget != "" { return newSnapshotLink(d.root, inode, entry.linkTarget, entry.snapshot) @@ -116,9 +122,7 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) return newDirFromSnapshot(d.root, inode, entry.snapshot) } return NewSnapshotsDir(d.root, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil - } - - return nil, syscall.ENOENT + }) } // SnapshotLink diff --git a/internal/fuse/tree_cache.go b/internal/fuse/tree_cache.go new file mode 100644 index 00000000000..addc54a469d --- /dev/null +++ b/internal/fuse/tree_cache.go @@ -0,0 +1,38 @@ +//go:build darwin || freebsd || linux +// +build darwin freebsd linux + +package fuse + +import ( + "sync" + + "github.com/anacrolix/fuse/fs" +) + +type treeCache struct { + nodes map[string]fs.Node + m sync.Mutex +} + +func newTreeCache() *treeCache { + return &treeCache{ + nodes: map[string]fs.Node{}, + } +} + +func (t *treeCache) lookupOrCreate(name string, create func() (fs.Node, error)) (fs.Node, error) { + t.m.Lock() + defer t.m.Unlock() + + if node, ok := t.nodes[name]; ok { + return node, nil + } + + node, err := create() + if err != nil { + return nil, err + } + + t.nodes[name] = node + return node, nil +} From de4f8b344ea0a17b883ac23496a99721eca42bf7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 9 Sep 2024 22:37:08 +0200 Subject: [PATCH 797/893] fuse: add missing type assertion for optional interfaces --- internal/fuse/dir.go | 2 ++ internal/fuse/link.go | 2 ++ internal/fuse/other.go | 4 ++++ 3 files changed, 8 insertions(+) diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go index e87e0124781..49b32e21a71 100644 --- a/internal/fuse/dir.go +++ b/internal/fuse/dir.go @@ -20,6 +20,8 @@ import ( // Statically ensure that *dir implement those interface var _ = fs.HandleReadDirAller(&dir{}) +var _ = fs.NodeGetxattrer(&dir{}) +var _ = fs.NodeListxattrer(&dir{}) var _ = fs.NodeStringLookuper(&dir{}) type dir struct { diff --git a/internal/fuse/link.go b/internal/fuse/link.go index 3aea8b06e91..975e640ea1e 100644 --- a/internal/fuse/link.go +++ b/internal/fuse/link.go @@ -12,6 +12,8 @@ import ( ) // Statically ensure that *link implements the given interface +var _ = fs.NodeGetxattrer(&link{}) +var _ = fs.NodeListxattrer(&link{}) var _ = fs.NodeReadlinker(&link{}) type link struct { diff --git a/internal/fuse/other.go b/internal/fuse/other.go index f536de5c1be..d459d0efdbe 100644 --- a/internal/fuse/other.go +++ b/internal/fuse/other.go @@ -7,9 +7,13 @@ import ( "context" "github.com/anacrolix/fuse" + "github.com/anacrolix/fuse/fs" "github.com/restic/restic/internal/restic" ) +// Statically ensure that *other implements the given interface +var _ = fs.NodeReadlinker(&other{}) + type other struct { root *Root node *restic.Node From 0e9716a6e61b5daf1a25cfed73997f13bbb0069e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 9 Sep 2024 22:37:51 +0200 Subject: [PATCH 798/893] fuse: forget fs.Node instances on request by the kernel Forget fs.Node instances once the kernel frees the corresponding nodeId. This ensures that restic does not run out of memory on large snapshots. 
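A note on the mechanism, since it is spread over several files below: `lookupOrCreate` hands every newly created node a callback that evicts that node's own cache entry, and the node simply invokes the callback from its `Forget` method. The following is a minimal, self-contained sketch of this pattern, not the real implementation; it stores plain string values instead of `fs.Node`, and `nodeCache` is a made-up stand-in for the real `treeCache`, so it runs without the fuse library.

```
package main

import (
	"fmt"
	"sync"
)

type forgetFn func()

// nodeCache is a stand-in for the real treeCache; it stores strings instead
// of fs.Node values so the sketch runs without the fuse library.
type nodeCache struct {
	mu    sync.Mutex
	nodes map[string]string
}

func newNodeCache() *nodeCache {
	return &nodeCache{nodes: map[string]string{}}
}

// lookupOrCreate returns the cached entry for name, or calls create with a
// forget callback that evicts the entry again once the kernel drops the node.
func (c *nodeCache) lookupOrCreate(name string, create func(forget forgetFn) (string, error)) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if node, ok := c.nodes[name]; ok {
		return node, nil
	}

	node, err := create(func() {
		c.mu.Lock()
		defer c.mu.Unlock()
		delete(c.nodes, name)
	})
	if err != nil {
		return "", err
	}
	c.nodes[name] = node
	return node, nil
}

func main() {
	c := newNodeCache()
	var forget forgetFn

	create := func(f forgetFn) (string, error) {
		forget = f
		fmt.Println("created")
		return "node for dir-0", nil
	}

	n1, _ := c.lookupOrCreate("dir-0", create)
	n2, _ := c.lookupOrCreate("dir-0", create) // served from the cache, create is not called
	fmt.Println(n1 == n2)                      // true: the same instance is returned

	forget()                                 // the kernel sent Forget: the entry is evicted
	_, _ = c.lookupOrCreate("dir-0", create) // created anew
}
```

Keeping the eviction callback closed over the cache and the name means the nodes themselves never need a reference back to the cache.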
--- internal/fuse/dir.go | 24 ++++++++++++++++-------- internal/fuse/file.go | 23 +++++++++++++++-------- internal/fuse/fuse_test.go | 6 +++--- internal/fuse/link.go | 16 +++++++++++----- internal/fuse/other.go | 16 +++++++++++----- internal/fuse/root.go | 2 +- internal/fuse/snapshots_dir.go | 27 ++++++++++++++++++++------- internal/fuse/tree_cache.go | 11 +++++++++-- 8 files changed, 86 insertions(+), 39 deletions(-) diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go index 49b32e21a71..beb3420c7a5 100644 --- a/internal/fuse/dir.go +++ b/internal/fuse/dir.go @@ -20,12 +20,14 @@ import ( // Statically ensure that *dir implement those interface var _ = fs.HandleReadDirAller(&dir{}) +var _ = fs.NodeForgetter(&dir{}) var _ = fs.NodeGetxattrer(&dir{}) var _ = fs.NodeListxattrer(&dir{}) var _ = fs.NodeStringLookuper(&dir{}) type dir struct { root *Root + forget forgetFn items map[string]*restic.Node inode uint64 parentInode uint64 @@ -38,11 +40,12 @@ func cleanupNodeName(name string) string { return filepath.Base(name) } -func newDir(root *Root, inode, parentInode uint64, node *restic.Node) (*dir, error) { +func newDir(root *Root, forget forgetFn, inode, parentInode uint64, node *restic.Node) (*dir, error) { debug.Log("new dir for %v (%v)", node.Name, node.Subtree) return &dir{ root: root, + forget: forget, node: node, inode: inode, parentInode: parentInode, @@ -79,10 +82,11 @@ func replaceSpecialNodes(ctx context.Context, repo restic.BlobLoader, node *rest return tree.Nodes, nil } -func newDirFromSnapshot(root *Root, inode uint64, snapshot *restic.Snapshot) (*dir, error) { +func newDirFromSnapshot(root *Root, forget forgetFn, inode uint64, snapshot *restic.Snapshot) (*dir, error) { debug.Log("new dir for snapshot %v (%v)", snapshot.ID(), snapshot.Tree) return &dir{ - root: root, + root: root, + forget: forget, node: &restic.Node{ AccessTime: snapshot.Time, ModTime: snapshot.Time, @@ -213,7 +217,7 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { return nil, err } - return d.cache.lookupOrCreate(name, func() (fs.Node, error) { + return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) { node, ok := d.items[name] if !ok { debug.Log(" Lookup(%v) -> not found", name) @@ -222,13 +226,13 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { inode := inodeFromNode(d.inode, node) switch node.Type { case "dir": - return newDir(d.root, inode, d.inode, node) + return newDir(d.root, forget, inode, d.inode, node) case "file": - return newFile(d.root, inode, node) + return newFile(d.root, forget, inode, node) case "symlink": - return newLink(d.root, inode, node) + return newLink(d.root, forget, inode, node) case "dev", "chardev", "fifo", "socket": - return newOther(d.root, inode, node) + return newOther(d.root, forget, inode, node) default: debug.Log(" node %v has unknown type %v", name, node.Type) return nil, syscall.ENOENT @@ -244,3 +248,7 @@ func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fus func (d *dir) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(d.node, req, resp) } + +func (d *dir) Forget() { + d.forget() +} diff --git a/internal/fuse/file.go b/internal/fuse/file.go index 494fca283ba..a69471f8372 100644 --- a/internal/fuse/file.go +++ b/internal/fuse/file.go @@ -20,14 +20,16 @@ const blockSize = 512 // Statically ensure that *file and *openFile implement the given interfaces var _ = fs.HandleReader(&openFile{}) -var _ = 
fs.NodeListxattrer(&file{}) +var _ = fs.NodeForgetter(&file{}) var _ = fs.NodeGetxattrer(&file{}) +var _ = fs.NodeListxattrer(&file{}) var _ = fs.NodeOpener(&file{}) type file struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } type openFile struct { @@ -36,12 +38,13 @@ type openFile struct { cumsize []uint64 } -func newFile(root *Root, inode uint64, node *restic.Node) (fusefile *file, err error) { +func newFile(root *Root, forget forgetFn, inode uint64, node *restic.Node) (fusefile *file, err error) { debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content)) return &file{ - inode: inode, - root: root, - node: node, + inode: inode, + forget: forget, + root: root, + node: node, }, nil } @@ -172,3 +175,7 @@ func (f *file) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fu func (f *file) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(f.node, req, resp) } + +func (f *file) Forget() { + f.forget() +} diff --git a/internal/fuse/fuse_test.go b/internal/fuse/fuse_test.go index aebcb127282..5818c1edd5a 100644 --- a/internal/fuse/fuse_test.go +++ b/internal/fuse/fuse_test.go @@ -119,7 +119,7 @@ func TestFuseFile(t *testing.T) { root := &Root{repo: repo, blobCache: bloblru.New(blobCacheSize)} inode := inodeFromNode(1, node) - f, err := newFile(root, inode, node) + f, err := newFile(root, func() {}, inode, node) rtest.OK(t, err) of, err := f.Open(context.TODO(), nil, nil) rtest.OK(t, err) @@ -162,7 +162,7 @@ func TestFuseDir(t *testing.T) { } parentInode := inodeFromName(0, "parent") inode := inodeFromName(1, "foo") - d, err := newDir(root, inode, parentInode, node) + d, err := newDir(root, func() {}, inode, parentInode, node) rtest.OK(t, err) // don't open the directory as that would require setting up a proper tree blob @@ -276,7 +276,7 @@ func TestLink(t *testing.T) { {Name: "foo", Value: []byte("bar")}, }} - lnk, err := newLink(&Root{}, 42, node) + lnk, err := newLink(&Root{}, func() {}, 42, node) rtest.OK(t, err) target, err := lnk.Readlink(context.TODO(), nil) rtest.OK(t, err) diff --git a/internal/fuse/link.go b/internal/fuse/link.go index 975e640ea1e..f8bf8d3ee11 100644 --- a/internal/fuse/link.go +++ b/internal/fuse/link.go @@ -12,18 +12,20 @@ import ( ) // Statically ensure that *link implements the given interface +var _ = fs.NodeForgetter(&link{}) var _ = fs.NodeGetxattrer(&link{}) var _ = fs.NodeListxattrer(&link{}) var _ = fs.NodeReadlinker(&link{}) type link struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } -func newLink(root *Root, inode uint64, node *restic.Node) (*link, error) { - return &link{root: root, inode: inode, node: node}, nil +func newLink(root *Root, forget forgetFn, inode uint64, node *restic.Node) (*link, error) { + return &link{root: root, forget: forget, inode: inode, node: node}, nil } func (l *link) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) { @@ -57,3 +59,7 @@ func (l *link) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fu func (l *link) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(l.node, req, resp) } + +func (l *link) Forget() { + l.forget() +} diff --git a/internal/fuse/other.go b/internal/fuse/other.go index d459d0efdbe..cbd9667ccf1 100644 --- a/internal/fuse/other.go +++ b/internal/fuse/other.go @@ -12,16 
+12,18 @@ import ( ) // Statically ensure that *other implements the given interface +var _ = fs.NodeForgetter(&other{}) var _ = fs.NodeReadlinker(&other{}) type other struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } -func newOther(root *Root, inode uint64, node *restic.Node) (*other, error) { - return &other{root: root, inode: inode, node: node}, nil +func newOther(root *Root, forget forgetFn, inode uint64, node *restic.Node) (*other, error) { + return &other{root: root, forget: forget, inode: inode, node: node}, nil } func (l *other) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) { @@ -44,3 +46,7 @@ func (l *other) Attr(_ context.Context, a *fuse.Attr) error { return nil } + +func (l *other) Forget() { + l.forget() +} diff --git a/internal/fuse/root.go b/internal/fuse/root.go index ab6116f0dcc..72a0634fca5 100644 --- a/internal/fuse/root.go +++ b/internal/fuse/root.go @@ -66,7 +66,7 @@ func NewRoot(repo restic.Repository, cfg Config) *Root { } } - root.SnapshotsDir = NewSnapshotsDir(root, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "") + root.SnapshotsDir = NewSnapshotsDir(root, func() {}, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "") return root } diff --git a/internal/fuse/snapshots_dir.go b/internal/fuse/snapshots_dir.go index cfe1f782a26..bcab160849e 100644 --- a/internal/fuse/snapshots_dir.go +++ b/internal/fuse/snapshots_dir.go @@ -19,6 +19,7 @@ import ( // It uses the saved prefix to select the corresponding MetaDirData. type SnapshotsDir struct { root *Root + forget forgetFn inode uint64 parentInode uint64 dirStruct *SnapshotsDirStructure @@ -28,13 +29,15 @@ type SnapshotsDir struct { // ensure that *SnapshotsDir implements these interfaces var _ = fs.HandleReadDirAller(&SnapshotsDir{}) +var _ = fs.NodeForgetter(&SnapshotsDir{}) var _ = fs.NodeStringLookuper(&SnapshotsDir{}) // NewSnapshotsDir returns a new directory structure containing snapshots and "latest" links -func NewSnapshotsDir(root *Root, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir { +func NewSnapshotsDir(root *Root, forget forgetFn, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir { debug.Log("create snapshots dir, inode %d", inode) return &SnapshotsDir{ root: root, + forget: forget, inode: inode, parentInode: parentInode, dirStruct: dirStruct, @@ -109,7 +112,7 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) return nil, syscall.ENOENT } - return d.cache.lookupOrCreate(name, func() (fs.Node, error) { + return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) { entry := meta.names[name] if entry == nil { return nil, syscall.ENOENT @@ -117,27 +120,33 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) inode := inodeFromName(d.inode, name) if entry.linkTarget != "" { - return newSnapshotLink(d.root, inode, entry.linkTarget, entry.snapshot) + return newSnapshotLink(d.root, forget, inode, entry.linkTarget, entry.snapshot) } else if entry.snapshot != nil { - return newDirFromSnapshot(d.root, inode, entry.snapshot) + return newDirFromSnapshot(d.root, forget, inode, entry.snapshot) } - return NewSnapshotsDir(d.root, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil + return NewSnapshotsDir(d.root, forget, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil }) } 
+func (d *SnapshotsDir) Forget() { + d.forget() +} + // SnapshotLink type snapshotLink struct { root *Root + forget forgetFn inode uint64 target string snapshot *restic.Snapshot } +var _ = fs.NodeForgetter(&snapshotLink{}) var _ = fs.NodeReadlinker(&snapshotLink{}) // newSnapshotLink -func newSnapshotLink(root *Root, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) { - return &snapshotLink{root: root, inode: inode, target: target, snapshot: snapshot}, nil +func newSnapshotLink(root *Root, forget forgetFn, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) { + return &snapshotLink{root: root, forget: forget, inode: inode, target: target, snapshot: snapshot}, nil } // Readlink @@ -161,3 +170,7 @@ func (l *snapshotLink) Attr(_ context.Context, a *fuse.Attr) error { return nil } + +func (l *snapshotLink) Forget() { + l.forget() +} diff --git a/internal/fuse/tree_cache.go b/internal/fuse/tree_cache.go index addc54a469d..d913f9b81b3 100644 --- a/internal/fuse/tree_cache.go +++ b/internal/fuse/tree_cache.go @@ -14,13 +14,15 @@ type treeCache struct { m sync.Mutex } +type forgetFn func() + func newTreeCache() *treeCache { return &treeCache{ nodes: map[string]fs.Node{}, } } -func (t *treeCache) lookupOrCreate(name string, create func() (fs.Node, error)) (fs.Node, error) { +func (t *treeCache) lookupOrCreate(name string, create func(forget forgetFn) (fs.Node, error)) (fs.Node, error) { t.m.Lock() defer t.m.Unlock() @@ -28,7 +30,12 @@ func (t *treeCache) lookupOrCreate(name string, create func() (fs.Node, error)) return node, nil } - node, err := create() + node, err := create(func() { + t.m.Lock() + defer t.m.Unlock() + + delete(t.nodes, name) + }) if err != nil { return nil, err } From 8aebea7ba23ae19899ad01b0c36bae867335f6b1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 11 Sep 2024 21:31:05 +0200 Subject: [PATCH 799/893] fuse: test that the same fs.Node is used for the same file --- internal/fuse/fuse_test.go | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/internal/fuse/fuse_test.go b/internal/fuse/fuse_test.go index 5818c1edd5a..6cd7a450a5b 100644 --- a/internal/fuse/fuse_test.go +++ b/internal/fuse/fuse_test.go @@ -217,6 +217,34 @@ func testTopUIDGID(t *testing.T, cfg Config, repo restic.Repository, uid, gid ui rtest.Equals(t, uint32(0), attr.Gid) } +// The Lookup method must return the same Node object unless it was forgotten in the meantime +func testStableLookup(t *testing.T, node fs.Node, path string) fs.Node { + t.Helper() + result, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + result2, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + rtest.Assert(t, result == result2, "%v are not the same object", path) + + result2.(fs.NodeForgetter).Forget() + result2, err = node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + rtest.Assert(t, result != result2, "object for %v should change after forget", path) + return result +} + +func TestStableNodeObjects(t *testing.T) { + repo := repository.TestRepository(t) + restic.TestCreateSnapshot(t, repo, time.Unix(1460289341, 207401672), 2) + root := NewRoot(repo, Config{}) + + idsdir := testStableLookup(t, root, "ids") + snapID := loadFirstSnapshot(t, repo).ID().Str() + snapshotdir := testStableLookup(t, idsdir, snapID) + dir := testStableLookup(t, snapshotdir, "dir-0") + testStableLookup(t, dir, "file-2") +} + // Test reporting of fuse.Attr.Blocks in 
multiples of 512. func TestBlocks(t *testing.T) { root := &Root{} From d0c5b5a9b71d0e87ce528e3e3b084807c51a600d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 11 Sep 2024 21:39:35 +0200 Subject: [PATCH 800/893] add changelog for fuse fix --- changelog/unreleased/issue-4971 | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 changelog/unreleased/issue-4971 diff --git a/changelog/unreleased/issue-4971 b/changelog/unreleased/issue-4971 new file mode 100644 index 00000000000..9ab529408a1 --- /dev/null +++ b/changelog/unreleased/issue-4971 @@ -0,0 +1,9 @@ +Bugfix: Fix unusable `mount` on macOS Sonoma + +On macOS Sonoma when using fuse-t, it was not possible to access files in +a mounted repository. + +This issue has been resolved. + +https://github.com/restic/restic/issues/4971 +https://github.com/restic/restic/pull/5048 From b8b7896d4c6f298ef06537cf0ab7525daa8fdfbd Mon Sep 17 00:00:00 2001 From: Joram Berger Date: Sun, 27 Oct 2024 19:22:34 +0100 Subject: [PATCH 801/893] doc: Clarify number of blobs are added The numbers reported as `data_blobs` and `tree_blobs` are not total numbers of blobs but numbers of blobs added with the given snapshot. --- doc/075_scripting.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 9fa0da6d049..aea78864436 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -191,9 +191,9 @@ Summary is the last output line in a successful backup. +---------------------------+---------------------------------------------------------+ | ``dirs_unmodified`` | Number of directories that did not change | +---------------------------+---------------------------------------------------------+ -| ``data_blobs`` | Number of data blobs | +| ``data_blobs`` | Number of data blobs added | +---------------------------+---------------------------------------------------------+ -| ``tree_blobs`` | Number of tree blobs | +| ``tree_blobs`` | Number of tree blobs added | +---------------------------+---------------------------------------------------------+ | ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ @@ -651,9 +651,9 @@ was created. 
+---------------------------+---------------------------------------------------------+ | ``dirs_unmodified`` | Number of directories that did not change | +---------------------------+---------------------------------------------------------+ -| ``data_blobs`` | Number of data blobs | +| ``data_blobs`` | Number of data blobs added | +---------------------------+---------------------------------------------------------+ -| ``tree_blobs`` | Number of tree blobs | +| ``tree_blobs`` | Number of tree blobs added | +---------------------------+---------------------------------------------------------+ | ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ From b8527f4b380093d071469071fd2a9ba6cf40da3d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 17 Oct 2024 20:52:14 +0200 Subject: [PATCH 802/893] prune: allow dry-run without taking a lock --- changelog/unreleased/pull-5096 | 7 +++++++ cmd/restic/cmd_prune.go | 6 +++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/pull-5096 diff --git a/changelog/unreleased/pull-5096 b/changelog/unreleased/pull-5096 new file mode 100644 index 00000000000..d1e1d09b26b --- /dev/null +++ b/changelog/unreleased/pull-5096 @@ -0,0 +1,7 @@ +Enhancement: Allow prune dry-run without lock + +The `prune --dry-run --no-lock` now allows performing a dry-run without +taking a lock. If the repository is modified concurrently, `prune` may +return inaccurate statistics or errors. + +https://github.com/restic/restic/pull/5096 diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index e8473bd6f2f..a74ba23f786 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -149,7 +149,11 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive") } - ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) + if gopts.NoLock && !opts.DryRun { + return errors.Fatal("--no-lock is only applicable in combination with --dry-run for prune command") + } + + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock) if err != nil { return err } From 75f317eaf1d7cc458e8bf0ef7a6030e530a007f0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 21 Oct 2024 21:41:56 +0200 Subject: [PATCH 803/893] sftp: check for broken connection in Load/List operation --- changelog/unreleased/pull-5101 | 9 +++++++++ internal/backend/sftp/sftp.go | 8 ++++++++ 2 files changed, 17 insertions(+) create mode 100644 changelog/unreleased/pull-5101 diff --git a/changelog/unreleased/pull-5101 b/changelog/unreleased/pull-5101 new file mode 100644 index 00000000000..f784d0c4721 --- /dev/null +++ b/changelog/unreleased/pull-5101 @@ -0,0 +1,9 @@ +Bugfix: Do not retry load/list operation is SFTP connection is broken + +When using restic with the SFTP backend, backend operations that load +a file or list files were retried even if the SFTP connection is broken. + +This has been fixed now. 
+ +https://github.com/restic/restic/pull/5101 +https://forum.restic.net/t/restic-hanging-on-backup/8559/2 diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index efbd0c8d5b7..6b9620a362b 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -421,6 +421,10 @@ func (r *SFTP) checkNoSpace(dir string, size int64, origErr error) error { // Load runs fn with a reader that yields the contents of the file at h at the // given offset. func (r *SFTP) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + if err := r.clientError(); err != nil { + return err + } + return util.DefaultLoad(ctx, h, length, offset, r.openReader, func(rd io.Reader) error { if length == 0 || !feature.Flag.Enabled(feature.BackendErrorRedesign) { return fn(rd) @@ -490,6 +494,10 @@ func (r *SFTP) Remove(_ context.Context, h backend.Handle) error { // List runs fn for each file in the backend which has the type t. When an // error occurs (or fn returns an error), List stops and returns it. func (r *SFTP) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error { + if err := r.clientError(); err != nil { + return err + } + basedir, subdirs := r.Basedir(t) walker := r.c.Walk(basedir) for { From 3800eac54bedca06d8f5e32beca7fd4c2ece8d28 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 16:22:32 +0100 Subject: [PATCH 804/893] prepare-release: improve handling of release from non-master branch The final push command now states the correct branch to push. --- helpers/prepare-release/main.go | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go index ba3de38a521..607d16936eb 100644 --- a/helpers/prepare-release/main.go +++ b/helpers/prepare-release/main.go @@ -31,7 +31,7 @@ var opts = struct { var versionRegex = regexp.MustCompile(`^\d+\.\d+\.\d+$`) func init() { - pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches as 'master'") + pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches than 'master'") pflag.BoolVar(&opts.IgnoreUncommittedChanges, "ignore-uncommitted-changes", false, "allow uncommitted changes") pflag.BoolVar(&opts.IgnoreChangelogVersion, "ignore-changelog-version", false, "ignore missing entry in CHANGELOG.md") pflag.BoolVar(&opts.IgnoreChangelogReleaseDate, "ignore-changelog-release-date", false, "ignore missing subdir with date in changelog/") @@ -128,17 +128,22 @@ func uncommittedChanges(dirs ...string) string { return string(changes) } -func preCheckBranchMaster() { - if opts.IgnoreBranchName { - return - } - +func getBranchName() string { branch, err := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output() if err != nil { die("error running 'git': %v", err) } - if strings.TrimSpace(string(branch)) != "master" { + return strings.TrimSpace(string(branch)) +} + +func preCheckBranchMaster() { + if opts.IgnoreBranchName { + return + } + + branch := getBranchName() + if branch != "master" { die("wrong branch: %s", branch) } } @@ -449,6 +454,7 @@ func main() { } preCheckBranchMaster() + branch := getBranchName() preCheckUncommittedChanges() preCheckVersionExists() preCheckDockerBuilderGoVersion() @@ -485,5 +491,5 @@ func main() { msg("done, output dir is %v", opts.OutputDir) - msg("now run:\n\ngit push --tags origin master\n%s\n\nrm -rf %q", dockerCmds, 
sourceDir) + msg("now run:\n\ngit push --tags origin %s\n%s\n\nrm -rf %q", branch, dockerCmds, sourceDir) } From d46525a51bbe519214637b396a7c64fbdcd2c2c0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 16:36:23 +0100 Subject: [PATCH 805/893] fix double printf usage --- cmd/restic/cmd_rewrite.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 7788016b78e..aa6dc490361 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -2,7 +2,6 @@ package main import ( "context" - "fmt" "time" "github.com/spf13/cobra" @@ -140,7 +139,7 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti if selectByName(path) { return node } - Verbosef(fmt.Sprintf("excluding %s\n", path)) + Verbosef("excluding %s\n", path) return nil } From 7bfe3d99ae2d2c2bb353016e357c4cc9f596a05b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 1 Nov 2024 18:56:11 +0100 Subject: [PATCH 806/893] fs: fallback to low privilege security descriptors on access denied --- changelog/unreleased/issue-5003 | 14 ++++++++++++++ internal/fs/sd_windows.go | 22 ++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 changelog/unreleased/issue-5003 diff --git a/changelog/unreleased/issue-5003 b/changelog/unreleased/issue-5003 new file mode 100644 index 00000000000..d02b06bc71e --- /dev/null +++ b/changelog/unreleased/issue-5003 @@ -0,0 +1,14 @@ +Bugfix: fix metadata errors during backup of removable disks on Windows + +Since restic 0.17.0, backups of removable disks on Windows could report +errors with retrieving metadata like shown below. + +``` +error: incomplete metadata for d:\filename: get named security info failed with: Access is denied. +``` + +This has now been fixed. + +https://github.com/restic/restic/issues/5003 +https://github.com/restic/restic/pull/5123 +https://forum.restic.net/t/backing-up-a-folder-from-a-veracrypt-volume-brings-up-errors-since-restic-v17-0/8444 diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 0004f1809f8..a39c06f2c7b 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -54,6 +54,15 @@ func GetSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err sd, err = getNamedSecurityInfoLow(filePath) } else { sd, err = getNamedSecurityInfoHigh(filePath) + // Fallback to the low privilege version when receiving an access denied error. + // For some reason the ERROR_PRIVILEGE_NOT_HELD error is not returned for removable media + // but instead an access denied error is returned. Workaround that by just retrying with + // the low privilege version, but don't switch privileges as we cannot distinguish this + // case from actual access denied errors. 
+ // see https://github.com/restic/restic/issues/5003#issuecomment-2452314191 for details + if err != nil && isAccessDeniedError(err) { + sd, err = getNamedSecurityInfoLow(filePath) + } } if err != nil { if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { @@ -114,6 +123,10 @@ func SetSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { err = setNamedSecurityInfoLow(filePath, dacl) } else { err = setNamedSecurityInfoHigh(filePath, owner, group, dacl, sacl) + // See corresponding fallback in getSecurityDescriptor for an explanation + if err != nil && isAccessDeniedError(err) { + err = setNamedSecurityInfoLow(filePath, dacl) + } } if err != nil { @@ -174,6 +187,15 @@ func isHandlePrivilegeNotHeldError(err error) bool { return false } +// isAccessDeniedError checks if the error is ERROR_ACCESS_DENIED +func isAccessDeniedError(err error) bool { + if errno, ok := err.(syscall.Errno); ok { + // Compare the error code to the expected value + return errno == windows.ERROR_ACCESS_DENIED + } + return false +} + // SecurityDescriptorBytesToStruct converts the security descriptor bytes representation // into a pointer to windows SECURITY_DESCRIPTOR. func SecurityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) { From 06ba4af436b34578cbad75c16456a859f67a0ebe Mon Sep 17 00:00:00 2001 From: "Leo R. Lundgren" Date: Sun, 3 Nov 2024 22:53:09 +0100 Subject: [PATCH 807/893] doc: Polish changelogs before release --- changelog/unreleased/issue-4971 | 6 ++---- changelog/unreleased/issue-5003 | 4 ++-- changelog/unreleased/pull-5096 | 9 +++++---- changelog/unreleased/pull-5101 | 11 +++++------ 4 files changed, 14 insertions(+), 16 deletions(-) diff --git a/changelog/unreleased/issue-4971 b/changelog/unreleased/issue-4971 index 9ab529408a1..235d18cb53b 100644 --- a/changelog/unreleased/issue-4971 +++ b/changelog/unreleased/issue-4971 @@ -1,9 +1,7 @@ Bugfix: Fix unusable `mount` on macOS Sonoma -On macOS Sonoma when using fuse-t, it was not possible to access files in -a mounted repository. - -This issue has been resolved. +On macOS Sonoma when using FUSE-T, it was not possible to access files in +a mounted repository. This issue is now resolved. https://github.com/restic/restic/issues/4971 https://github.com/restic/restic/pull/5048 diff --git a/changelog/unreleased/issue-5003 b/changelog/unreleased/issue-5003 index d02b06bc71e..f88ed3113ed 100644 --- a/changelog/unreleased/issue-5003 +++ b/changelog/unreleased/issue-5003 @@ -1,6 +1,6 @@ -Bugfix: fix metadata errors during backup of removable disks on Windows +Bugfix: Fix metadata errors during backup of removable disks on Windows -Since restic 0.17.0, backups of removable disks on Windows could report +Since restic 0.17.0, backing up removable disks on Windows could report errors with retrieving metadata like shown below. ``` diff --git a/changelog/unreleased/pull-5096 b/changelog/unreleased/pull-5096 index d1e1d09b26b..b1cc6edd33a 100644 --- a/changelog/unreleased/pull-5096 +++ b/changelog/unreleased/pull-5096 @@ -1,7 +1,8 @@ -Enhancement: Allow prune dry-run without lock +Enhancement: Allow `prune --dry-run` without lock -The `prune --dry-run --no-lock` now allows performing a dry-run without -taking a lock. If the repository is modified concurrently, `prune` may -return inaccurate statistics or errors. +The `prune --dry-run --no-lock` now allows performing a dry-run +without locking the repository. Note that if the repository is +modified concurrently, `prune` may return inaccurate statistics +or errors. 
https://github.com/restic/restic/pull/5096 diff --git a/changelog/unreleased/pull-5101 b/changelog/unreleased/pull-5101 index f784d0c4721..4152eb185f7 100644 --- a/changelog/unreleased/pull-5101 +++ b/changelog/unreleased/pull-5101 @@ -1,9 +1,8 @@ -Bugfix: Do not retry load/list operation is SFTP connection is broken +Bugfix: Do not retry load/list operation if SFTP connection is broken -When using restic with the SFTP backend, backend operations that load -a file or list files were retried even if the SFTP connection is broken. - -This has been fixed now. +When using restic with the SFTP backend, backend operations that load a +file or list files were retried even if the SFTP connection was broken. +This has now been fixed. https://github.com/restic/restic/pull/5101 -https://forum.restic.net/t/restic-hanging-on-backup/8559/2 +https://forum.restic.net/t/restic-hanging-on-backup/8559 From 83480246644098f2f41fb32ba07b885e0af9248d Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 8 Nov 2024 20:36:25 +0100 Subject: [PATCH 808/893] Prepare changelog for 0.17.3 --- changelog/{unreleased => 0.17.3_2024-11-08}/issue-4971 | 0 changelog/{unreleased => 0.17.3_2024-11-08}/issue-5003 | 0 changelog/{unreleased => 0.17.3_2024-11-08}/issue-5107 | 0 changelog/{unreleased => 0.17.3_2024-11-08}/pull-5096 | 0 changelog/{unreleased => 0.17.3_2024-11-08}/pull-5101 | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename changelog/{unreleased => 0.17.3_2024-11-08}/issue-4971 (100%) rename changelog/{unreleased => 0.17.3_2024-11-08}/issue-5003 (100%) rename changelog/{unreleased => 0.17.3_2024-11-08}/issue-5107 (100%) rename changelog/{unreleased => 0.17.3_2024-11-08}/pull-5096 (100%) rename changelog/{unreleased => 0.17.3_2024-11-08}/pull-5101 (100%) diff --git a/changelog/unreleased/issue-4971 b/changelog/0.17.3_2024-11-08/issue-4971 similarity index 100% rename from changelog/unreleased/issue-4971 rename to changelog/0.17.3_2024-11-08/issue-4971 diff --git a/changelog/unreleased/issue-5003 b/changelog/0.17.3_2024-11-08/issue-5003 similarity index 100% rename from changelog/unreleased/issue-5003 rename to changelog/0.17.3_2024-11-08/issue-5003 diff --git a/changelog/unreleased/issue-5107 b/changelog/0.17.3_2024-11-08/issue-5107 similarity index 100% rename from changelog/unreleased/issue-5107 rename to changelog/0.17.3_2024-11-08/issue-5107 diff --git a/changelog/unreleased/pull-5096 b/changelog/0.17.3_2024-11-08/pull-5096 similarity index 100% rename from changelog/unreleased/pull-5096 rename to changelog/0.17.3_2024-11-08/pull-5096 diff --git a/changelog/unreleased/pull-5101 b/changelog/0.17.3_2024-11-08/pull-5101 similarity index 100% rename from changelog/unreleased/pull-5101 rename to changelog/0.17.3_2024-11-08/pull-5101 From 633883bdb6554f2ddc826c15283f09d72736730b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 8 Nov 2024 20:36:25 +0100 Subject: [PATCH 809/893] Generate CHANGELOG.md for 0.17.3 --- CHANGELOG.md | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5e638c51a4..7ab47f11dc5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ # Table of Contents +* [Changelog for 0.17.3](#changelog-for-restic-0173-2024-11-08) * [Changelog for 0.17.2](#changelog-for-restic-0172-2024-10-27) * [Changelog for 0.17.1](#changelog-for-restic-0171-2024-09-05) * [Changelog for 0.17.0](#changelog-for-restic-0170-2024-07-26) @@ -37,6 +38,77 @@ * [Changelog for 
0.6.0](#changelog-for-restic-060-2017-05-29) +# Changelog for restic 0.17.3 (2024-11-08) +The following sections list the changes in restic 0.17.3 relevant to +restic users. The changes are ordered by importance. + +## Summary + + * Fix #4971: Fix unusable `mount` on macOS Sonoma + * Fix #5003: Fix metadata errors during backup of removable disks on Windows + * Fix #5101: Do not retry load/list operation if SFTP connection is broken + * Fix #5107: Fix metadata error on Windows for backups using VSS + * Enh #5096: Allow `prune --dry-run` without lock + +## Details + + * Bugfix #4971: Fix unusable `mount` on macOS Sonoma + + On macOS Sonoma when using FUSE-T, it was not possible to access files in a + mounted repository. This issue is now resolved. + + https://github.com/restic/restic/issues/4971 + https://github.com/restic/restic/pull/5048 + + * Bugfix #5003: Fix metadata errors during backup of removable disks on Windows + + Since restic 0.17.0, backing up removable disks on Windows could report errors + with retrieving metadata like shown below. + + ``` + error: incomplete metadata for d:\filename: get named security info failed with: Access is denied. + ``` + + This has now been fixed. + + https://github.com/restic/restic/issues/5003 + https://github.com/restic/restic/pull/5123 + https://forum.restic.net/t/backing-up-a-folder-from-a-veracrypt-volume-brings-up-errors-since-restic-v17-0/8444 + + * Bugfix #5101: Do not retry load/list operation if SFTP connection is broken + + When using restic with the SFTP backend, backend operations that load a file or + list files were retried even if the SFTP connection was broken. This has now + been fixed. + + https://github.com/restic/restic/pull/5101 + https://forum.restic.net/t/restic-hanging-on-backup/8559 + + * Bugfix #5107: Fix metadata error on Windows for backups using VSS + + Since restic 0.17.2, when creating a backup on Windows using + `--use-fs-snapshot`, restic would report an error like the following: + + ``` + error: incomplete metadata for C:\: get EA failed while opening file handle for path \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX\, with: The process cannot access the file because it is being used by another process. + ``` + + This has now been fixed by correctly handling paths that refer to volume shadow + copy snapshots. + + https://github.com/restic/restic/issues/5107 + https://github.com/restic/restic/pull/5110 + https://github.com/restic/restic/pull/5112 + + * Enhancement #5096: Allow `prune --dry-run` without lock + + The `prune --dry-run --no-lock` now allows performing a dry-run without locking + the repository. Note that if the repository is modified concurrently, `prune` + may return inaccurate statistics or errors. + + https://github.com/restic/restic/pull/5096 + + # Changelog for restic 0.17.2 (2024-10-27) The following sections list the changes in restic 0.17.2 relevant to restic users. The changes are ordered by importance. 
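The fix for #5003 summarized above hinges on recognizing the Windows ERROR_ACCESS_DENIED code behind whatever error the security-info call returns. A rough, platform-neutral sketch of that kind of check follows; `errorAccessDenied` is a local stand-in for `windows.ERROR_ACCESS_DENIED` (numerically 5) so the sketch compiles anywhere, and `errors.As` is used so a wrapped errno is also found, whereas the actual helper in `sd_windows.go` uses a direct type assertion on `syscall.Errno`.

```
package main

import (
	"errors"
	"fmt"
	"syscall"
)

// errorAccessDenied is a local stand-in for windows.ERROR_ACCESS_DENIED (5),
// so that this sketch also compiles outside of Windows.
const errorAccessDenied = syscall.Errno(5)

// isAccessDeniedError reports whether err carries the access denied errno.
func isAccessDeniedError(err error) bool {
	var errno syscall.Errno
	if errors.As(err, &errno) {
		return errno == errorAccessDenied
	}
	return false
}

func main() {
	err := fmt.Errorf("get named security info failed with: %w", errorAccessDenied)
	fmt.Println(isAccessDeniedError(err)) // true
}
```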
From bc64921a8ea73dfaeaf4d9b66676a76998e144fc Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 8 Nov 2024 20:36:36 +0100 Subject: [PATCH 810/893] Add version for 0.17.3 --- VERSION | 2 +- cmd/restic/global.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index a799160352f..884e9604b53 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.17.2-dev +0.17.3 diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 2b67708a86b..56b1eba3752 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -47,7 +47,7 @@ import ( // to a missing backend storage location or config file var ErrNoRepository = errors.New("repository does not exist") -var version = "0.17.2-dev (compiled manually)" +var version = "0.17.3" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" From e2a98aa9557867309973db84651ba76ce5d812eb Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 8 Nov 2024 20:36:48 +0100 Subject: [PATCH 811/893] Set development version for 0.17.3 --- VERSION | 2 +- cmd/restic/global.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index 884e9604b53..e2d1ad6ac90 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.17.3 +0.17.3-dev diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 56b1eba3752..133cf374442 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -47,7 +47,7 @@ import ( // to a missing backend storage location or config file var ErrNoRepository = errors.New("repository does not exist") -var version = "0.17.3" +var version = "0.17.3-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" From a73ae7ba1a28743aa8a492d932745b66da5c04d6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 11 Nov 2024 21:37:28 +0100 Subject: [PATCH 812/893] restore: improve error if timestamp fails to restore --- internal/fs/node.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/fs/node.go b/internal/fs/node.go index d3619432212..50781b4c6e8 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -1,6 +1,7 @@ package fs import ( + "fmt" "os" "os/user" "strconv" @@ -296,7 +297,7 @@ func nodeRestoreTimestamps(node *restic.Node, path string) error { mtime := node.ModTime.UnixNano() if err := utimesNano(fixpath(path), atime, mtime, node.Type); err != nil { - return &os.PathError{Op: "UtimesNano", Path: path, Err: err} + return fmt.Errorf("failed to restore timestamp of %q: %w", path, err) } return nil } From 82c5043fc953746ea57777755c0c606b6cafdbdd Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 11 Nov 2024 21:49:26 +0100 Subject: [PATCH 813/893] Reduce checkboxes in PR checklist (#5120) The basics around how to format commits and PR settings are primarily relevant when opening a PR for the first time. But for repeated contributors it is tedious to always tick those checkboxes. Co-authored-by: rawtaz --- .github/PULL_REQUEST_TEMPLATE.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d3c6b0d26cd..9faeb3a9d6d 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -28,13 +28,15 @@ Checklist You do not need to check all the boxes below all at once. Feel free to take your time and add more commits. If you're done and ready for review, please check the last box. 
Enable a checkbox by replacing [ ] with [x]. + +Please always follow these steps: +- Read the [contribution guidelines](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#providing-patches). +- Enable [maintainer edits](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork). +- Run `gofmt` on the code in all commits. +- Format all commit messages in the same style as [the other commits in the repository](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#git-commits). --> -- [ ] I have read the [contribution guidelines](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#providing-patches). -- [ ] I have [enabled maintainer edits](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork). - [ ] I have added tests for all code changes. - [ ] I have added documentation for relevant changes (in the manual). - [ ] There's a new file in `changelog/unreleased/` that describes the changes for our users (see [template](https://github.com/restic/restic/blob/master/changelog/TEMPLATE)). -- [ ] I have run `gofmt` on the code in all commits. -- [ ] All commit messages are formatted in the same style as [the other commits in the repo](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#git-commits). - [ ] I'm done! This pull request is ready for review. From dfbd4fb983f0f789da8130906ea0bfd2ae51e717 Mon Sep 17 00:00:00 2001 From: Richard Grover <38992201+richgrov@users.noreply.github.com> Date: Wed, 13 Nov 2024 08:02:22 -0700 Subject: [PATCH 814/893] Error if AZURE_ACCOUNT_NAME not set --- internal/backend/azure/azure.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index c1c049a948f..27390ee13fe 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -62,6 +62,11 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { } else { endpointSuffix = "core.windows.net" } + + if cfg.AccountName == "" { + return nil, errors.Fatalf("unable to open Azure backend: Account name ($AZURE_ACCOUNT_NAME) is empty") + } + url := fmt.Sprintf("https://%s.blob.%s/%s", cfg.AccountName, endpointSuffix, cfg.Container) opts := &azContainer.ClientOptions{ ClientOptions: azcore.ClientOptions{ From 49ef3ebec31c8676421179527c32aede7f70f990 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 15 Nov 2024 17:52:09 +0100 Subject: [PATCH 815/893] restic: add missing space in error message --- internal/restic/tree.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/restic/tree.go b/internal/restic/tree.go index c4125653b12..f406b489f8c 100644 --- a/internal/restic/tree.go +++ b/internal/restic/tree.go @@ -162,7 +162,7 @@ func NewTreeJSONBuilder() *TreeJSONBuilder { func (builder *TreeJSONBuilder) AddNode(node *Node) error { if node.Name <= builder.lastName { - return fmt.Errorf("node %q, last%q: %w", node.Name, builder.lastName, ErrTreeNotOrdered) + return fmt.Errorf("node %q, last %q: %w", node.Name, builder.lastName, ErrTreeNotOrdered) } if builder.lastName != "" { _ = builder.buf.WriteByte(',') From f8031561f276e7ffc1ebf8ab6f282d18bf3f324c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 2 Nov 2024 17:41:40 +0100 Subject: [PATCH 816/893] archiver: deduplicate error filtering --- internal/archiver/archiver.go | 33 ++++++++++++--------------------- 1 file changed, 12 
insertions(+), 21 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index f7a9f275009..efa0d2945dc 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -435,6 +435,13 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous return futureNode{}, false, err } + filterError := func(err error) (futureNode, bool, error) { + err = arch.error(abstarget, err) + if err != nil { + return futureNode{}, false, errors.WithStack(err) + } + return futureNode{}, true, nil + } // exclude files by path before running Lstat to reduce number of lstat calls if !arch.SelectByName(abstarget) { debug.Log("%v is excluded by path", target) @@ -445,11 +452,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous fi, err := arch.FS.Lstat(target) if err != nil { debug.Log("lstat() for %v returned error: %v", target, err) - err = arch.error(abstarget, err) - if err != nil { - return futureNode{}, false, errors.WithStack(err) - } - return futureNode{}, true, nil + return filterError(err) } if !arch.Select(abstarget, fi, arch.FS) { debug.Log("%v is excluded", target) @@ -497,33 +500,21 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous file, err := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) if err != nil { debug.Log("Openfile() for %v returned error: %v", target, err) - err = arch.error(abstarget, err) - if err != nil { - return futureNode{}, false, errors.WithStack(err) - } - return futureNode{}, true, nil + return filterError(err) } fi, err = file.Stat() if err != nil { debug.Log("stat() on opened file %v returned error: %v", target, err) _ = file.Close() - err = arch.error(abstarget, err) - if err != nil { - return futureNode{}, false, errors.WithStack(err) - } - return futureNode{}, true, nil + return filterError(err) } // make sure it's still a file if !fi.Mode().IsRegular() { - err = errors.Errorf("file %v changed type, refusing to archive", fi.Name()) + err = errors.Errorf("file %v changed type, refusing to archive", target) _ = file.Close() - err = arch.error(abstarget, err) - if err != nil { - return futureNode{}, false, err - } - return futureNode{}, true, nil + return filterError(err) } // Save will close the file, we don't need to do that From 548fa07577e5733a6acf6e472e88c2c3cee9f1b8 Mon Sep 17 00:00:00 2001 From: Richard Grover <38992201+richgrov@users.noreply.github.com> Date: Fri, 15 Nov 2024 14:46:34 -0700 Subject: [PATCH 817/893] Add changelog info --- changelog/unreleased/pull-5141 | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 changelog/unreleased/pull-5141 diff --git a/changelog/unreleased/pull-5141 b/changelog/unreleased/pull-5141 new file mode 100644 index 00000000000..7f71f2269c5 --- /dev/null +++ b/changelog/unreleased/pull-5141 @@ -0,0 +1,7 @@ +Enhancement: Provide clear error message if AZURE_ACCOUNT_NAME is not set + +If AZURE_ACCOUNT_NAME is not set, any command related to an Azure repository +would result in a misleading networking error. Restic will now detect this and +provide a clear warning that the variable is not defined. + +https://github.com/restic/restic/pull/5141 From b402e8a6fc7685c6bf8061ff0917ee5735bc19c2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 2 Nov 2024 17:44:55 +0100 Subject: [PATCH 818/893] fs: stricter enforcement to only call readdir on a directory Use O_DIRECTORY to prevent opening any other than a directory in readdirnames. 
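The commit above relies on open(2) rejecting anything that is not a directory when O_DIRECTORY is passed. A minimal, Linux-only sketch of that behaviour follows (not part of the patch; the temporary file merely stands in for any non-directory, such as the fifo used in the new test):

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	// Create something that is definitely not a directory.
	f, err := os.CreateTemp("", "not-a-dir")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	_ = f.Close()

	// With O_DIRECTORY the kernel refuses the open outright, so a later
	// Readdirnames can never block on or misread a non-directory.
	_, err = os.OpenFile(f.Name(), os.O_RDONLY|syscall.O_DIRECTORY, 0)
	fmt.Println(errors.Is(err, syscall.ENOTDIR)) // prints: true
}
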
--- internal/fs/const_unix.go | 3 +++ internal/fs/const_windows.go | 5 +++++ internal/fs/file.go | 5 +++-- internal/fs/file_unix_test.go | 22 ++++++++++++++++++++++ 4 files changed, 33 insertions(+), 2 deletions(-) create mode 100644 internal/fs/file_unix_test.go diff --git a/internal/fs/const_unix.go b/internal/fs/const_unix.go index fe84cda176d..e570c255370 100644 --- a/internal/fs/const_unix.go +++ b/internal/fs/const_unix.go @@ -7,3 +7,6 @@ import "syscall" // O_NOFOLLOW instructs the kernel to not follow symlinks when opening a file. const O_NOFOLLOW int = syscall.O_NOFOLLOW + +// O_DIRECTORY instructs the kernel to only open directories. +const O_DIRECTORY int = syscall.O_DIRECTORY diff --git a/internal/fs/const_windows.go b/internal/fs/const_windows.go index f1b263a54a4..4c29e0b9d59 100644 --- a/internal/fs/const_windows.go +++ b/internal/fs/const_windows.go @@ -3,5 +3,10 @@ package fs +// TODO honor flags when opening files + // O_NOFOLLOW is a noop on Windows. const O_NOFOLLOW int = 0 + +// O_DIRECTORY is a noop on Windows. +const O_DIRECTORY int = 0 diff --git a/internal/fs/file.go b/internal/fs/file.go index 8d60ed15978..c60625a07ab 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -64,9 +64,10 @@ func ResetPermissions(path string) error { return nil } -// Readdirnames returns a list of file in a directory. Flags are passed to fs.OpenFile. O_RDONLY is implied. +// Readdirnames returns a list of file in a directory. Flags are passed to fs.OpenFile. +// O_RDONLY and O_DIRECTORY are implied. func Readdirnames(filesystem FS, dir string, flags int) ([]string, error) { - f, err := filesystem.OpenFile(dir, O_RDONLY|flags, 0) + f, err := filesystem.OpenFile(dir, O_RDONLY|O_DIRECTORY|flags, 0) if err != nil { return nil, fmt.Errorf("openfile for readdirnames failed: %w", err) } diff --git a/internal/fs/file_unix_test.go b/internal/fs/file_unix_test.go new file mode 100644 index 00000000000..00d68abb8e5 --- /dev/null +++ b/internal/fs/file_unix_test.go @@ -0,0 +1,22 @@ +//go:build unix + +package fs + +import ( + "path/filepath" + "syscall" + "testing" + + "github.com/restic/restic/internal/errors" + rtest "github.com/restic/restic/internal/test" +) + +func TestReaddirnamesFifo(t *testing.T) { + // should not block when reading from a fifo instead of a directory + tempdir := t.TempDir() + fifoFn := filepath.Join(tempdir, "fifo") + rtest.OK(t, mkfifo(fifoFn, 0o600)) + + _, err := Readdirnames(&Local{}, fifoFn, 0) + rtest.Assert(t, errors.Is(err, syscall.ENOTDIR), "unexpected error %v", err) +} From 623ba92b986c856981f30d0f7351668e9532c8f0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 2 Nov 2024 17:47:54 +0100 Subject: [PATCH 819/893] fs: drop unused permission parameter from OpenFile --- internal/archiver/archiver.go | 2 +- internal/archiver/archiver_test.go | 16 ++++++++-------- internal/archiver/exclude.go | 2 +- internal/archiver/file_saver_test.go | 2 +- internal/fs/file.go | 2 +- internal/fs/fs_local.go | 4 ++-- internal/fs/fs_local_vss.go | 4 ++-- internal/fs/fs_local_vss_test.go | 2 +- internal/fs/fs_reader.go | 2 +- internal/fs/fs_reader_test.go | 8 ++++---- internal/fs/fs_track.go | 4 ++-- internal/fs/interface.go | 2 +- 12 files changed, 25 insertions(+), 25 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index efa0d2945dc..fb03c88fced 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -497,7 +497,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous // 
reopen file and do an fstat() on the open file to check it is still // a file (and has not been exchanged for e.g. a symlink) - file, err := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + file, err := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW) if err != nil { debug.Log("Openfile() for %v returned error: %v", target, err) return filterError(err) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 97d27dc649f..ff072fa3605 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -76,7 +76,7 @@ func saveFile(t testing.TB, repo archiverRepo, filename string, filesystem fs.FS startCallback = true } - file, err := arch.FS.OpenFile(filename, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + file, err := arch.FS.OpenFile(filename, fs.O_RDONLY|fs.O_NOFOLLOW) if err != nil { t.Fatal(err) } @@ -1665,8 +1665,8 @@ type MockFS struct { bytesRead map[string]int // tracks bytes read from all opened files } -func (m *MockFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { - f, err := m.FS.OpenFile(name, flag, perm) +func (m *MockFS) OpenFile(name string, flag int) (fs.File, error) { + f, err := m.FS.OpenFile(name, flag) if err != nil { return f, err } @@ -2056,12 +2056,12 @@ type TrackFS struct { m sync.Mutex } -func (m *TrackFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { +func (m *TrackFS) OpenFile(name string, flag int) (fs.File, error) { m.m.Lock() m.opened[name]++ m.m.Unlock() - return m.FS.OpenFile(name, flag, perm) + return m.FS.OpenFile(name, flag) } type failSaveRepo struct { @@ -2228,9 +2228,9 @@ func (fs *StatFS) Lstat(name string) (os.FileInfo, error) { return fs.FS.Lstat(name) } -func (fs *StatFS) OpenFile(name string, flags int, perm os.FileMode) (fs.File, error) { +func (fs *StatFS) OpenFile(name string, flags int) (fs.File, error) { if fi, ok := fs.OverrideLstat[fixpath(name)]; ok { - f, err := fs.FS.OpenFile(name, flags, perm) + f, err := fs.FS.OpenFile(name, flags) if err != nil { return nil, err } @@ -2242,7 +2242,7 @@ func (fs *StatFS) OpenFile(name string, flags int, perm os.FileMode) (fs.File, e return wrappedFile, nil } - return fs.FS.OpenFile(name, flags, perm) + return fs.FS.OpenFile(name, flags) } type fileStat struct { diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go index 1e855fc3a37..54ced788ae0 100644 --- a/internal/archiver/exclude.go +++ b/internal/archiver/exclude.go @@ -153,7 +153,7 @@ func isDirExcludedByFile(dir, tagFilename, header string, fs fs.FS, warnf func(m // From this stage, errors mean tagFilename exists but it is malformed. // Warnings will be generated so that the user is informed that the // indented ignore-action is not performed. 
- f, err := fs.OpenFile(tf, os.O_RDONLY, 0) + f, err := fs.OpenFile(tf, os.O_RDONLY) if err != nil { warnf("could not open exclusion tagfile: %v", err) return false diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index 5b17eca3797..069cdc17152 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -72,7 +72,7 @@ func TestFileSaver(t *testing.T) { var results []futureNode for _, filename := range files { - f, err := testFs.OpenFile(filename, os.O_RDONLY, 0) + f, err := testFs.OpenFile(filename, os.O_RDONLY) if err != nil { t.Fatal(err) } diff --git a/internal/fs/file.go b/internal/fs/file.go index c60625a07ab..fa395b62845 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -67,7 +67,7 @@ func ResetPermissions(path string) error { // Readdirnames returns a list of file in a directory. Flags are passed to fs.OpenFile. // O_RDONLY and O_DIRECTORY are implied. func Readdirnames(filesystem FS, dir string, flags int) ([]string, error) { - f, err := filesystem.OpenFile(dir, O_RDONLY|O_DIRECTORY|flags, 0) + f, err := filesystem.OpenFile(dir, O_RDONLY|O_DIRECTORY|flags) if err != nil { return nil, fmt.Errorf("openfile for readdirnames failed: %w", err) } diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 5fac88dbb8a..045edf02f60 100644 --- a/internal/fs/fs_local.go +++ b/internal/fs/fs_local.go @@ -25,8 +25,8 @@ func (fs Local) VolumeName(path string) string { // (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, // methods on the returned File can be used for I/O. // If there is an error, it will be of type *PathError. -func (fs Local) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, err := os.OpenFile(fixpath(name), flag, perm) +func (fs Local) OpenFile(name string, flag int) (File, error) { + f, err := os.OpenFile(fixpath(name), flag, 0) if err != nil { return nil, err } diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index dcbda2a847f..aa9f2b89de0 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -127,8 +127,8 @@ func (fs *LocalVss) DeleteSnapshots() { } // OpenFile wraps the Open method of the underlying file system. -func (fs *LocalVss) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return fs.FS.OpenFile(fs.snapshotPath(name), flag, perm) +func (fs *LocalVss) OpenFile(name string, flag int) (File, error) { + return fs.FS.OpenFile(fs.snapshotPath(name), flag) } // Stat wraps the Stat method of the underlying file system. diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index f1a0431185d..7856767ba78 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -331,7 +331,7 @@ func TestVSSFS(t *testing.T) { rtest.OK(t, err) rtest.Equals(t, origFi.Mode(), lstatFi.Mode()) - f, err := localVss.OpenFile(tempfile, os.O_RDONLY, 0) + f, err := localVss.OpenFile(tempfile, os.O_RDONLY) rtest.OK(t, err) data, err := io.ReadAll(f) rtest.OK(t, err) diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 97d4e1660f4..ed8b9a3470c 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -54,7 +54,7 @@ func (fs *Reader) fi() os.FileInfo { // (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, // methods on the returned File can be used for I/O. // If there is an error, it will be of type *os.PathError. 
-func (fs *Reader) OpenFile(name string, flag int, _ os.FileMode) (f File, err error) { +func (fs *Reader) OpenFile(name string, flag int) (f File, err error) { if flag & ^(O_RDONLY|O_NOFOLLOW) != 0 { return nil, pathError("open", name, fmt.Errorf("invalid combination of flags 0x%x", flag)) diff --git a/internal/fs/fs_reader_test.go b/internal/fs/fs_reader_test.go index 442912fe3af..e7020bc9dc5 100644 --- a/internal/fs/fs_reader_test.go +++ b/internal/fs/fs_reader_test.go @@ -16,7 +16,7 @@ import ( ) func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte) { - f, err := fs.OpenFile(filename, O_RDONLY, 0) + f, err := fs.OpenFile(filename, O_RDONLY) if err != nil { t.Fatal(err) } @@ -37,7 +37,7 @@ func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte } func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { - f, err := fs.OpenFile(dir, os.O_RDONLY, 0) + f, err := fs.OpenFile(dir, os.O_RDONLY) if err != nil { t.Fatal(err) } @@ -123,7 +123,7 @@ func TestFSReader(t *testing.T) { { name: "file/Stat", f: func(t *testing.T, fs FS) { - f, err := fs.OpenFile(filename, os.O_RDONLY, 0) + f, err := fs.OpenFile(filename, os.O_RDONLY) if err != nil { t.Fatal(err) } @@ -295,7 +295,7 @@ func TestFSReaderMinFileSize(t *testing.T) { AllowEmptyFile: test.allowEmpty, } - f, err := fs.OpenFile("testfile", os.O_RDONLY, 0) + f, err := fs.OpenFile("testfile", os.O_RDONLY) if err != nil { t.Fatal(err) } diff --git a/internal/fs/fs_track.go b/internal/fs/fs_track.go index 366bbee762a..9912ac45bb9 100644 --- a/internal/fs/fs_track.go +++ b/internal/fs/fs_track.go @@ -16,8 +16,8 @@ type Track struct { } // OpenFile wraps the OpenFile method of the underlying file system. -func (fs Track) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, err := fs.FS.OpenFile(fixpath(name), flag, perm) +func (fs Track) OpenFile(name string, flag int) (File, error) { + f, err := fs.FS.OpenFile(fixpath(name), flag) if err != nil { return nil, err } diff --git a/internal/fs/interface.go b/internal/fs/interface.go index 2967429c0d5..58744bd1dc1 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -9,7 +9,7 @@ import ( // FS bundles all methods needed for a file system. type FS interface { - OpenFile(name string, flag int, perm os.FileMode) (File, error) + OpenFile(name string, flag int) (File, error) Stat(name string) (os.FileInfo, error) Lstat(name string) (os.FileInfo, error) DeviceID(fi os.FileInfo) (deviceID uint64, err error) From 2f2ce9add25c86bea2d2b8f235ae257f7d6a6ba7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 2 Nov 2024 18:09:39 +0100 Subject: [PATCH 820/893] fs: remove Stat from FS interface --- internal/archiver/archiver.go | 57 ++++++++++++++++++-------------- internal/fs/fs_local.go | 6 ---- internal/fs/fs_local_vss.go | 5 --- internal/fs/fs_local_vss_test.go | 13 +++----- internal/fs/fs_reader.go | 6 ---- internal/fs/interface.go | 1 - 6 files changed, 37 insertions(+), 51 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index fb03c88fced..e88c15bd970 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -605,22 +605,6 @@ func join(elem ...string) string { return path.Join(elem...) } -// statDir returns the file info for the directory. Symbolic links are -// resolved. If the target directory is not a directory, an error is returned. 
-func (arch *Archiver) statDir(dir string) (os.FileInfo, error) { - fi, err := arch.FS.Stat(dir) - if err != nil { - return nil, errors.WithStack(err) - } - - tpe := fi.Mode() & (os.ModeType | os.ModeCharDevice) - if tpe != os.ModeDir { - return fi, errors.Errorf("path is not a directory: %v", dir) - } - - return fi, nil -} - // saveTree stores a Tree in the repo, returned is the tree. snPath is the path // within the current snapshot. func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, previous *restic.Tree, complete fileCompleteFunc) (futureNode, int, error) { @@ -631,15 +615,8 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, return futureNode{}, 0, errors.Errorf("FileInfoPath for %v is empty", snPath) } - fi, err := arch.statDir(atree.FileInfoPath) - if err != nil { - return futureNode{}, 0, err - } - - debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath) - // in some cases reading xattrs for directories above the backup source is not allowed - // thus ignore errors for such folders. - node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi, true) + var err error + node, err = arch.dirPathToNode(snPath, atree.FileInfoPath) if err != nil { return futureNode{}, 0, err } @@ -710,6 +687,36 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, return fn, len(nodes), nil } +func (arch *Archiver) dirPathToNode(snPath, target string) (node *restic.Node, err error) { + meta, err := arch.FS.OpenFile(target, fs.O_RDONLY) + if err != nil { + return nil, err + } + defer func() { + cerr := meta.Close() + if err == nil { + err = cerr + } + }() + + debug.Log("%v, reading dir node data from %v", snPath, target) + fi, err := meta.Stat() + if err != nil { + return nil, errors.WithStack(err) + } + + // in some cases reading xattrs for directories above the backup source is not allowed + // thus ignore errors for such folders. + node, err = arch.nodeFromFileInfo(snPath, target, fi, true) + if err != nil { + return nil, err + } + if node.Type != restic.NodeTypeDir { + return nil, errors.Errorf("path is not a directory: %v", target) + } + return node, err +} + // resolveRelativeTargets replaces targets that only contain relative // directories ("." or "../../") with the contents of the directory. Each // element of target is processed with fs.Clean(). diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 045edf02f60..4f8b670900c 100644 --- a/internal/fs/fs_local.go +++ b/internal/fs/fs_local.go @@ -34,12 +34,6 @@ func (fs Local) OpenFile(name string, flag int) (File, error) { return f, nil } -// Stat returns a FileInfo describing the named file. If there is an error, it -// will be of type *PathError. -func (fs Local) Stat(name string) (os.FileInfo, error) { - return os.Stat(fixpath(name)) -} - // Lstat returns the FileInfo structure describing the named file. // If the file is a symbolic link, the returned FileInfo // describes the symbolic link. Lstat makes no attempt to follow the link. diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index aa9f2b89de0..e9bc66657c9 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -131,11 +131,6 @@ func (fs *LocalVss) OpenFile(name string, flag int) (File, error) { return fs.FS.OpenFile(fs.snapshotPath(name), flag) } -// Stat wraps the Stat method of the underlying file system. 
-func (fs *LocalVss) Stat(name string) (os.FileInfo, error) { - return fs.FS.Stat(fs.snapshotPath(name)) -} - // Lstat wraps the Lstat method of the underlying file system. func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return fs.FS.Lstat(fs.snapshotPath(name)) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 7856767ba78..db8d4b13385 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -317,16 +317,12 @@ func TestVSSFS(t *testing.T) { // trigger snapshot creation and // capture FI while file still exists (should already be within the snapshot) - origFi, err := localVss.Stat(tempfile) + origFi, err := localVss.Lstat(tempfile) rtest.OK(t, err) // remove original file rtest.OK(t, os.Remove(tempfile)) - statFi, err := localVss.Stat(tempfile) - rtest.OK(t, err) - rtest.Equals(t, origFi.Mode(), statFi.Mode()) - lstatFi, err := localVss.Lstat(tempfile) rtest.OK(t, err) rtest.Equals(t, origFi.Mode(), lstatFi.Mode()) @@ -336,9 +332,10 @@ func TestVSSFS(t *testing.T) { data, err := io.ReadAll(f) rtest.OK(t, err) rtest.Equals(t, "example", string(data), "unexpected file content") - rtest.OK(t, f.Close()) - node, err := localVss.NodeFromFileInfo(tempfile, statFi, false) + node, err := f.ToNode(false) rtest.OK(t, err) - rtest.Equals(t, node.Mode, statFi.Mode()) + rtest.Equals(t, node.Mode, lstatFi.Mode()) + + rtest.OK(t, f.Close()) } diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index ed8b9a3470c..a4efa8deacb 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -81,12 +81,6 @@ func (fs *Reader) OpenFile(name string, flag int) (f File, err error) { return nil, pathError("open", name, syscall.ENOENT) } -// Stat returns a FileInfo describing the named file. If there is an error, it -// will be of type *os.PathError. -func (fs *Reader) Stat(name string) (os.FileInfo, error) { - return fs.Lstat(name) -} - // Lstat returns the FileInfo structure describing the named file. // If the file is a symbolic link, the returned FileInfo // describes the symbolic link. Lstat makes no attempt to follow the link. diff --git a/internal/fs/interface.go b/internal/fs/interface.go index 58744bd1dc1..899888fb085 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -10,7 +10,6 @@ import ( // FS bundles all methods needed for a file system. type FS interface { OpenFile(name string, flag int) (File, error) - Stat(name string) (os.FileInfo, error) Lstat(name string) (os.FileInfo, error) DeviceID(fi os.FileInfo) (deviceID uint64, err error) ExtendedStat(fi os.FileInfo) ExtendedFileInfo From 48dbefc37e1c5b16ee6cd97fed272a8372b9cbdd Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 2 Nov 2024 20:27:38 +0100 Subject: [PATCH 821/893] fs / archiver: convert to handle based interface The actual implementation still relies on file paths, but with the abstraction layer in place, an FS implementation can ensure atomic file accesses in the future. 
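Before the diff, a rough sketch of the call pattern the new handle-based interface is meant to enable. This is a hypothetical helper written against the fs.FS and fs.File definitions introduced in this patch, not code from the repository; the imports assume it lives inside the repository's internal tree.

package archiver

import (
	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/restic"
)

// sketchBackupOne illustrates the intended flow: open a metadata-only handle
// first (valid for every file type, including symlinks), inspect it, and only
// upgrade the same handle for reading when file content is actually needed.
func sketchBackupOne(filesystem fs.FS, path string) (*restic.Node, error) {
	f, err := filesystem.OpenFile(path, fs.O_NOFOLLOW, true)
	if err != nil {
		return nil, err
	}
	defer func() { _ = f.Close() }()

	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}

	if fi.Mode().IsRegular() {
		// Reuse the handle instead of re-opening by path, so metadata and
		// content refer to the same file once an implementation becomes
		// truly handle based.
		if err := f.MakeReadable(); err != nil {
			return nil, err
		}
		// ... read the file content from f here ...
	}

	// ToNode produces node metadata consistent with the earlier Stat call.
	return f.ToNode(false)
}
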
--- internal/archiver/archiver.go | 86 ++++++---- internal/archiver/archiver_test.go | 207 ++++++++++++------------ internal/archiver/archiver_unix_test.go | 8 +- internal/archiver/exclude.go | 8 +- internal/archiver/file_saver.go | 13 +- internal/archiver/file_saver_test.go | 15 +- internal/fs/file.go | 2 +- internal/fs/fs_local.go | 110 +++++++++++-- internal/fs/fs_local_vss.go | 11 +- internal/fs/fs_local_vss_test.go | 2 +- internal/fs/fs_reader.go | 33 ++-- internal/fs/fs_reader_test.go | 8 +- internal/fs/fs_track.go | 6 +- internal/fs/interface.go | 25 ++- internal/fs/node_test.go | 60 ++----- internal/fs/node_unix_test.go | 16 +- internal/fs/node_windows_test.go | 8 +- internal/restic/tree_test.go | 20 +-- 18 files changed, 355 insertions(+), 283 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index e88c15bd970..ae3edaf44c6 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -66,6 +66,11 @@ func (s *ItemStats) Add(other ItemStats) { s.TreeSizeInRepo += other.TreeSizeInRepo } +// ToNoder returns a restic.Node for a File. +type ToNoder interface { + ToNode(ignoreXattrListError bool) (*restic.Node, error) +} + type archiverRepo interface { restic.Loader restic.BlobSaver @@ -257,8 +262,8 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I } // nodeFromFileInfo returns the restic node from an os.FileInfo. -func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - node, err := arch.FS.NodeFromFileInfo(filename, fi, ignoreXattrListError) +func (arch *Archiver) nodeFromFileInfo(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*restic.Node, error) { + node, err := meta.ToNode(ignoreXattrListError) if !arch.WithAtime { node.AccessTime = node.ModTime } @@ -308,20 +313,14 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { // saveDir stores a directory in the repo and returns the node. snPath is the // path within the current snapshot. 
-func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete fileCompleteFunc) (d futureNode, err error) { +func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, meta fs.File, previous *restic.Tree, complete fileCompleteFunc) (d futureNode, err error) { debug.Log("%v %v", snPath, dir) - treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi, false) + treeNode, names, err := arch.dirToNodeAndEntries(snPath, dir, meta) if err != nil { return futureNode{}, err } - names, err := fs.Readdirnames(arch.FS, dir, fs.O_NOFOLLOW) - if err != nil { - return futureNode{}, err - } - sort.Strings(names) - nodes := make([]futureNode, 0, len(names)) for _, name := range names { @@ -359,6 +358,29 @@ func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi return fn, nil } +func (arch *Archiver) dirToNodeAndEntries(snPath, dir string, meta fs.File) (node *restic.Node, names []string, err error) { + err = meta.MakeReadable() + if err != nil { + return nil, nil, fmt.Errorf("openfile for readdirnames failed: %w", err) + } + + node, err = arch.nodeFromFileInfo(snPath, dir, meta, false) + if err != nil { + return nil, nil, err + } + if node.Type != restic.NodeTypeDir { + return nil, nil, fmt.Errorf("directory %v changed type, refusing to archive", snPath) + } + + names, err = meta.Readdirnames(-1) + if err != nil { + return nil, nil, fmt.Errorf("readdirnames %v failed: %w", dir, err) + } + sort.Strings(names) + + return node, names, nil +} + // futureNode holds a reference to a channel that returns a FutureNodeResult // or a reference to an already existing result. If the result is available // immediately, then storing a reference directly requires less memory than @@ -448,8 +470,23 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous return futureNode{}, true, nil } + meta, err := arch.FS.OpenFile(target, fs.O_NOFOLLOW, true) + if err != nil { + debug.Log("open metadata for %v returned error: %v", target, err) + return filterError(err) + } + closeFile := true + defer func() { + if closeFile { + cerr := meta.Close() + if err == nil { + err = cerr + } + } + }() + // get file info and run remaining select functions that require file information - fi, err := arch.FS.Lstat(target) + fi, err := meta.Stat() if err != nil { debug.Log("lstat() for %v returned error: %v", target, err) return filterError(err) @@ -470,7 +507,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous debug.Log("%v hasn't changed, using old list of blobs", target) arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start)) arch.CompleteBlob(previous.Size) - node, err := arch.nodeFromFileInfo(snPath, target, fi, false) + node, err := arch.nodeFromFileInfo(snPath, target, meta, false) if err != nil { return futureNode{}, false, err } @@ -497,28 +534,28 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous // reopen file and do an fstat() on the open file to check it is still // a file (and has not been exchanged for e.g. 
a symlink) - file, err := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW) + err := meta.MakeReadable() if err != nil { - debug.Log("Openfile() for %v returned error: %v", target, err) + debug.Log("MakeReadable() for %v returned error: %v", target, err) return filterError(err) } - fi, err = file.Stat() + fi, err := meta.Stat() if err != nil { debug.Log("stat() on opened file %v returned error: %v", target, err) - _ = file.Close() return filterError(err) } // make sure it's still a file if !fi.Mode().IsRegular() { err = errors.Errorf("file %v changed type, refusing to archive", target) - _ = file.Close() return filterError(err) } + closeFile = false + // Save will close the file, we don't need to do that - fn = arch.fileSaver.Save(ctx, snPath, target, file, fi, func() { + fn = arch.fileSaver.Save(ctx, snPath, target, meta, func() { arch.StartFile(snPath) }, func() { arch.trackItem(snPath, nil, nil, ItemStats{}, 0) @@ -538,7 +575,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous return futureNode{}, false, err } - fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree, + fn, err = arch.saveDir(ctx, snPath, target, meta, oldSubtree, func(node *restic.Node, stats ItemStats) { arch.trackItem(snItem, previous, node, stats, time.Since(start)) }) @@ -554,7 +591,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous default: debug.Log(" %v other", target) - node, err := arch.nodeFromFileInfo(snPath, target, fi, false) + node, err := arch.nodeFromFileInfo(snPath, target, meta, false) if err != nil { return futureNode{}, false, err } @@ -688,7 +725,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, } func (arch *Archiver) dirPathToNode(snPath, target string) (node *restic.Node, err error) { - meta, err := arch.FS.OpenFile(target, fs.O_RDONLY) + meta, err := arch.FS.OpenFile(target, 0, true) if err != nil { return nil, err } @@ -700,14 +737,9 @@ func (arch *Archiver) dirPathToNode(snPath, target string) (node *restic.Node, e }() debug.Log("%v, reading dir node data from %v", snPath, target) - fi, err := meta.Stat() - if err != nil { - return nil, errors.WithStack(err) - } - // in some cases reading xattrs for directories above the backup source is not allowed // thus ignore errors for such folders. 
- node, err = arch.nodeFromFileInfo(snPath, target, fi, true) + node, err = arch.nodeFromFileInfo(snPath, target, meta, true) if err != nil { return nil, err } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index ff072fa3605..0b2957bc6d4 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -76,17 +76,12 @@ func saveFile(t testing.TB, repo archiverRepo, filename string, filesystem fs.FS startCallback = true } - file, err := arch.FS.OpenFile(filename, fs.O_RDONLY|fs.O_NOFOLLOW) + file, err := arch.FS.OpenFile(filename, fs.O_NOFOLLOW, false) if err != nil { t.Fatal(err) } - fi, err := file.Stat() - if err != nil { - t.Fatal(err) - } - - res := arch.fileSaver.Save(ctx, "/", filename, file, fi, start, completeReading, complete) + res := arch.fileSaver.Save(ctx, "/", filename, file, start, completeReading, complete) fnr := res.take(ctx) if fnr.err != nil { @@ -556,11 +551,12 @@ func rename(t testing.TB, oldname, newname string) { } } -func nodeFromFI(t testing.TB, fs fs.FS, filename string, fi os.FileInfo) *restic.Node { - node, err := fs.NodeFromFileInfo(filename, fi, false) - if err != nil { - t.Fatal(err) - } +func nodeFromFile(t testing.TB, localFs fs.FS, filename string) *restic.Node { + meta, err := localFs.OpenFile(filename, fs.O_NOFOLLOW, true) + rtest.OK(t, err) + node, err := meta.ToNode(false) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) return node } @@ -688,7 +684,7 @@ func TestFileChanged(t *testing.T) { fs := &fs.Local{} fiBefore := lstat(t, filename) - node := nodeFromFI(t, fs, filename, fiBefore) + node := nodeFromFile(t, fs, filename) if fileChanged(fs, fiBefore, node, 0) { t.Fatalf("unchanged file detected as changed") @@ -729,8 +725,8 @@ func TestFilChangedSpecialCases(t *testing.T) { t.Run("type-change", func(t *testing.T) { fi := lstat(t, filename) - node := nodeFromFI(t, &fs.Local{}, filename, fi) - node.Type = "restic.NodeTypeSymlink" + node := nodeFromFile(t, &fs.Local{}, filename) + node.Type = restic.NodeTypeSymlink if !fileChanged(&fs.Local{}, fi, node, 0) { t.Fatal("node with changed type detected as unchanged") } @@ -834,7 +830,8 @@ func TestArchiverSaveDir(t *testing.T) { wg, ctx := errgroup.WithContext(context.Background()) repo.StartPackUploader(ctx, wg) - arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + testFS := fs.Track{FS: fs.Local{}} + arch := New(repo, testFS, Options{}) arch.runWorkers(ctx, wg) arch.summary = &Summary{} @@ -846,15 +843,11 @@ func TestArchiverSaveDir(t *testing.T) { back := rtest.Chdir(t, chdir) defer back() - fi, err := os.Lstat(test.target) - if err != nil { - t.Fatal(err) - } - - ft, err := arch.saveDir(ctx, "/", test.target, fi, nil, nil) - if err != nil { - t.Fatal(err) - } + meta, err := testFS.OpenFile(test.target, fs.O_NOFOLLOW, true) + rtest.OK(t, err) + ft, err := arch.saveDir(ctx, "/", test.target, meta, nil, nil) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) fnr := ft.take(ctx) node, stats := fnr.node, fnr.stats @@ -916,19 +909,16 @@ func TestArchiverSaveDirIncremental(t *testing.T) { wg, ctx := errgroup.WithContext(context.TODO()) repo.StartPackUploader(ctx, wg) - arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + testFS := fs.Track{FS: fs.Local{}} + arch := New(repo, testFS, Options{}) arch.runWorkers(ctx, wg) arch.summary = &Summary{} - fi, err := os.Lstat(tempdir) - if err != nil { - t.Fatal(err) - } - - ft, err := arch.saveDir(ctx, "/", tempdir, fi, nil, nil) - if err != nil { - t.Fatal(err) - } + meta, err := 
testFS.OpenFile(tempdir, fs.O_NOFOLLOW, true) + rtest.OK(t, err) + ft, err := arch.saveDir(ctx, "/", tempdir, meta, nil, nil) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) fnr := ft.take(ctx) node, stats := fnr.node, fnr.stats @@ -1665,8 +1655,8 @@ type MockFS struct { bytesRead map[string]int // tracks bytes read from all opened files } -func (m *MockFS) OpenFile(name string, flag int) (fs.File, error) { - f, err := m.FS.OpenFile(name, flag) +func (m *MockFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { + f, err := m.FS.OpenFile(name, flag, metadataOnly) if err != nil { return f, err } @@ -2056,12 +2046,12 @@ type TrackFS struct { m sync.Mutex } -func (m *TrackFS) OpenFile(name string, flag int) (fs.File, error) { +func (m *TrackFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { m.m.Lock() m.opened[name]++ m.m.Unlock() - return m.FS.OpenFile(name, flag) + return m.FS.OpenFile(name, flag, metadataOnly) } type failSaveRepo struct { @@ -2210,48 +2200,39 @@ func snapshot(t testing.TB, repo archiverRepo, fs fs.FS, parent *restic.Snapshot return snapshot, node } -// StatFS allows overwriting what is returned by the Lstat function. -type StatFS struct { +type overrideFS struct { fs.FS - - OverrideLstat map[string]os.FileInfo - OnlyOverrideStat bool + overrideFI os.FileInfo + overrideNode *restic.Node + overrideErr error } -func (fs *StatFS) Lstat(name string) (os.FileInfo, error) { - if !fs.OnlyOverrideStat { - if fi, ok := fs.OverrideLstat[fixpath(name)]; ok { - return fi, nil - } +func (m *overrideFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { + f, err := m.FS.OpenFile(name, flag, metadataOnly) + if err != nil { + return f, err } - return fs.FS.Lstat(name) -} - -func (fs *StatFS) OpenFile(name string, flags int) (fs.File, error) { - if fi, ok := fs.OverrideLstat[fixpath(name)]; ok { - f, err := fs.FS.OpenFile(name, flags) - if err != nil { - return nil, err - } - - wrappedFile := fileStat{ - File: f, - fi: fi, - } - return wrappedFile, nil + if filepath.Base(name) == "testfile" { + return &overrideFile{f, m}, nil } - - return fs.FS.OpenFile(name, flags) + return f, nil } -type fileStat struct { +type overrideFile struct { fs.File - fi os.FileInfo + ofs *overrideFS +} + +func (f overrideFile) Stat() (os.FileInfo, error) { + return f.ofs.overrideFI, nil } -func (f fileStat) Stat() (os.FileInfo, error) { - return f.fi, nil +func (f overrideFile) ToNode(ignoreXattrListError bool) (*restic.Node, error) { + if f.ofs.overrideNode == nil { + return f.File.ToNode(ignoreXattrListError) + } + return f.ofs.overrideNode, f.ofs.overrideErr } // used by wrapFileInfo, use untyped const in order to avoid having a version @@ -2279,17 +2260,18 @@ func TestMetadataChanged(t *testing.T) { // get metadata fi := lstat(t, "testfile") localFS := &fs.Local{} - want, err := localFS.NodeFromFileInfo("testfile", fi, false) - if err != nil { - t.Fatal(err) - } + meta, err := localFS.OpenFile("testfile", fs.O_NOFOLLOW, true) + rtest.OK(t, err) + want, err := meta.ToNode(false) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) - fs := &StatFS{ - FS: localFS, - OverrideLstat: map[string]os.FileInfo{ - "testfile": fi, - }, + fs := &overrideFS{ + FS: localFS, + overrideFI: fi, + overrideNode: &restic.Node{}, } + *fs.overrideNode = *want sn, node2 := snapshot(t, repo, fs, nil, "testfile") @@ -2309,7 +2291,8 @@ func TestMetadataChanged(t *testing.T) { } // modify the mode by wrapping it in a new struct, uses the consts defined above - 
fs.OverrideLstat["testfile"] = wrapFileInfo(fi) + fs.overrideFI = wrapFileInfo(fi) + rtest.Assert(t, !fileChanged(fs, fs.overrideFI, node2, 0), "testfile must not be considered as changed") // set the override values in the 'want' node which want.Mode = 0400 @@ -2318,16 +2301,13 @@ func TestMetadataChanged(t *testing.T) { want.UID = 51234 want.GID = 51235 } - // no user and group name - want.User = "" - want.Group = "" + // update mock node accordingly + fs.overrideNode.Mode = 0400 + fs.overrideNode.UID = want.UID + fs.overrideNode.GID = want.GID // make another snapshot _, node3 := snapshot(t, repo, fs, sn, "testfile") - // Override username and group to empty string - in case underlying system has user with UID 51234 - // See https://github.com/restic/restic/issues/2372 - node3.User = "" - node3.Group = "" // make sure that metadata was recorded successfully if !cmp.Equal(want, node3) { @@ -2342,7 +2322,7 @@ func TestMetadataChanged(t *testing.T) { func TestRacyFileSwap(t *testing.T) { files := TestDir{ - "file": TestFile{ + "testfile": TestFile{ Content: "foo bar test file", }, } @@ -2354,14 +2334,11 @@ func TestRacyFileSwap(t *testing.T) { // get metadata of current folder fi := lstat(t, ".") - tempfile := filepath.Join(tempdir, "file") + tempfile := filepath.Join(tempdir, "testfile") - statfs := &StatFS{ - FS: fs.Local{}, - OverrideLstat: map[string]os.FileInfo{ - tempfile: fi, - }, - OnlyOverrideStat: true, + statfs := &overrideFS{ + FS: fs.Local{}, + overrideFI: fi, } ctx, cancel := context.WithCancel(context.Background()) @@ -2388,14 +2365,19 @@ func TestRacyFileSwap(t *testing.T) { } } +type mockToNoder struct { + node *restic.Node + err error +} + +func (m *mockToNoder) ToNode(_ bool) (*restic.Node, error) { + return m.node, m.err +} + func TestMetadataBackupErrorFiltering(t *testing.T) { tempdir := t.TempDir() - repo := repository.TestRepository(t) - filename := filepath.Join(tempdir, "file") - rtest.OK(t, os.WriteFile(filename, []byte("example"), 0o600)) - fi, err := os.Stat(filename) - rtest.OK(t, err) + repo := repository.TestRepository(t) arch := New(repo, fs.Local{}, Options{}) @@ -2406,15 +2388,24 @@ func TestMetadataBackupErrorFiltering(t *testing.T) { return replacementErr } + nonExistNoder := &mockToNoder{ + node: &restic.Node{Type: restic.NodeTypeFile}, + err: fmt.Errorf("not found"), + } + // check that errors from reading extended metadata are properly filtered - node, err := arch.nodeFromFileInfo("file", filename+"invalid", fi, false) + node, err := arch.nodeFromFileInfo("file", filename+"invalid", nonExistNoder, false) rtest.Assert(t, node != nil, "node is missing") rtest.Assert(t, err == replacementErr, "expected %v got %v", replacementErr, err) rtest.Assert(t, filteredErr != nil, "missing inner error") // check that errors from reading irregular file are not filtered filteredErr = nil - node, err = arch.nodeFromFileInfo("file", filename, wrapIrregularFileInfo(fi), false) + nonExistNoder = &mockToNoder{ + node: &restic.Node{Type: restic.NodeTypeIrregular}, + err: fmt.Errorf(`unsupported file type "irregular"`), + } + node, err = arch.nodeFromFileInfo("file", filename, nonExistNoder, false) rtest.Assert(t, node != nil, "node is missing") rtest.Assert(t, filteredErr == nil, "error for irregular node should not have been filtered") rtest.Assert(t, strings.Contains(err.Error(), "irregular"), "unexpected error %q does not warn about irregular file mode", err) @@ -2434,17 +2425,19 @@ func TestIrregularFile(t *testing.T) { tempfile := filepath.Join(tempdir, "testfile") 
fi := lstat(t, "testfile") - statfs := &StatFS{ - FS: fs.Local{}, - OverrideLstat: map[string]os.FileInfo{ - tempfile: wrapIrregularFileInfo(fi), + override := &overrideFS{ + FS: fs.Local{}, + overrideFI: wrapIrregularFileInfo(fi), + overrideNode: &restic.Node{ + Type: restic.NodeTypeIrregular, }, + overrideErr: fmt.Errorf(`unsupported file type "irregular"`), } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - arch := New(repo, fs.Track{FS: statfs}, Options{}) + arch := New(repo, fs.Track{FS: override}, Options{}) _, excluded, err := arch.save(ctx, "/", tempfile, nil) if err == nil { t.Fatalf("Save() should have failed") diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 621f84826b0..deeab645947 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -57,12 +57,8 @@ func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo { } func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*restic.Node, *restic.Node) { - fi := lstat(t, name) - fs := &fs.Local{} - want, err := fs.NodeFromFileInfo(name, fi, false) - rtest.OK(t, err) - - _, node := snapshot(t, repo, fs, nil, name) + want := nodeFromFile(t, &fs.Local{}, name) + _, node := snapshot(t, repo, &fs.Local{}, nil, name) return want, node } diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go index 54ced788ae0..418517fd91c 100644 --- a/internal/archiver/exclude.go +++ b/internal/archiver/exclude.go @@ -135,9 +135,9 @@ func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache, return rejected } -func isDirExcludedByFile(dir, tagFilename, header string, fs fs.FS, warnf func(msg string, args ...interface{})) bool { - tf := fs.Join(dir, tagFilename) - _, err := fs.Lstat(tf) +func isDirExcludedByFile(dir, tagFilename, header string, fsInst fs.FS, warnf func(msg string, args ...interface{})) bool { + tf := fsInst.Join(dir, tagFilename) + _, err := fsInst.Lstat(tf) if errors.Is(err, os.ErrNotExist) { return false } @@ -153,7 +153,7 @@ func isDirExcludedByFile(dir, tagFilename, header string, fs fs.FS, warnf func(m // From this stage, errors mean tagFilename exists but it is malformed. // Warnings will be generated so that the user is informed that the // indented ignore-action is not performed. - f, err := fs.OpenFile(tf, os.O_RDONLY) + f, err := fsInst.OpenFile(tf, fs.O_RDONLY, false) if err != nil { warnf("could not open exclusion tagfile: %v", err) return false diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index dccaa944245..ca8ec2fbb50 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "os" "sync" "github.com/restic/chunker" @@ -29,7 +28,7 @@ type fileSaver struct { CompleteBlob func(bytes uint64) - NodeFromFileInfo func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) + NodeFromFileInfo func(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*restic.Node, error) } // newFileSaver returns a new file saver. A worker pool with fileWorkers is @@ -71,13 +70,12 @@ type fileCompleteFunc func(*restic.Node, ItemStats) // file is closed by Save. completeReading is only called if the file was read // successfully. complete is always called. If completeReading is called, then // this will always happen before calling complete. 
-func (s *fileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, fi os.FileInfo, start func(), completeReading func(), complete fileCompleteFunc) futureNode { +func (s *fileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, start func(), completeReading func(), complete fileCompleteFunc) futureNode { fn, ch := newFutureNode() job := saveFileJob{ snPath: snPath, target: target, file: file, - fi: fi, ch: ch, start: start, @@ -100,7 +98,6 @@ type saveFileJob struct { snPath string target string file fs.File - fi os.FileInfo ch chan<- futureNodeResult start func() @@ -109,7 +106,7 @@ type saveFileJob struct { } // saveFile stores the file f in the repo, then closes it. -func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, fi os.FileInfo, start func(), finishReading func(), finish func(res futureNodeResult)) { +func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, start func(), finishReading func(), finish func(res futureNodeResult)) { start() fnr := futureNodeResult{ @@ -156,7 +153,7 @@ func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat debug.Log("%v", snPath) - node, err := s.NodeFromFileInfo(snPath, target, fi, false) + node, err := s.NodeFromFileInfo(snPath, target, f, false) if err != nil { _ = f.Close() completeError(err) @@ -262,7 +259,7 @@ func (s *fileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) { } } - s.saveFile(ctx, chnker, job.snPath, job.target, job.file, job.fi, job.start, func() { + s.saveFile(ctx, chnker, job.snPath, job.target, job.file, job.start, func() { if job.completeReading != nil { job.completeReading() } diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index 069cdc17152..ce862f6feb6 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -30,7 +30,7 @@ func createTestFiles(t testing.TB, num int) (files []string) { return files } -func startFileSaver(ctx context.Context, t testing.TB, fs fs.FS) (*fileSaver, context.Context, *errgroup.Group) { +func startFileSaver(ctx context.Context, t testing.TB, fsInst fs.FS) (*fileSaver, context.Context, *errgroup.Group) { wg, ctx := errgroup.WithContext(ctx) saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *buffer, _ string, cb func(saveBlobResponse)) { @@ -49,8 +49,8 @@ func startFileSaver(ctx context.Context, t testing.TB, fs fs.FS) (*fileSaver, co } s := newFileSaver(ctx, wg, saveBlob, pol, workers, workers) - s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - return fs.NodeFromFileInfo(filename, fi, ignoreXattrListError) + s.NodeFromFileInfo = func(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*restic.Node, error) { + return meta.ToNode(ignoreXattrListError) } return s, ctx, wg @@ -72,17 +72,12 @@ func TestFileSaver(t *testing.T) { var results []futureNode for _, filename := range files { - f, err := testFs.OpenFile(filename, os.O_RDONLY) + f, err := testFs.OpenFile(filename, os.O_RDONLY, false) if err != nil { t.Fatal(err) } - fi, err := f.Stat() - if err != nil { - t.Fatal(err) - } - - ff := s.Save(ctx, filename, filename, f, fi, startFn, completeReadingFn, completeFn) + ff := s.Save(ctx, filename, filename, f, startFn, completeReadingFn, completeFn) results = append(results, ff) } diff --git a/internal/fs/file.go 
b/internal/fs/file.go index fa395b62845..81ee4bc7acd 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -67,7 +67,7 @@ func ResetPermissions(path string) error { // Readdirnames returns a list of file in a directory. Flags are passed to fs.OpenFile. // O_RDONLY and O_DIRECTORY are implied. func Readdirnames(filesystem FS, dir string, flags int) ([]string, error) { - f, err := filesystem.OpenFile(dir, O_RDONLY|O_DIRECTORY|flags) + f, err := filesystem.OpenFile(dir, O_RDONLY|O_DIRECTORY|flags, false) if err != nil { return nil, fmt.Errorf("openfile for readdirnames failed: %w", err) } diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 4f8b670900c..5e6c72d0a3e 100644 --- a/internal/fs/fs_local.go +++ b/internal/fs/fs_local.go @@ -20,18 +20,16 @@ func (fs Local) VolumeName(path string) string { return filepath.VolumeName(path) } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. -func (fs Local) OpenFile(name string, flag int) (File, error) { - f, err := os.OpenFile(fixpath(name), flag, 0) - if err != nil { - return nil, err - } - _ = setFlags(f) - return f, nil +// OpenFile opens a file or directory for reading. +// +// If metadataOnly is set, an implementation MUST return a File object for +// arbitrary file types including symlinks. The implementation may internally use +// the given file path or a file handle. In particular, an implementation may +// delay actually accessing the underlying filesystem. +// +// Only the O_NOFOLLOW and O_DIRECTORY flags are supported. +func (fs Local) OpenFile(name string, flag int, metadataOnly bool) (File, error) { + return newLocalFile(name, flag, metadataOnly) } // Lstat returns the FileInfo structure describing the named file. @@ -53,10 +51,6 @@ func (fs Local) ExtendedStat(fi os.FileInfo) ExtendedFileInfo { return ExtendedStat(fi) } -func (fs Local) NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - return nodeFromFileInfo(path, fi, ignoreXattrListError) -} - // Join joins any number of path elements into a single path, adding a // Separator if necessary. Join calls Clean on the result; in particular, all // empty strings are ignored. 
On Windows, the result is a UNC path if and only @@ -97,3 +91,87 @@ func (fs Local) Base(path string) string { func (fs Local) Dir(path string) string { return filepath.Dir(path) } + +type localFile struct { + name string + flag int + f *os.File + fi os.FileInfo +} + +// See the File interface for a description of each method +var _ File = &localFile{} + +func newLocalFile(name string, flag int, metadataOnly bool) (*localFile, error) { + var f *os.File + if !metadataOnly { + var err error + f, err = os.OpenFile(fixpath(name), flag, 0) + if err != nil { + return nil, err + } + _ = setFlags(f) + } + return &localFile{ + name: name, + flag: flag, + f: f, + }, nil +} + +func (f *localFile) MakeReadable() error { + if f.f != nil { + panic("file is already readable") + } + + newF, err := newLocalFile(f.name, f.flag, false) + if err != nil { + return err + } + // replace state and also reset cached FileInfo + *f = *newF + return nil +} + +func (f *localFile) cacheFI() error { + if f.fi != nil { + return nil + } + var err error + if f.f != nil { + f.fi, err = f.f.Stat() + } else if f.flag&O_NOFOLLOW != 0 { + f.fi, err = os.Lstat(f.name) + } else { + f.fi, err = os.Stat(f.name) + } + return err +} + +func (f *localFile) Stat() (os.FileInfo, error) { + err := f.cacheFI() + // the call to cacheFI MUST happen before reading from f.fi + return f.fi, err +} + +func (f *localFile) ToNode(ignoreXattrListError bool) (*restic.Node, error) { + if err := f.cacheFI(); err != nil { + return nil, err + } + return nodeFromFileInfo(f.name, f.fi, ignoreXattrListError) +} + +func (f *localFile) Read(p []byte) (n int, err error) { + return f.f.Read(p) +} + +func (f *localFile) Readdirnames(n int) ([]string, error) { + return f.f.Readdirnames(n) +} + +func (f *localFile) Close() error { + if f.f != nil { + return f.f.Close() + } + return nil +} diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index e9bc66657c9..fe82b85e13d 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -10,7 +10,6 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/options" - "github.com/restic/restic/internal/restic" ) // VSSConfig holds extended options of windows volume shadow copy service. @@ -126,9 +125,9 @@ func (fs *LocalVss) DeleteSnapshots() { fs.snapshots = activeSnapshots } -// OpenFile wraps the Open method of the underlying file system. -func (fs *LocalVss) OpenFile(name string, flag int) (File, error) { - return fs.FS.OpenFile(fs.snapshotPath(name), flag) +// OpenFile wraps the OpenFile method of the underlying file system. +func (fs *LocalVss) OpenFile(name string, flag int, metadataOnly bool) (File, error) { + return fs.FS.OpenFile(fs.snapshotPath(name), flag, metadataOnly) } // Lstat wraps the Lstat method of the underlying file system. @@ -136,10 +135,6 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return fs.FS.Lstat(fs.snapshotPath(name)) } -func (fs *LocalVss) NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - return fs.FS.NodeFromFileInfo(fs.snapshotPath(path), fi, ignoreXattrListError) -} - // isMountPointIncluded is true if given mountpoint included by user. 
func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool { if fs.excludeVolumes == nil { diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index db8d4b13385..33c412fe9dc 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -327,7 +327,7 @@ func TestVSSFS(t *testing.T) { rtest.OK(t, err) rtest.Equals(t, origFi.Mode(), lstatFi.Mode()) - f, err := localVss.OpenFile(tempfile, os.O_RDONLY) + f, err := localVss.OpenFile(tempfile, os.O_RDONLY, false) rtest.OK(t, err) data, err := io.ReadAll(f) rtest.OK(t, err) diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index a4efa8deacb..8728b274c71 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -49,12 +49,7 @@ func (fs *Reader) fi() os.FileInfo { } } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *os.PathError. -func (fs *Reader) OpenFile(name string, flag int) (f File, err error) { +func (fs *Reader) OpenFile(name string, flag int, _ bool) (f File, err error) { if flag & ^(O_RDONLY|O_NOFOLLOW) != 0 { return nil, pathError("open", name, fmt.Errorf("invalid combination of flags 0x%x", flag)) @@ -127,17 +122,6 @@ func (fs *Reader) ExtendedStat(fi os.FileInfo) ExtendedFileInfo { } } -func (fs *Reader) NodeFromFileInfo(path string, fi os.FileInfo, _ bool) (*restic.Node, error) { - node := buildBasicNode(path, fi) - - // fill minimal info with current values for uid, gid - node.UID = uint32(os.Getuid()) - node.GID = uint32(os.Getgid()) - node.ChangeTime = node.ModTime - - return node, nil -} - // Join joins any number of path elements into a single path, adding a // Separator if necessary. Join calls Clean on the result; in particular, all // empty strings are ignored. On Windows, the result is a UNC path if and only @@ -235,6 +219,10 @@ type fakeFile struct { // ensure that fakeFile implements File var _ File = fakeFile{} +func (f fakeFile) MakeReadable() error { + return nil +} + func (f fakeFile) Readdirnames(_ int) ([]string, error) { return nil, pathError("readdirnames", f.name, os.ErrInvalid) } @@ -251,6 +239,17 @@ func (f fakeFile) Stat() (os.FileInfo, error) { return f.FileInfo, nil } +func (f fakeFile) ToNode(_ bool) (*restic.Node, error) { + node := buildBasicNode(f.name, f.FileInfo) + + // fill minimal info with current values for uid, gid + node.UID = uint32(os.Getuid()) + node.GID = uint32(os.Getgid()) + node.ChangeTime = node.ModTime + + return node, nil +} + // fakeDir implements Readdirnames and Readdir, everything else is delegated to fakeFile. 
type fakeDir struct { entries []os.FileInfo diff --git a/internal/fs/fs_reader_test.go b/internal/fs/fs_reader_test.go index e7020bc9dc5..7e7f6e77c8e 100644 --- a/internal/fs/fs_reader_test.go +++ b/internal/fs/fs_reader_test.go @@ -16,7 +16,7 @@ import ( ) func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte) { - f, err := fs.OpenFile(filename, O_RDONLY) + f, err := fs.OpenFile(filename, O_RDONLY, false) if err != nil { t.Fatal(err) } @@ -37,7 +37,7 @@ func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte } func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { - f, err := fs.OpenFile(dir, os.O_RDONLY) + f, err := fs.OpenFile(dir, O_RDONLY, false) if err != nil { t.Fatal(err) } @@ -123,7 +123,7 @@ func TestFSReader(t *testing.T) { { name: "file/Stat", f: func(t *testing.T, fs FS) { - f, err := fs.OpenFile(filename, os.O_RDONLY) + f, err := fs.OpenFile(filename, O_RDONLY, true) if err != nil { t.Fatal(err) } @@ -295,7 +295,7 @@ func TestFSReaderMinFileSize(t *testing.T) { AllowEmptyFile: test.allowEmpty, } - f, err := fs.OpenFile("testfile", os.O_RDONLY) + f, err := fs.OpenFile("testfile", O_RDONLY, false) if err != nil { t.Fatal(err) } diff --git a/internal/fs/fs_track.go b/internal/fs/fs_track.go index 9912ac45bb9..9ebdbb8c4a4 100644 --- a/internal/fs/fs_track.go +++ b/internal/fs/fs_track.go @@ -16,8 +16,8 @@ type Track struct { } // OpenFile wraps the OpenFile method of the underlying file system. -func (fs Track) OpenFile(name string, flag int) (File, error) { - f, err := fs.FS.OpenFile(fixpath(name), flag) +func (fs Track) OpenFile(name string, flag int, metadataOnly bool) (File, error) { + f, err := fs.FS.OpenFile(name, flag, metadataOnly) if err != nil { return nil, err } @@ -31,7 +31,7 @@ type trackFile struct { func newTrackFile(stack []byte, filename string, file File) *trackFile { f := &trackFile{file} - runtime.SetFinalizer(f, func(_ *trackFile) { + runtime.SetFinalizer(f, func(_ any) { fmt.Fprintf(os.Stderr, "file %s not closed\n\nStacktrack:\n%s\n", filename, stack) panic("file " + filename + " not closed") }) diff --git a/internal/fs/interface.go b/internal/fs/interface.go index 899888fb085..7ff77713814 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -9,11 +9,18 @@ import ( // FS bundles all methods needed for a file system. type FS interface { - OpenFile(name string, flag int) (File, error) + // OpenFile opens a file or directory for reading. + // + // If metadataOnly is set, an implementation MUST return a File object for + // arbitrary file types including symlinks. The implementation may internally use + // the given file path or a file handle. In particular, an implementation may + // delay actually accessing the underlying filesystem. + // + // Only the O_NOFOLLOW and O_DIRECTORY flags are supported. + OpenFile(name string, flag int, metadataOnly bool) (File, error) Lstat(name string) (os.FileInfo, error) DeviceID(fi os.FileInfo) (deviceID uint64, err error) ExtendedStat(fi os.FileInfo) ExtendedFileInfo - NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) Join(elem ...string) string Separator() string @@ -26,11 +33,23 @@ type FS interface { Base(path string) string } -// File is an open file on a file system. +// File is an open file on a file system. When opened as metadataOnly, an +// implementation may opt to perform filesystem operations using the filepath +// instead of actually opening the file. 
type File interface { + // MakeReadable reopens a File that was opened metadataOnly for reading. + // The method must not be called for files that are opened for reading. + // If possible, the underlying file should be reopened atomically. + // MakeReadable must work for files and directories. + MakeReadable() error + io.Reader io.Closer Readdirnames(n int) ([]string, error) Stat() (os.FileInfo, error) + // ToNode returns a restic.Node for the File. The internally used os.FileInfo + // must be consistent with that returned by Stat(). In particular, the metadata + // returned by consecutive calls to Stat() and ToNode() must match. + ToNode(ignoreXattrListError bool) (*restic.Node, error) } diff --git a/internal/fs/node_test.go b/internal/fs/node_test.go index 58facceb165..65098e30473 100644 --- a/internal/fs/node_test.go +++ b/internal/fs/node_test.go @@ -17,56 +17,26 @@ import ( rtest "github.com/restic/restic/internal/test" ) -func BenchmarkNodeFillUser(t *testing.B) { - tempfile, err := os.CreateTemp("", "restic-test-temp-") - if err != nil { - t.Fatal(err) - } - - fi, err := tempfile.Stat() - if err != nil { - t.Fatal(err) - } - +func BenchmarkNodeFromFileInfo(t *testing.B) { + tempfile, err := os.CreateTemp(t.TempDir(), "restic-test-temp-") + rtest.OK(t, err) path := tempfile.Name() - fs := Local{} - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - _, err := fs.NodeFromFileInfo(path, fi, false) - rtest.OK(t, err) - } - rtest.OK(t, tempfile.Close()) - rtest.RemoveAll(t, tempfile.Name()) -} - -func BenchmarkNodeFromFileInfo(t *testing.B) { - tempfile, err := os.CreateTemp("", "restic-test-temp-") - if err != nil { - t.Fatal(err) - } - - fi, err := tempfile.Stat() - if err != nil { - t.Fatal(err) - } - path := tempfile.Name() fs := Local{} + f, err := fs.OpenFile(path, O_NOFOLLOW, true) + rtest.OK(t, err) + _, err = f.Stat() + rtest.OK(t, err) t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := fs.NodeFromFileInfo(path, fi, false) - if err != nil { - t.Fatal(err) - } + _, err := f.ToNode(false) + rtest.OK(t, err) } - rtest.OK(t, tempfile.Close()) - rtest.RemoveAll(t, tempfile.Name()) + rtest.OK(t, f.Close()) } func parseTime(s string) time.Time { @@ -249,14 +219,14 @@ func TestNodeRestoreAt(t *testing.T) { rtest.OK(t, NodeCreateAt(&test, nodePath)) rtest.OK(t, NodeRestoreMetadata(&test, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) - fi, err := os.Lstat(nodePath) - rtest.OK(t, err) - fs := &Local{} - n2, err := fs.NodeFromFileInfo(nodePath, fi, false) + meta, err := fs.OpenFile(nodePath, O_NOFOLLOW, true) + rtest.OK(t, err) + n2, err := meta.ToNode(false) rtest.OK(t, err) - n3, err := fs.NodeFromFileInfo(nodePath, fi, true) + n3, err := meta.ToNode(true) rtest.OK(t, err) + rtest.OK(t, meta.Close()) rtest.Assert(t, n2.Equals(*n3), "unexpected node info mismatch %v", cmp.Diff(n2, n3)) rtest.Assert(t, test.Name == n2.Name, diff --git a/internal/fs/node_unix_test.go b/internal/fs/node_unix_test.go index 6b47eafba2f..1eb1ee5061d 100644 --- a/internal/fs/node_unix_test.go +++ b/internal/fs/node_unix_test.go @@ -114,16 +114,14 @@ func TestNodeFromFileInfo(t *testing.T) { return } - if fi.Sys() == nil { - t.Skip("fi.Sys() is nil") - return - } - fs := &Local{} - node, err := fs.NodeFromFileInfo(test.filename, fi, false) - if err != nil { - t.Fatal(err) - } + meta, err := fs.OpenFile(test.filename, O_NOFOLLOW, true) + rtest.OK(t, err) + node, err := meta.ToNode(false) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) + + rtest.OK(t, 
err) switch node.Type { case restic.NodeTypeFile, restic.NodeTypeSymlink: diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index 1bb76b20455..f75df54d3c3 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -222,11 +222,11 @@ func restoreAndGetNode(t *testing.T, tempDir string, testNode *restic.Node, warn test.OK(t, errors.Wrapf(err, "Failed to restore metadata for: %s", testPath)) fs := &Local{} - fi, err := fs.Lstat(testPath) - test.OK(t, errors.Wrapf(err, "Could not Lstat for path: %s", testPath)) - - nodeFromFileInfo, err := fs.NodeFromFileInfo(testPath, fi, false) + meta, err := fs.OpenFile(testPath, O_NOFOLLOW, true) + test.OK(t, err) + nodeFromFileInfo, err := meta.ToNode(false) test.OK(t, errors.Wrapf(err, "Could not get NodeFromFileInfo for path: %s", testPath)) + test.OK(t, meta.Close()) return testPath, nodeFromFileInfo } diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go index f1979f135de..07ca254f135 100644 --- a/internal/restic/tree_test.go +++ b/internal/restic/tree_test.go @@ -83,13 +83,17 @@ func TestNodeMarshal(t *testing.T) { } } -func TestNodeComparison(t *testing.T) { - fs := &fs.Local{} - fi, err := fs.Lstat("tree_test.go") +func nodeForFile(t *testing.T, name string) *restic.Node { + f, err := (&fs.Local{}).OpenFile(name, fs.O_NOFOLLOW, true) rtest.OK(t, err) - - node, err := fs.NodeFromFileInfo("tree_test.go", fi, false) + node, err := f.ToNode(false) rtest.OK(t, err) + rtest.OK(t, f.Close()) + return node +} + +func TestNodeComparison(t *testing.T) { + node := nodeForFile(t, "tree_test.go") n2 := *node rtest.Assert(t, node.Equals(n2), "nodes aren't equal") @@ -127,11 +131,7 @@ func TestTreeEqualSerialization(t *testing.T) { builder := restic.NewTreeJSONBuilder() for _, fn := range files[:i] { - fs := &fs.Local{} - fi, err := fs.Lstat(fn) - rtest.OK(t, err) - node, err := fs.NodeFromFileInfo(fn, fi, false) - rtest.OK(t, err) + node := nodeForFile(t, fn) rtest.OK(t, tree.Insert(node)) rtest.OK(t, builder.AddNode(node)) From 6084848e5a05666cabab43bd96d2419b879fd405 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 16 Nov 2024 15:38:40 +0100 Subject: [PATCH 822/893] fs: fix O_NOFOLLOW for metadata handles on Windows --- internal/fs/const_windows.go | 6 ++++-- internal/fs/file.go | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/internal/fs/const_windows.go b/internal/fs/const_windows.go index 4c29e0b9d59..b2b1bab86b6 100644 --- a/internal/fs/const_windows.go +++ b/internal/fs/const_windows.go @@ -5,8 +5,10 @@ package fs // TODO honor flags when opening files -// O_NOFOLLOW is a noop on Windows. -const O_NOFOLLOW int = 0 +// O_NOFOLLOW is currently only interpreted by FS.OpenFile in metadataOnly mode and ignored by OpenFile. +// The value of the constant is invented and only for use within this fs package. It must not be used in other contexts. +// It must not conflict with the other O_* values from go/src/syscall/types_windows.go +const O_NOFOLLOW int = 0x40000000 // O_DIRECTORY is a noop on Windows. const O_DIRECTORY int = 0 diff --git a/internal/fs/file.go b/internal/fs/file.go index 81ee4bc7acd..57f1a996a07 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -3,6 +3,7 @@ package fs import ( "fmt" "os" + "runtime" ) // MkdirAll creates a directory named path, along with any necessary parents, @@ -47,6 +48,9 @@ func Lstat(name string) (os.FileInfo, error) { // methods on the returned File can be used for I/O. 
// If there is an error, it will be of type *PathError. func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { + if runtime.GOOS == "windows" { + flag &^= O_NOFOLLOW + } return os.OpenFile(fixpath(name), flag, perm) } From 087f95a298b61723a52d85b28bf45eb4a3bf05ce Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 2 Nov 2024 22:45:48 +0100 Subject: [PATCH 823/893] fs: make generic and extended attrs independent of each other --- internal/fs/node.go | 7 ++---- internal/fs/node_aix.go | 4 ++-- internal/fs/node_netbsd.go | 4 ++-- internal/fs/node_openbsd.go | 4 ++-- internal/fs/node_windows.go | 46 ++++++++++++++++++++++--------------- internal/fs/node_xattr.go | 4 ++-- 6 files changed, 37 insertions(+), 32 deletions(-) diff --git a/internal/fs/node.go b/internal/fs/node.go index d3619432212..a5c821ff42a 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -22,11 +22,8 @@ func nodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (* return node, err } - allowExtended, err := nodeFillGenericAttributes(node, path, &stat) - if allowExtended { - // Skip processing ExtendedAttributes if allowExtended is false. - err = errors.Join(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) - } + err := nodeFillGenericAttributes(node, path, &stat) + err = errors.Join(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) return node, err } diff --git a/internal/fs/node_aix.go b/internal/fs/node_aix.go index fd185724f3b..19c5a2efe61 100644 --- a/internal/fs/node_aix.go +++ b/internal/fs/node_aix.go @@ -21,6 +21,6 @@ func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg str } // nodeFillGenericAttributes is a no-op on AIX. -func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { - return true, nil +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { + return nil } diff --git a/internal/fs/node_netbsd.go b/internal/fs/node_netbsd.go index d295bf57948..1bf00a136bc 100644 --- a/internal/fs/node_netbsd.go +++ b/internal/fs/node_netbsd.go @@ -18,6 +18,6 @@ func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg str } // nodeFillGenericAttributes is a no-op on netbsd. -func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { - return true, nil +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { + return nil } diff --git a/internal/fs/node_openbsd.go b/internal/fs/node_openbsd.go index 712b144b46a..1041622af9b 100644 --- a/internal/fs/node_openbsd.go +++ b/internal/fs/node_openbsd.go @@ -18,6 +18,6 @@ func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg str } // fillGenericAttributes is a no-op on openbsd. -func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { - return true, nil +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { + return nil } diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index 837d4642805..c0f8b08b09b 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -83,8 +83,28 @@ func nodeRestoreExtendedAttributes(node *restic.Node, path string) (err error) { return nil } -// fill extended attributes in the node. This also includes the Generic attributes for windows. 
+// fill extended attributes in the node +// It also checks if the volume supports extended attributes and stores the result in a map +// so that it does not have to be checked again for subsequent calls for paths in the same volume. func nodeFillExtendedAttributes(node *restic.Node, path string, _ bool) (err error) { + if strings.Contains(filepath.Base(path), ":") { + // Do not process for Alternate Data Streams in Windows + return nil + } + + // only capture xattrs for file/dir + if node.Type != restic.NodeTypeFile && node.Type != restic.NodeTypeDir { + return nil + } + + allowExtended, err := checkAndStoreEASupport(path) + if err != nil { + return err + } + if !allowExtended { + return nil + } + var fileHandle windows.Handle if fileHandle, err = openHandleForEA(node.Type, path, false); fileHandle == 0 { return nil @@ -316,40 +336,28 @@ func decryptFile(pathPointer *uint16) error { // nodeFillGenericAttributes fills in the generic attributes for windows like File Attributes, // Created time and Security Descriptors. -// It also checks if the volume supports extended attributes and stores the result in a map -// so that it does not have to be checked again for subsequent calls for paths in the same volume. -func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFileInfo) (allowExtended bool, err error) { +func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFileInfo) error { if strings.Contains(filepath.Base(path), ":") { // Do not process for Alternate Data Streams in Windows - // Also do not allow processing of extended attributes for ADS. - return false, nil + return nil } isVolume, err := isVolumePath(path) if err != nil { - return false, err + return err } if isVolume { // Do not process file attributes, created time and sd for windows root volume paths // Security descriptors are not supported for root volume paths. // Though file attributes and created time are supported for root volume paths, // we ignore them and we do not want to replace them during every restore. - allowExtended, err = checkAndStoreEASupport(path) - if err != nil { - return false, err - } - return allowExtended, err + return nil } var sd *[]byte if node.Type == restic.NodeTypeFile || node.Type == restic.NodeTypeDir { - // Check EA support and get security descriptor for file/dir only - allowExtended, err = checkAndStoreEASupport(path) - if err != nil { - return false, err - } if sd, err = getSecurityDescriptor(path); err != nil { - return allowExtended, err + return err } } @@ -361,7 +369,7 @@ func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFil FileAttributes: &winFI.FileAttributes, SecurityDescriptor: sd, }) - return allowExtended, err + return err } // checkAndStoreEASupport checks if the volume of the path supports extended attributes and stores the result in a map diff --git a/internal/fs/node_xattr.go b/internal/fs/node_xattr.go index 1781452f700..aacc216dc86 100644 --- a/internal/fs/node_xattr.go +++ b/internal/fs/node_xattr.go @@ -71,8 +71,8 @@ func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg str } // nodeFillGenericAttributes is a no-op. 
-func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { - return true, nil +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { + return nil } func nodeRestoreExtendedAttributes(node *restic.Node, path string) error { From d7f4b9db60da583b8d950539fc5cac6a9b22525b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 3 Nov 2024 13:27:58 +0100 Subject: [PATCH 824/893] fs: deduplicate placeholders for generic and xattrs --- internal/fs/node_aix.go | 26 -------------------------- internal/fs/node_netbsd.go | 23 ----------------------- internal/fs/node_noxattr.go | 18 ++++++++++++++++++ internal/fs/node_openbsd.go | 23 ----------------------- internal/fs/node_unix.go | 12 ++++++++++++ internal/fs/node_xattr.go | 10 ---------- 6 files changed, 30 insertions(+), 82 deletions(-) delete mode 100644 internal/fs/node_aix.go delete mode 100644 internal/fs/node_netbsd.go create mode 100644 internal/fs/node_noxattr.go delete mode 100644 internal/fs/node_openbsd.go diff --git a/internal/fs/node_aix.go b/internal/fs/node_aix.go deleted file mode 100644 index 19c5a2efe61..00000000000 --- a/internal/fs/node_aix.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build aix -// +build aix - -package fs - -import "github.com/restic/restic/internal/restic" - -// nodeRestoreExtendedAttributes is a no-op on AIX. -func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { - return nil -} - -// nodeFillExtendedAttributes is a no-op on AIX. -func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { - return nil -} - -// nodeRestoreGenericAttributes is no-op on AIX. -func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { - return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) -} - -// nodeFillGenericAttributes is a no-op on AIX. -func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { - return nil -} diff --git a/internal/fs/node_netbsd.go b/internal/fs/node_netbsd.go deleted file mode 100644 index 1bf00a136bc..00000000000 --- a/internal/fs/node_netbsd.go +++ /dev/null @@ -1,23 +0,0 @@ -package fs - -import "github.com/restic/restic/internal/restic" - -// nodeRestoreExtendedAttributes is a no-op on netbsd. -func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { - return nil -} - -// nodeFillExtendedAttributes is a no-op on netbsd. -func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { - return nil -} - -// nodeRestoreGenericAttributes is no-op on netbsd. -func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { - return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) -} - -// nodeFillGenericAttributes is a no-op on netbsd. 
-func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { - return nil -} diff --git a/internal/fs/node_noxattr.go b/internal/fs/node_noxattr.go new file mode 100644 index 00000000000..27bc6913a6f --- /dev/null +++ b/internal/fs/node_noxattr.go @@ -0,0 +1,18 @@ +//go:build aix || netbsd || openbsd +// +build aix netbsd openbsd + +package fs + +import ( + "github.com/restic/restic/internal/restic" +) + +// nodeRestoreExtendedAttributes is a no-op +func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { + return nil +} + +// nodeFillExtendedAttributes is a no-op +func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { + return nil +} diff --git a/internal/fs/node_openbsd.go b/internal/fs/node_openbsd.go deleted file mode 100644 index 1041622af9b..00000000000 --- a/internal/fs/node_openbsd.go +++ /dev/null @@ -1,23 +0,0 @@ -package fs - -import "github.com/restic/restic/internal/restic" - -// nodeRestoreExtendedAttributes is a no-op on openbsd. -func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { - return nil -} - -// nodeFillExtendedAttributes is a no-op on openbsd. -func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { - return nil -} - -// nodeRestoreGenericAttributes is no-op on openbsd. -func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { - return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) -} - -// fillGenericAttributes is a no-op on openbsd. -func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { - return nil -} diff --git a/internal/fs/node_unix.go b/internal/fs/node_unix.go index 5f08f362316..e88e5425104 100644 --- a/internal/fs/node_unix.go +++ b/internal/fs/node_unix.go @@ -5,8 +5,20 @@ package fs import ( "os" + + "github.com/restic/restic/internal/restic" ) func lchown(name string, uid, gid int) error { return os.Lchown(name, uid, gid) } + +// nodeRestoreGenericAttributes is no-op. +func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { + return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) +} + +// nodeFillGenericAttributes is a no-op. +func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { + return nil +} diff --git a/internal/fs/node_xattr.go b/internal/fs/node_xattr.go index aacc216dc86..e1ddf9826e7 100644 --- a/internal/fs/node_xattr.go +++ b/internal/fs/node_xattr.go @@ -65,16 +65,6 @@ func handleXattrErr(err error) error { } } -// nodeRestoreGenericAttributes is no-op. -func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { - return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) -} - -// nodeFillGenericAttributes is a no-op. -func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { - return nil -} - func nodeRestoreExtendedAttributes(node *restic.Node, path string) error { expectedAttrs := map[string]struct{}{} for _, attr := range node.ExtendedAttributes { From 6cb19e01905abcf27c06b57b1d81787fd8d7642a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 15 Nov 2024 21:21:04 +0100 Subject: [PATCH 825/893] archiver: fix file type change test The test did not test the case that the type of a file changed unexpectedly. 
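For context: the race exercised here is that a path can change type between the directory listing (which reports, say, a regular file) and the moment the archiver actually opens it. A minimal, hypothetical sketch of the kind of guard involved (simplified signature; the real check sits in save() and dirToNodeAndEntries(), as the diff below shows):

```go
// guardRegularFile is an illustrative helper only: after opening a path that
// the directory listing reported as a regular file, re-check its type and
// refuse to archive it if the type changed in the meantime.
func guardRegularFile(fi os.FileInfo, target string) error {
	if !fi.Mode().IsRegular() {
		return fmt.Errorf("file %q changed type, refusing to archive", target)
	}
	return nil
}
```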
--- internal/archiver/archiver.go | 4 +- internal/archiver/archiver_test.go | 97 ++++++++++++++++++++---------- 2 files changed, 66 insertions(+), 35 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index ae3edaf44c6..f4ff6f47b1e 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -369,7 +369,7 @@ func (arch *Archiver) dirToNodeAndEntries(snPath, dir string, meta fs.File) (nod return nil, nil, err } if node.Type != restic.NodeTypeDir { - return nil, nil, fmt.Errorf("directory %v changed type, refusing to archive", snPath) + return nil, nil, fmt.Errorf("directory %q changed type, refusing to archive", snPath) } names, err = meta.Readdirnames(-1) @@ -548,7 +548,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous // make sure it's still a file if !fi.Mode().IsRegular() { - err = errors.Errorf("file %v changed type, refusing to archive", target) + err = errors.Errorf("file %q changed type, refusing to archive", target) return filterError(err) } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 0b2957bc6d4..e698ba74106 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -2202,9 +2202,10 @@ func snapshot(t testing.TB, repo archiverRepo, fs fs.FS, parent *restic.Snapshot type overrideFS struct { fs.FS - overrideFI os.FileInfo - overrideNode *restic.Node - overrideErr error + overrideFI os.FileInfo + resetFIOnRead bool + overrideNode *restic.Node + overrideErr error } func (m *overrideFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { @@ -2213,7 +2214,7 @@ func (m *overrideFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File return f, err } - if filepath.Base(name) == "testfile" { + if filepath.Base(name) == "testfile" || filepath.Base(name) == "testdir" { return &overrideFile{f, m}, nil } return f, nil @@ -2225,7 +2226,18 @@ type overrideFile struct { } func (f overrideFile) Stat() (os.FileInfo, error) { + if f.ofs.overrideFI == nil { + return f.File.Stat() + } return f.ofs.overrideFI, nil + +} + +func (f overrideFile) MakeReadable() error { + if f.ofs.resetFIOnRead { + f.ofs.overrideFI = nil + } + return f.File.MakeReadable() } func (f overrideFile) ToNode(ignoreXattrListError bool) (*restic.Node, error) { @@ -2320,48 +2332,67 @@ func TestMetadataChanged(t *testing.T) { checker.TestCheckRepo(t, repo, false) } -func TestRacyFileSwap(t *testing.T) { +func TestRacyFileTypeSwap(t *testing.T) { files := TestDir{ "testfile": TestFile{ Content: "foo bar test file", }, + "testdir": TestDir{}, } - tempdir, repo := prepareTempdirRepoSrc(t, files) - - back := rtest.Chdir(t, tempdir) - defer back() + for _, dirError := range []bool{false, true} { + desc := "file changed type" + if dirError { + desc = "dir changed type" + } + t.Run(desc, func(t *testing.T) { + tempdir, repo := prepareTempdirRepoSrc(t, files) - // get metadata of current folder - fi := lstat(t, ".") - tempfile := filepath.Join(tempdir, "testfile") + back := rtest.Chdir(t, tempdir) + defer back() - statfs := &overrideFS{ - FS: fs.Local{}, - overrideFI: fi, - } + // get metadata of current folder + var fakeName, realName string + if dirError { + // lstat claims this is a directory, but it's actually a file + fakeName = "testdir" + realName = "testfile" + } else { + fakeName = "testfile" + realName = "testdir" + } + fakeFI := lstat(t, fakeName) + tempfile := filepath.Join(tempdir, realName) - ctx, cancel := 
context.WithCancel(context.Background()) - defer cancel() + statfs := &overrideFS{ + FS: fs.Local{}, + overrideFI: fakeFI, + resetFIOnRead: true, + } - wg, ctx := errgroup.WithContext(ctx) - repo.StartPackUploader(ctx, wg) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - arch := New(repo, fs.Track{FS: statfs}, Options{}) - arch.Error = func(item string, err error) error { - t.Logf("archiver error as expected for %v: %v", item, err) - return err - } - arch.runWorkers(ctx, wg) + wg, ctx := errgroup.WithContext(ctx) + repo.StartPackUploader(ctx, wg) - // fs.Track will panic if the file was not closed - _, excluded, err := arch.save(ctx, "/", tempfile, nil) - if err == nil { - t.Errorf("Save() should have failed") - } + arch := New(repo, fs.Track{FS: statfs}, Options{}) + arch.Error = func(item string, err error) error { + t.Logf("archiver error as expected for %v: %v", item, err) + return err + } + arch.runWorkers(ctx, wg) - if excluded { - t.Errorf("Save() excluded the node, that's unexpected") + // fs.Track will panic if the file was not closed + _, excluded, err := arch.save(ctx, "/", tempfile, nil) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "changed type, refusing to archive"), "save() returned wrong error: %v", err) + tpe := "file" + if dirError { + tpe = "directory" + } + rtest.Assert(t, strings.Contains(err.Error(), tpe+" "), "unexpected item type in error: %v", err) + rtest.Assert(t, !excluded, "Save() excluded the node, that's unexpected") + }) } } From b51bf0c0c44e63723a915ba995feeed2296a8ff0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 15 Nov 2024 23:02:52 +0100 Subject: [PATCH 826/893] fs: test File implementation of Local FS --- internal/fs/fs_local_test.go | 222 ++++++++++++++++++++++++++++++ internal/fs/fs_local_unix_test.go | 40 ++++++ 2 files changed, 262 insertions(+) create mode 100644 internal/fs/fs_local_test.go create mode 100644 internal/fs/fs_local_unix_test.go diff --git a/internal/fs/fs_local_test.go b/internal/fs/fs_local_test.go new file mode 100644 index 00000000000..b1e85de0a7b --- /dev/null +++ b/internal/fs/fs_local_test.go @@ -0,0 +1,222 @@ +package fs + +import ( + "io" + "os" + "path/filepath" + "slices" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +type fsLocalMetadataTestcase struct { + name string + follow bool + setup func(t *testing.T, path string) + nodeType restic.NodeType +} + +func TestFSLocalMetadata(t *testing.T) { + for _, test := range []fsLocalMetadataTestcase{ + { + name: "file", + setup: func(t *testing.T, path string) { + rtest.OK(t, os.WriteFile(path, []byte("example"), 0o600)) + }, + nodeType: restic.NodeTypeFile, + }, + { + name: "directory", + setup: func(t *testing.T, path string) { + rtest.OK(t, os.Mkdir(path, 0o600)) + }, + nodeType: restic.NodeTypeDir, + }, + { + name: "symlink", + setup: func(t *testing.T, path string) { + rtest.OK(t, os.Symlink(path+"old", path)) + }, + nodeType: restic.NodeTypeSymlink, + }, + { + name: "symlink file", + follow: true, + setup: func(t *testing.T, path string) { + rtest.OK(t, os.WriteFile(path+"file", []byte("example"), 0o600)) + rtest.OK(t, os.Symlink(path+"file", path)) + }, + nodeType: restic.NodeTypeFile, + }, + } { + runFSLocalTestcase(t, test) + } +} + +func runFSLocalTestcase(t *testing.T, test fsLocalMetadataTestcase) { + t.Run(test.name, func(t *testing.T) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + test.setup(t, path) + + testFs := &Local{} + 
flags := 0 + if !test.follow { + flags |= O_NOFOLLOW + } + f, err := testFs.OpenFile(path, flags, true) + rtest.OK(t, err) + checkMetadata(t, f, path, test.follow, test.nodeType) + rtest.OK(t, f.Close()) + }) + +} + +func checkMetadata(t *testing.T, f File, path string, follow bool, nodeType restic.NodeType) { + fi, err := f.Stat() + rtest.OK(t, err) + var fi2 os.FileInfo + if follow { + fi2, err = os.Stat(path) + } else { + fi2, err = os.Lstat(path) + } + rtest.OK(t, err) + assertFIEqual(t, fi2, fi) + + node, err := f.ToNode(false) + rtest.OK(t, err) + + // ModTime is likely unique per file, thus it provides a good indication that it is from the correct file + rtest.Equals(t, fi.ModTime(), node.ModTime, "node ModTime") + rtest.Equals(t, nodeType, node.Type, "node Type") +} + +func assertFIEqual(t *testing.T, want os.FileInfo, got os.FileInfo) { + t.Helper() + rtest.Equals(t, want.Name(), got.Name(), "Name") + rtest.Equals(t, want.IsDir(), got.IsDir(), "IsDir") + rtest.Equals(t, want.ModTime(), got.ModTime(), "ModTime") + rtest.Equals(t, want.Mode(), got.Mode(), "Mode") + rtest.Equals(t, want.Size(), got.Size(), "Size") +} + +func TestFSLocalRead(t *testing.T) { + testFSLocalRead(t, false) + testFSLocalRead(t, true) +} + +func testFSLocalRead(t *testing.T, makeReadable bool) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + testdata := "example" + rtest.OK(t, os.WriteFile(path, []byte(testdata), 0o600)) + + f := openReadable(t, path, makeReadable) + checkMetadata(t, f, path, false, restic.NodeTypeFile) + + data, err := io.ReadAll(f) + rtest.OK(t, err) + rtest.Equals(t, testdata, string(data), "file content mismatch") + + rtest.OK(t, f.Close()) +} + +func openReadable(t *testing.T, path string, useMakeReadable bool) File { + testFs := &Local{} + f, err := testFs.OpenFile(path, O_NOFOLLOW, useMakeReadable) + rtest.OK(t, err) + if useMakeReadable { + // file was opened as metadataOnly. 
open for reading + rtest.OK(t, f.MakeReadable()) + } + return f +} + +func TestFSLocalReaddir(t *testing.T) { + testFSLocalReaddir(t, false) + testFSLocalReaddir(t, true) +} + +func testFSLocalReaddir(t *testing.T, makeReadable bool) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + rtest.OK(t, os.Mkdir(path, 0o700)) + entries := []string{"testfile"} + rtest.OK(t, os.WriteFile(filepath.Join(path, entries[0]), []byte("example"), 0o600)) + + f := openReadable(t, path, makeReadable) + checkMetadata(t, f, path, false, restic.NodeTypeDir) + + names, err := f.Readdirnames(-1) + rtest.OK(t, err) + slices.Sort(names) + rtest.Equals(t, entries, names, "directory content mismatch") + + rtest.OK(t, f.Close()) +} + +func TestFSLocalReadableRace(t *testing.T) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + testdata := "example" + rtest.OK(t, os.WriteFile(path, []byte(testdata), 0o600)) + + testFs := &Local{} + f, err := testFs.OpenFile(path, O_NOFOLLOW, true) + rtest.OK(t, err) + + pathNew := path + "new" + rtest.OK(t, os.Rename(path, pathNew)) + + err = f.MakeReadable() + if err == nil { + // a file handle based implementation should still work + checkMetadata(t, f, pathNew, false, restic.NodeTypeFile) + + data, err := io.ReadAll(f) + rtest.OK(t, err) + rtest.Equals(t, testdata, string(data), "file content mismatch") + } + + rtest.OK(t, f.Close()) +} + +func TestFSLocalTypeChange(t *testing.T) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + testdata := "example" + rtest.OK(t, os.WriteFile(path, []byte(testdata), 0o600)) + + testFs := &Local{} + f, err := testFs.OpenFile(path, O_NOFOLLOW, true) + rtest.OK(t, err) + // cache metadata + _, err = f.Stat() + rtest.OK(t, err) + + pathNew := path + "new" + // rename instead of unlink to let the test also work on windows + rtest.OK(t, os.Rename(path, pathNew)) + + rtest.OK(t, os.Mkdir(path, 0o700)) + rtest.OK(t, f.MakeReadable()) + + fi, err := f.Stat() + rtest.OK(t, err) + if !fi.IsDir() { + // a file handle based implementation should still reference the file + checkMetadata(t, f, pathNew, false, restic.NodeTypeFile) + + data, err := io.ReadAll(f) + rtest.OK(t, err) + rtest.Equals(t, testdata, string(data), "file content mismatch") + } + // else: + // path-based implementation + // nothing to test here. 
stat returned the new file type + + rtest.OK(t, f.Close()) +} diff --git a/internal/fs/fs_local_unix_test.go b/internal/fs/fs_local_unix_test.go new file mode 100644 index 00000000000..5bcb5efd032 --- /dev/null +++ b/internal/fs/fs_local_unix_test.go @@ -0,0 +1,40 @@ +//go:build unix + +package fs + +import ( + "syscall" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestFSLocalMetadataUnix(t *testing.T) { + for _, test := range []fsLocalMetadataTestcase{ + { + name: "socket", + setup: func(t *testing.T, path string) { + fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + rtest.OK(t, err) + defer func() { + _ = syscall.Close(fd) + }() + + addr := &syscall.SockaddrUnix{Name: path} + rtest.OK(t, syscall.Bind(fd, addr)) + }, + nodeType: restic.NodeTypeSocket, + }, + { + name: "fifo", + setup: func(t *testing.T, path string) { + rtest.OK(t, mkfifo(path, 0o600)) + }, + nodeType: restic.NodeTypeFifo, + }, + // device files can only be created as root + } { + runFSLocalTestcase(t, test) + } +} From 0997f26461ae7bf7ee18175b30d396c2506a2a5e Mon Sep 17 00:00:00 2001 From: Daniel Danner Date: Fri, 29 Nov 2024 23:18:34 +0100 Subject: [PATCH 827/893] Reword description --max-repack-size for clarity --- cmd/restic/cmd_prune.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index a74ba23f786..75a6ffa72c1 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -74,7 +74,7 @@ func init() { func addPruneOptions(c *cobra.Command, pruneOptions *PruneOptions) { f := c.Flags() f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')") - f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "maximum `size` to repack (allowed suffixes: k/K, m/M, g/G, t/T)") + f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "stop after repacking this much data in total (allowed suffixes: k/K, m/M, g/G, t/T)") f.BoolVar(&pruneOptions.RepackCacheableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable") f.BoolVar(&pruneOptions.RepackSmall, "repack-small", false, "repack pack files below 80% of target pack size") f.BoolVar(&pruneOptions.RepackUncompressed, "repack-uncompressed", false, "repack all uncompressed data") From c5fb46da53e198397712beb62bc4007789e6fd7f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 2 Nov 2024 22:55:16 +0100 Subject: [PATCH 828/893] archiver: ignore files removed in the meantime --- changelog/unreleased/issue-2165 | 16 +++++++++++ internal/archiver/archiver.go | 12 ++++++-- internal/archiver/archiver_test.go | 45 ++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 changelog/unreleased/issue-2165 diff --git a/changelog/unreleased/issue-2165 b/changelog/unreleased/issue-2165 new file mode 100644 index 00000000000..12bc9dfd980 --- /dev/null +++ b/changelog/unreleased/issue-2165 @@ -0,0 +1,16 @@ +Bugfix: Ignore disappeared backup source files + +If during a backup files were removed between restic listing the directory +content and backing up the file in question, the following error could occur: + +``` +error: lstat /some/file/name: no such file or directory +``` + +The backup command now ignores this particular error and silently skips the +removed file. 
+ +https://github.com/restic/restic/issues/2165 +https://github.com/restic/restic/issues/3098 +https://github.com/restic/restic/pull/5143 +https://github.com/restic/restic/pull/5145 diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index f4ff6f47b1e..5d4648e03de 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -464,6 +464,12 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous } return futureNode{}, true, nil } + filterNotExist := func(err error) error { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } // exclude files by path before running Lstat to reduce number of lstat calls if !arch.SelectByName(abstarget) { debug.Log("%v is excluded by path", target) @@ -473,7 +479,8 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous meta, err := arch.FS.OpenFile(target, fs.O_NOFOLLOW, true) if err != nil { debug.Log("open metadata for %v returned error: %v", target, err) - return filterError(err) + // ignore if file disappeared since it was returned by readdir + return filterError(filterNotExist(err)) } closeFile := true defer func() { @@ -489,7 +496,8 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous fi, err := meta.Stat() if err != nil { debug.Log("lstat() for %v returned error: %v", target, err) - return filterError(err) + // ignore if file disappeared since it was returned by readdir + return filterError(filterNotExist(err)) } if !arch.Select(abstarget, fi, arch.FS) { debug.Log("%v is excluded", target) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index e698ba74106..f57c4894b86 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -2479,3 +2479,48 @@ func TestIrregularFile(t *testing.T) { t.Errorf("Save() excluded the node, that's unexpected") } } + +type missingFS struct { + fs.FS + errorOnOpen bool +} + +func (fs *missingFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { + if fs.errorOnOpen { + return nil, os.ErrNotExist + } + + return &missingFile{}, nil +} + +type missingFile struct { + fs.File +} + +func (f *missingFile) Stat() (os.FileInfo, error) { + return nil, os.ErrNotExist +} + +func (f *missingFile) Close() error { + // prevent segfault in test + return nil +} + +func TestDisappearedFile(t *testing.T) { + tempdir, repo := prepareTempdirRepoSrc(t, TestDir{}) + + back := rtest.Chdir(t, tempdir) + defer back() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // depending on the underlying FS implementation a missing file may be detected by OpenFile or + // the subsequent file.Stat() call. Thus test both cases. 
+ for _, errorOnOpen := range []bool{false, true} { + arch := New(repo, fs.Track{FS: &missingFS{FS: &fs.Local{}, errorOnOpen: errorOnOpen}}, Options{}) + _, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "testdir"), nil) + rtest.OK(t, err) + rtest.Assert(t, excluded, "testfile should have been excluded") + } +} From 3c0ceda536ca017c50a5a3f9743435e11519c2a2 Mon Sep 17 00:00:00 2001 From: Tyson Key Date: Tue, 5 Nov 2024 15:52:45 +0000 Subject: [PATCH 829/893] Add basic support for DragonFlyBSD --- internal/fs/node_noxattr.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/fs/node_noxattr.go b/internal/fs/node_noxattr.go index 27bc6913a6f..281a16dbdcb 100644 --- a/internal/fs/node_noxattr.go +++ b/internal/fs/node_noxattr.go @@ -1,5 +1,5 @@ -//go:build aix || netbsd || openbsd -// +build aix netbsd openbsd +//go:build aix || dragonfly || netbsd || openbsd +// +build aix dragonfly netbsd openbsd package fs From dc89aad722fefe28c088c9eac07d7d1eca42c003 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Nov 2024 15:41:43 +0100 Subject: [PATCH 830/893] build dragonflybsd binaries --- changelog/unreleased/issue-5131 | 6 ++++++ helpers/build-release-binaries/main.go | 17 +++++++++-------- 2 files changed, 15 insertions(+), 8 deletions(-) create mode 100644 changelog/unreleased/issue-5131 diff --git a/changelog/unreleased/issue-5131 b/changelog/unreleased/issue-5131 new file mode 100644 index 00000000000..fd38a216dd3 --- /dev/null +++ b/changelog/unreleased/issue-5131 @@ -0,0 +1,6 @@ +Enhancement: Add DragonflyBSD support + +Restic can now be compiled on DragonflyBSD. + +https://github.com/restic/restic/issues/5131 +https://github.com/restic/restic/pull/5138 diff --git a/helpers/build-release-binaries/main.go b/helpers/build-release-binaries/main.go index 81d126b0053..8fe8c24fb9b 100644 --- a/helpers/build-release-binaries/main.go +++ b/helpers/build-release-binaries/main.go @@ -243,14 +243,15 @@ func buildTargets(sourceDir, outputDir string, targets map[string][]string) { } var defaultBuildTargets = map[string][]string{ - "aix": {"ppc64"}, - "darwin": {"amd64", "arm64"}, - "freebsd": {"386", "amd64", "arm"}, - "linux": {"386", "amd64", "arm", "arm64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "riscv64", "s390x"}, - "netbsd": {"386", "amd64"}, - "openbsd": {"386", "amd64"}, - "windows": {"386", "amd64"}, - "solaris": {"amd64"}, + "aix": {"ppc64"}, + "darwin": {"amd64", "arm64"}, + "dragonfly": {"amd64"}, + "freebsd": {"386", "amd64", "arm"}, + "linux": {"386", "amd64", "arm", "arm64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "riscv64", "s390x"}, + "netbsd": {"386", "amd64"}, + "openbsd": {"386", "amd64"}, + "windows": {"386", "amd64"}, + "solaris": {"amd64"}, } func downloadModules(sourceDir string) { From 5df6bf80b1495c140e2d038038bf002197287313 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Nov 2024 16:07:18 +0100 Subject: [PATCH 831/893] fs: retry vss creation on VSS_E_SNAPSHOT_SET_IN_PROGRESS error Depending on the change packages, the VSS tests from ./cmd/restic and the fs package may overlap in time. This causes the snapshot creation to fail. Add retries in that case. 
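For context: snapshot set creation below is retried only while the overall snapshot deadline has not been reached, so overlapping VSS test runs back off instead of failing immediately. A simplified, hypothetical sketch of that pattern (the actual change retries iVssBackupComponents.StartSnapshotSet and uses a slightly different deadline check):

```go
// retryUntilDeadline is an illustrative helper only: retry an operation that
// failed with a known transient error, sleeping between attempts, as long as
// another attempt still fits before the deadline.
func retryUntilDeadline(deadline time.Time, sleep time.Duration, transient error, op func() error) error {
	for {
		err := op()
		if !errors.Is(err, transient) || !time.Now().Add(sleep).Before(deadline) {
			return err
		}
		time.Sleep(sleep)
	}
}
```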
--- internal/fs/vss_windows.go | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 7281e0210b2..840e971078d 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -171,6 +171,11 @@ func (h HRESULT) Str() string { return "UNKNOWN" } +// Error implements the error interface +func (h HRESULT) Error() string { + return h.Str() +} + // VssError encapsulates errors returned from calling VSS api. type vssError struct { text string @@ -195,6 +200,11 @@ func (e *vssError) Error() string { return fmt.Sprintf("VSS error: %s: %s (%#x)", e.text, e.hresult.Str(), e.hresult) } +// Unwrap returns the underlying HRESULT error +func (e *vssError) Unwrap() error { + return e.hresult +} + // vssTextError encapsulates errors returned from calling VSS api. type vssTextError struct { text string @@ -943,10 +953,23 @@ func NewVssSnapshot(provider string, "%s", volume)) } - snapshotSetID, err := iVssBackupComponents.StartSnapshotSet() - if err != nil { - iVssBackupComponents.Release() - return VssSnapshot{}, err + const retryStartSnapshotSetSleep = 5 * time.Second + var snapshotSetID ole.GUID + for { + var err error + snapshotSetID, err = iVssBackupComponents.StartSnapshotSet() + if errors.Is(err, VSS_E_SNAPSHOT_SET_IN_PROGRESS) && time.Now().Add(-retryStartSnapshotSetSleep).Before(deadline) { + // retry snapshot set creation while deadline is not reached + time.Sleep(retryStartSnapshotSetSleep) + continue + } + + if err != nil { + iVssBackupComponents.Release() + return VssSnapshot{}, err + } else { + break + } } if err := iVssBackupComponents.AddToSnapshotSet(volume, providerID, &snapshotSetID); err != nil { From 641390103df726f2023c93377fec35379a72a485 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 3 Nov 2024 16:01:59 +0100 Subject: [PATCH 832/893] fs: inline ExtendedStat --- cmd/restic/cmd_backup_integration_test.go | 2 +- internal/archiver/archiver.go | 15 +++--- internal/archiver/archiver_test.go | 22 ++++----- internal/archiver/archiver_unix_test.go | 22 ++++----- internal/archiver/archiver_windows_test.go | 22 +++++---- internal/archiver/exclude.go | 42 ++++++---------- internal/archiver/exclude_test.go | 2 +- internal/archiver/scanner.go | 5 +- internal/archiver/scanner_test.go | 2 +- internal/fs/deviceid_unix.go | 31 ------------ internal/fs/deviceid_windows.go | 16 ------- internal/fs/fs_local.go | 36 +++++++------- internal/fs/fs_local_test.go | 4 +- internal/fs/fs_local_vss.go | 3 +- internal/fs/fs_reader.go | 56 +++++++++------------- internal/fs/fs_reader_test.go | 6 +-- internal/fs/interface.go | 7 +-- internal/fs/node.go | 15 +++--- internal/fs/stat.go | 2 +- internal/fs/stat_bsd.go | 6 +-- internal/fs/stat_unix.go | 6 +-- internal/fs/stat_windows.go | 4 +- 22 files changed, 121 insertions(+), 205 deletions(-) delete mode 100644 internal/fs/deviceid_unix.go delete mode 100644 internal/fs/deviceid_windows.go diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 4278f07ca80..06d71e345aa 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -132,7 +132,7 @@ type vssDeleteOriginalFS struct { hasRemoved bool } -func (f *vssDeleteOriginalFS) Lstat(name string) (os.FileInfo, error) { +func (f *vssDeleteOriginalFS) Lstat(name string) (*fs.ExtendedFileInfo, error) { if !f.hasRemoved { // call Lstat to trigger snapshot creation _, _ = f.FS.Lstat(name) diff --git 
a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 5d4648e03de..f730fe0a553 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -25,7 +25,7 @@ type SelectByNameFunc func(item string) bool // SelectFunc returns true for all items that should be included (files and // dirs). If false is returned, files are ignored and dirs are not even walked. -type SelectFunc func(item string, fi os.FileInfo, fs fs.FS) bool +type SelectFunc func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool // ErrorFunc is called when an error during archiving occurs. When nil is // returned, the archiver continues, otherwise it aborts and passes the error @@ -189,7 +189,7 @@ func New(repo archiverRepo, filesystem fs.FS, opts Options) *Archiver { arch := &Archiver{ Repo: repo, SelectByName: func(_ string) bool { return true }, - Select: func(_ string, _ os.FileInfo, _ fs.FS) bool { return true }, + Select: func(_ string, _ *fs.ExtendedFileInfo, _ fs.FS) bool { return true }, FS: filesystem, Options: opts.ApplyDefaults(), @@ -618,27 +618,26 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous // fileChanged tries to detect whether a file's content has changed compared // to the contents of node, which describes the same path in the parent backup. // It should only be run for regular files. -func fileChanged(fs fs.FS, fi os.FileInfo, node *restic.Node, ignoreFlags uint) bool { +func fileChanged(fs fs.FS, fi *fs.ExtendedFileInfo, node *restic.Node, ignoreFlags uint) bool { switch { case node == nil: return true case node.Type != restic.NodeTypeFile: // We're only called for regular files, so this is a type change. return true - case uint64(fi.Size()) != node.Size: + case uint64(fi.Size) != node.Size: return true - case !fi.ModTime().Equal(node.ModTime): + case !fi.ModTime.Equal(node.ModTime): return true } checkCtime := ignoreFlags&ChangeIgnoreCtime == 0 checkInode := ignoreFlags&ChangeIgnoreInode == 0 - extFI := fs.ExtendedStat(fi) switch { - case checkCtime && !extFI.ChangeTime.Equal(node.ChangeTime): + case checkCtime && !fi.ChangeTime.Equal(node.ChangeTime): return true - case checkInode && node.Inode != extFI.Inode: + case checkInode && node.Inode != fi.Inode: return true } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index f57c4894b86..038afd11d50 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -516,13 +516,13 @@ func chmodTwice(t testing.TB, name string) { rtest.OK(t, err) } -func lstat(t testing.TB, name string) os.FileInfo { +func lstat(t testing.TB, name string) *fs.ExtendedFileInfo { fi, err := os.Lstat(name) if err != nil { t.Fatal(err) } - return fi + return fs.ExtendedStat(fi) } func setTimestamp(t testing.TB, filename string, atime, mtime time.Time) { @@ -660,7 +660,7 @@ func TestFileChanged(t *testing.T) { rename(t, filename, tempname) save(t, filename, defaultContent) remove(t, tempname) - setTimestamp(t, filename, fi.ModTime(), fi.ModTime()) + setTimestamp(t, filename, fi.ModTime, fi.ModTime) }, ChangeIgnore: ChangeIgnoreCtime | ChangeIgnoreInode, SameFile: true, @@ -1520,7 +1520,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo, _ fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { return true }, }, @@ -1537,7 +1537,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - 
selFn: func(item string, fi os.FileInfo, _ fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { return false }, err: "snapshot is empty", @@ -1564,7 +1564,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo, _ fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { return filepath.Ext(item) != ".txt" }, }, @@ -1588,7 +1588,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo, fs fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { return fs.Base(item) != "subdir" }, }, @@ -1597,7 +1597,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { src: TestDir{ "foo": TestFile{Content: "foo"}, }, - selFn: func(item string, fi os.FileInfo, fs fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { return fs.IsAbs(item) }, }, @@ -2202,7 +2202,7 @@ func snapshot(t testing.TB, repo archiverRepo, fs fs.FS, parent *restic.Snapshot type overrideFS struct { fs.FS - overrideFI os.FileInfo + overrideFI *fs.ExtendedFileInfo resetFIOnRead bool overrideNode *restic.Node overrideErr error @@ -2225,7 +2225,7 @@ type overrideFile struct { ofs *overrideFS } -func (f overrideFile) Stat() (os.FileInfo, error) { +func (f overrideFile) Stat() (*fs.ExtendedFileInfo, error) { if f.ofs.overrideFI == nil { return f.File.Stat() } @@ -2497,7 +2497,7 @@ type missingFile struct { fs.File } -func (f *missingFile) Stat() (os.FileInfo, error) { +func (f *missingFile) Stat() (*fs.ExtendedFileInfo, error) { return nil, os.ErrNotExist } diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index deeab645947..d3e87b57ec2 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -29,7 +29,7 @@ func (fi wrappedFileInfo) Mode() os.FileMode { } // wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed. 
-func wrapFileInfo(fi os.FileInfo) os.FileInfo { +func wrapFileInfo(fi *fs.ExtendedFileInfo) *fs.ExtendedFileInfo { // get the underlying stat_t and modify the values stat := fi.Sys().(*syscall.Stat_t) stat.Mode = mockFileInfoMode @@ -37,22 +37,22 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { stat.Gid = mockFileInfoGID // wrap the os.FileInfo so we can return a modified stat_t - res := wrappedFileInfo{ - FileInfo: fi, + return fs.ExtendedStat(wrappedFileInfo{ + FileInfo: fi.FileInfo, sys: stat, mode: mockFileInfoMode, - } - - return res + }) } // wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file -func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo { +func wrapIrregularFileInfo(fi *fs.ExtendedFileInfo) *fs.ExtendedFileInfo { // wrap the os.FileInfo so we can return a modified stat_t - return wrappedFileInfo{ - FileInfo: fi, - sys: fi.Sys().(*syscall.Stat_t), - mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, + return &fs.ExtendedFileInfo{ + FileInfo: wrappedFileInfo{ + FileInfo: fi.FileInfo, + sys: fi.Sys(), + mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, + }, } } diff --git a/internal/archiver/archiver_windows_test.go b/internal/archiver/archiver_windows_test.go index ac8a67f2bd3..2e873c1b7e9 100644 --- a/internal/archiver/archiver_windows_test.go +++ b/internal/archiver/archiver_windows_test.go @@ -5,6 +5,8 @@ package archiver import ( "os" + + "github.com/restic/restic/internal/fs" ) type wrappedFileInfo struct { @@ -17,20 +19,20 @@ func (fi wrappedFileInfo) Mode() os.FileMode { } // wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed. -func wrapFileInfo(fi os.FileInfo) os.FileInfo { +func wrapFileInfo(fi *fs.ExtendedFileInfo) *fs.ExtendedFileInfo { // wrap the os.FileInfo and return the modified mode, uid and gid are ignored on Windows - res := wrappedFileInfo{ - FileInfo: fi, + return fs.ExtendedStat(wrappedFileInfo{ + FileInfo: fi.FileInfo, mode: mockFileInfoMode, - } - - return res + }) } // wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file -func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo { - return wrappedFileInfo{ - FileInfo: fi, - mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, +func wrapIrregularFileInfo(fi *fs.ExtendedFileInfo) *fs.ExtendedFileInfo { + return &fs.ExtendedFileInfo{ + FileInfo: wrappedFileInfo{ + FileInfo: fi.FileInfo, + mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, + }, } } diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go index 418517fd91c..e1939d2921a 100644 --- a/internal/archiver/exclude.go +++ b/internal/archiver/exclude.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "runtime" "strings" "sync" @@ -21,7 +22,7 @@ type RejectByNameFunc func(path string) bool // RejectFunc is a function that takes a filename and os.FileInfo of a // file that would be included in the backup. The function returns true if it // should be excluded (rejected) from the backup. 
-type RejectFunc func(path string, fi os.FileInfo, fs fs.FS) bool +type RejectFunc func(path string, fi *fs.ExtendedFileInfo, fs fs.FS) bool func CombineRejectByNames(funcs []RejectByNameFunc) SelectByNameFunc { return func(item string) bool { @@ -35,7 +36,7 @@ func CombineRejectByNames(funcs []RejectByNameFunc) SelectByNameFunc { } func CombineRejects(funcs []RejectFunc) SelectFunc { - return func(item string, fi os.FileInfo, fs fs.FS) bool { + return func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { for _, reject := range funcs { if reject(item, fi, fs) { return false @@ -104,7 +105,7 @@ func RejectIfPresent(excludeFileSpec string, warnf func(msg string, args ...inte } debug.Log("using %q as exclusion tagfile", tf) rc := newRejectionCache() - return func(filename string, _ os.FileInfo, fs fs.FS) bool { + return func(filename string, _ *fs.ExtendedFileInfo, fs fs.FS) bool { return isExcludedByFile(filename, tf, tc, rc, fs, warnf) }, nil } @@ -186,6 +187,10 @@ type deviceMap map[string]uint64 // newDeviceMap creates a new device map from the list of source paths. func newDeviceMap(allowedSourcePaths []string, fs fs.FS) (deviceMap, error) { + if runtime.GOOS == "windows" { + return nil, errors.New("Device IDs are not supported on Windows") + } + deviceMap := make(map[string]uint64) for _, item := range allowedSourcePaths { @@ -199,12 +204,7 @@ func newDeviceMap(allowedSourcePaths []string, fs fs.FS) (deviceMap, error) { return nil, err } - id, err := fs.DeviceID(fi) - if err != nil { - return nil, err - } - - deviceMap[item] = id + deviceMap[item] = fi.DeviceID } if len(deviceMap) == 0 { @@ -254,15 +254,8 @@ func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { } debug.Log("allowed devices: %v\n", deviceMap) - return func(item string, fi os.FileInfo, fs fs.FS) bool { - id, err := fs.DeviceID(fi) - if err != nil { - // This should never happen because gatherDevices() would have - // errored out earlier. If it still does that's a reason to panic. 
- panic(err) - } - - allowed, err := deviceMap.IsAllowed(fs.Clean(item), id, fs) + return func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { + allowed, err := deviceMap.IsAllowed(fs.Clean(item), fi.DeviceID, fs) if err != nil { // this should not happen panic(fmt.Sprintf("error checking device ID of %v: %v", item, err)) @@ -290,14 +283,7 @@ func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { return true } - parentDeviceID, err := fs.DeviceID(parentFI) - if err != nil { - debug.Log("item %v: getting device ID of parent directory: %v", item, err) - // if in doubt, reject - return true - } - - parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID, fs) + parentAllowed, err := deviceMap.IsAllowed(parentDir, parentFI.DeviceID, fs) if err != nil { debug.Log("item %v: error checking parent directory: %v", item, err) // if in doubt, reject @@ -315,13 +301,13 @@ func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { } func RejectBySize(maxSize int64) (RejectFunc, error) { - return func(item string, fi os.FileInfo, _ fs.FS) bool { + return func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { // directory will be ignored if fi.IsDir() { return false } - filesize := fi.Size() + filesize := fi.Size if filesize > maxSize { debug.Log("file %s is oversize: %d", item, filesize) return true diff --git a/internal/archiver/exclude_test.go b/internal/archiver/exclude_test.go index 7eb24b08b6e..9bfa5d83fe6 100644 --- a/internal/archiver/exclude_test.go +++ b/internal/archiver/exclude_test.go @@ -193,7 +193,7 @@ func TestIsExcludedByFileSize(t *testing.T) { return err } - excluded := sizeExclude(p, fi, nil) + excluded := sizeExclude(p, fs.ExtendedStat(fi), nil) // the log message helps debugging in case the test fails t.Logf("%q: dir:%t; size:%d; excluded:%v", p, fi.IsDir(), fi.Size(), excluded) m[p] = !excluded diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go index debd09aa32c..ebcca0df1bf 100644 --- a/internal/archiver/scanner.go +++ b/internal/archiver/scanner.go @@ -2,7 +2,6 @@ package archiver import ( "context" - "os" "sort" "github.com/restic/restic/internal/debug" @@ -25,7 +24,7 @@ func NewScanner(filesystem fs.FS) *Scanner { return &Scanner{ FS: filesystem, SelectByName: func(_ string) bool { return true }, - Select: func(_ string, _ os.FileInfo, _ fs.FS) bool { return true }, + Select: func(_ string, _ *fs.ExtendedFileInfo, _ fs.FS) bool { return true }, Error: func(_ string, err error) error { return err }, Result: func(_ string, _ ScanStats) {}, } @@ -121,7 +120,7 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (Sca switch { case fi.Mode().IsRegular(): stats.Files++ - stats.Bytes += uint64(fi.Size()) + stats.Bytes += uint64(fi.Size) case fi.Mode().IsDir(): names, err := fs.Readdirnames(s.FS, target, fs.O_NOFOLLOW) if err != nil { diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go index e4e2c9f59a1..0504cb8bd8e 100644 --- a/internal/archiver/scanner_test.go +++ b/internal/archiver/scanner_test.go @@ -56,7 +56,7 @@ func TestScanner(t *testing.T) { }, }, }, - selFn: func(item string, fi os.FileInfo, fs fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { if fi.IsDir() { return true } diff --git a/internal/fs/deviceid_unix.go b/internal/fs/deviceid_unix.go deleted file mode 100644 index 4d5593335ee..00000000000 --- a/internal/fs/deviceid_unix.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !windows -// +build 
!windows - -package fs - -import ( - "os" - "syscall" - - "github.com/restic/restic/internal/errors" -) - -// deviceID extracts the device ID from an os.FileInfo object by casting it -// to syscall.Stat_t -func deviceID(fi os.FileInfo) (deviceID uint64, err error) { - if fi == nil { - return 0, errors.New("unable to determine device: fi is nil") - } - - if fi.Sys() == nil { - return 0, errors.New("unable to determine device: fi.Sys() is nil") - } - - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - // st.Dev is uint32 on Darwin and uint64 on Linux. Just cast - // everything to uint64. - return uint64(st.Dev), nil - } - - return 0, errors.New("Could not cast to syscall.Stat_t") -} diff --git a/internal/fs/deviceid_windows.go b/internal/fs/deviceid_windows.go deleted file mode 100644 index bfb22dc9a08..00000000000 --- a/internal/fs/deviceid_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build windows -// +build windows - -package fs - -import ( - "os" - - "github.com/restic/restic/internal/errors" -) - -// deviceID extracts the device ID from an os.FileInfo object by casting it -// to syscall.Stat_t -func deviceID(_ os.FileInfo) (deviceID uint64, err error) { - return 0, errors.New("Device IDs are not supported on Windows") -} diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 5e6c72d0a3e..fc6c69cf219 100644 --- a/internal/fs/fs_local.go +++ b/internal/fs/fs_local.go @@ -36,19 +36,12 @@ func (fs Local) OpenFile(name string, flag int, metadataOnly bool) (File, error) // If the file is a symbolic link, the returned FileInfo // describes the symbolic link. Lstat makes no attempt to follow the link. // If there is an error, it will be of type *PathError. -func (fs Local) Lstat(name string) (os.FileInfo, error) { - return os.Lstat(fixpath(name)) -} - -// DeviceID extracts the DeviceID from the given FileInfo. If the fs does -// not support a DeviceID, it returns an error instead -func (fs Local) DeviceID(fi os.FileInfo) (id uint64, err error) { - return deviceID(fi) -} - -// ExtendedStat converts the give FileInfo into ExtendedFileInfo. 
-func (fs Local) ExtendedStat(fi os.FileInfo) ExtendedFileInfo { - return ExtendedStat(fi) +func (fs Local) Lstat(name string) (*ExtendedFileInfo, error) { + fi, err := os.Lstat(fixpath(name)) + if err != nil { + return nil, err + } + return extendedStat(fi), nil } // Join joins any number of path elements into a single path, adding a @@ -96,7 +89,7 @@ type localFile struct { name string flag int f *os.File - fi os.FileInfo + fi *ExtendedFileInfo } // See the File interface for a description of each method @@ -137,18 +130,23 @@ func (f *localFile) cacheFI() error { if f.fi != nil { return nil } + var fi os.FileInfo var err error if f.f != nil { - f.fi, err = f.f.Stat() + fi, err = f.f.Stat() } else if f.flag&O_NOFOLLOW != 0 { - f.fi, err = os.Lstat(f.name) + fi, err = os.Lstat(f.name) } else { - f.fi, err = os.Stat(f.name) + fi, err = os.Stat(f.name) } - return err + if err != nil { + return err + } + f.fi = extendedStat(fi) + return nil } -func (f *localFile) Stat() (os.FileInfo, error) { +func (f *localFile) Stat() (*ExtendedFileInfo, error) { err := f.cacheFI() // the call to cacheFI MUST happen before reading from f.fi return f.fi, err diff --git a/internal/fs/fs_local_test.go b/internal/fs/fs_local_test.go index b1e85de0a7b..74cc8b48c33 100644 --- a/internal/fs/fs_local_test.go +++ b/internal/fs/fs_local_test.go @@ -84,13 +84,13 @@ func checkMetadata(t *testing.T, f File, path string, follow bool, nodeType rest fi2, err = os.Lstat(path) } rtest.OK(t, err) - assertFIEqual(t, fi2, fi) + assertFIEqual(t, fi2, fi.FileInfo) node, err := f.ToNode(false) rtest.OK(t, err) // ModTime is likely unique per file, thus it provides a good indication that it is from the correct file - rtest.Equals(t, fi.ModTime(), node.ModTime, "node ModTime") + rtest.Equals(t, fi.ModTime, node.ModTime, "node ModTime") rtest.Equals(t, nodeType, node.Type, "node Type") } diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index fe82b85e13d..dfee31779a3 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -1,7 +1,6 @@ package fs import ( - "os" "path/filepath" "runtime" "strings" @@ -131,7 +130,7 @@ func (fs *LocalVss) OpenFile(name string, flag int, metadataOnly bool) (File, er } // Lstat wraps the Lstat method of the underlying file system. -func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { +func (fs *LocalVss) Lstat(name string) (*ExtendedFileInfo, error) { return fs.FS.Lstat(fs.snapshotPath(name)) } diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 8728b274c71..8b76687305a 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -5,6 +5,7 @@ import ( "io" "os" "path" + "slices" "sync" "syscall" "time" @@ -40,12 +41,14 @@ func (fs *Reader) VolumeName(_ string) string { return "" } -func (fs *Reader) fi() os.FileInfo { - return fakeFileInfo{ - name: fs.Name, - size: fs.Size, - mode: fs.Mode, - modtime: fs.ModTime, +func (fs *Reader) fi() *ExtendedFileInfo { + return &ExtendedFileInfo{ + FileInfo: fakeFileInfo{ + name: fs.Name, + size: fs.Size, + mode: fs.Mode, + modtime: fs.ModTime, + }, } } @@ -68,7 +71,7 @@ func (fs *Reader) OpenFile(name string, flag int, _ bool) (f File, err error) { return f, nil case "/", ".": f = fakeDir{ - entries: []os.FileInfo{fs.fi()}, + entries: []string{fs.fi().Name()}, } return f, nil } @@ -80,15 +83,15 @@ func (fs *Reader) OpenFile(name string, flag int, _ bool) (f File, err error) { // If the file is a symbolic link, the returned FileInfo // describes the symbolic link. 
Lstat makes no attempt to follow the link. // If there is an error, it will be of type *os.PathError. -func (fs *Reader) Lstat(name string) (os.FileInfo, error) { - getDirInfo := func(name string) os.FileInfo { +func (fs *Reader) Lstat(name string) (*ExtendedFileInfo, error) { + getDirInfo := func(name string) *ExtendedFileInfo { fi := fakeFileInfo{ name: fs.Base(name), size: 0, mode: os.ModeDir | 0755, modtime: time.Now(), } - return fi + return &ExtendedFileInfo{FileInfo: fi} } switch name { @@ -112,16 +115,6 @@ func (fs *Reader) Lstat(name string) (os.FileInfo, error) { return nil, pathError("lstat", name, os.ErrNotExist) } -func (fs *Reader) DeviceID(_ os.FileInfo) (deviceID uint64, err error) { - return 0, errors.New("Device IDs are not supported") -} - -func (fs *Reader) ExtendedStat(fi os.FileInfo) ExtendedFileInfo { - return ExtendedFileInfo{ - FileInfo: fi, - } -} - // Join joins any number of path elements into a single path, adding a // Separator if necessary. Join calls Clean on the result; in particular, all // empty strings are ignored. On Windows, the result is a UNC path if and only @@ -165,13 +158,13 @@ func (fs *Reader) Dir(p string) string { return path.Dir(p) } -func newReaderFile(rd io.ReadCloser, fi os.FileInfo, allowEmptyFile bool) *readerFile { +func newReaderFile(rd io.ReadCloser, fi *ExtendedFileInfo, allowEmptyFile bool) *readerFile { return &readerFile{ ReadCloser: rd, AllowEmptyFile: allowEmptyFile, fakeFile: fakeFile{ - FileInfo: fi, - name: fi.Name(), + fi: fi, + name: fi.Name(), }, } } @@ -213,7 +206,7 @@ var _ File = &readerFile{} // except Stat() type fakeFile struct { name string - os.FileInfo + fi *ExtendedFileInfo } // ensure that fakeFile implements File @@ -235,12 +228,12 @@ func (f fakeFile) Close() error { return nil } -func (f fakeFile) Stat() (os.FileInfo, error) { - return f.FileInfo, nil +func (f fakeFile) Stat() (*ExtendedFileInfo, error) { + return f.fi, nil } func (f fakeFile) ToNode(_ bool) (*restic.Node, error) { - node := buildBasicNode(f.name, f.FileInfo) + node := buildBasicNode(f.name, f.fi.FileInfo) // fill minimal info with current values for uid, gid node.UID = uint32(os.Getuid()) @@ -252,7 +245,7 @@ func (f fakeFile) ToNode(_ bool) (*restic.Node, error) { // fakeDir implements Readdirnames and Readdir, everything else is delegated to fakeFile. type fakeDir struct { - entries []os.FileInfo + entries []string fakeFile } @@ -260,12 +253,7 @@ func (d fakeDir) Readdirnames(n int) ([]string, error) { if n > 0 { return nil, pathError("readdirnames", d.name, errors.New("not implemented")) } - names := make([]string, 0, len(d.entries)) - for _, entry := range d.entries { - names = append(names, entry.Name()) - } - - return names, nil + return slices.Clone(d.entries), nil } // fakeFileInfo implements the bare minimum of os.FileInfo. 
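As a rough illustration of the simplified caller pattern this patch enables (not part of the patch itself): once `Lstat` returns `*fs.ExtendedFileInfo`, callers read the `DeviceID` field directly instead of going through the removed `DeviceID`/`ExtendedStat` helpers. The helper name `onSameDevice` below is invented for this sketch; as the patch notes, device IDs are not supported on Windows.

    package example

    import "github.com/restic/restic/internal/fs"

    // onSameDevice is a made-up helper that reports whether two paths live on
    // the same device. It reads fi.DeviceID straight from the Lstat result
    // instead of calling the removed filesystem.DeviceID(fi) method.
    func onSameDevice(filesystem fs.FS, a, b string) (bool, error) {
        fiA, err := filesystem.Lstat(a)
        if err != nil {
            return false, err
        }
        fiB, err := filesystem.Lstat(b)
        if err != nil {
            return false, err
        }
        return fiA.DeviceID == fiB.DeviceID, nil
    }
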
diff --git a/internal/fs/fs_reader_test.go b/internal/fs/fs_reader_test.go index 7e7f6e77c8e..f2e8b201390 100644 --- a/internal/fs/fs_reader_test.go +++ b/internal/fs/fs_reader_test.go @@ -60,7 +60,7 @@ func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { } } -func checkFileInfo(t testing.TB, fi os.FileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) { +func checkFileInfo(t testing.TB, fi *ExtendedFileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) { if fi.IsDir() != isdir { t.Errorf("IsDir returned %t, want %t", fi.IsDir(), isdir) } @@ -69,8 +69,8 @@ func checkFileInfo(t testing.TB, fi os.FileInfo, filename string, modtime time.T t.Errorf("Mode() returned wrong value, want 0%o, got 0%o", mode, fi.Mode()) } - if !modtime.Equal(time.Time{}) && !fi.ModTime().Equal(modtime) { - t.Errorf("ModTime() returned wrong value, want %v, got %v", modtime, fi.ModTime()) + if !modtime.Equal(time.Time{}) && !fi.FileInfo.ModTime().Equal(modtime) { + t.Errorf("ModTime() returned wrong value, want %v, got %v", modtime, fi.FileInfo.ModTime()) } if path.Base(fi.Name()) != fi.Name() { diff --git a/internal/fs/interface.go b/internal/fs/interface.go index 7ff77713814..d75b0a91d20 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -2,7 +2,6 @@ package fs import ( "io" - "os" "github.com/restic/restic/internal/restic" ) @@ -18,9 +17,7 @@ type FS interface { // // Only the O_NOFOLLOW and O_DIRECTORY flags are supported. OpenFile(name string, flag int, metadataOnly bool) (File, error) - Lstat(name string) (os.FileInfo, error) - DeviceID(fi os.FileInfo) (deviceID uint64, err error) - ExtendedStat(fi os.FileInfo) ExtendedFileInfo + Lstat(name string) (*ExtendedFileInfo, error) Join(elem ...string) string Separator() string @@ -47,7 +44,7 @@ type File interface { io.Closer Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) + Stat() (*ExtendedFileInfo, error) // ToNode returns a restic.Node for the File. The internally used os.FileInfo // must be consistent with that returned by Stat(). In particular, the metadata // returned by consecutive calls to Stat() and ToNode() must match. diff --git a/internal/fs/node.go b/internal/fs/node.go index 065969537f8..be91562a456 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -15,15 +15,14 @@ import ( // nodeFromFileInfo returns a new node from the given path and FileInfo. It // returns the first error that is encountered, together with a node. 
-func nodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - node := buildBasicNode(path, fi) +func nodeFromFileInfo(path string, fi *ExtendedFileInfo, ignoreXattrListError bool) (*restic.Node, error) { + node := buildBasicNode(path, fi.FileInfo) - stat := ExtendedStat(fi) - if err := nodeFillExtendedStat(node, path, &stat); err != nil { + if err := nodeFillExtendedStat(node, path, fi); err != nil { return node, err } - err := nodeFillGenericAttributes(node, path, &stat) + err := nodeFillGenericAttributes(node, path, fi) err = errors.Join(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) return node, err } @@ -37,15 +36,15 @@ func buildBasicNode(path string, fi os.FileInfo) *restic.Node { ModTime: fi.ModTime(), } - node.Type = nodeTypeFromFileInfo(fi) + node.Type = nodeTypeFromFileInfo(fi.Mode()) if node.Type == restic.NodeTypeFile { node.Size = uint64(fi.Size()) } return node } -func nodeTypeFromFileInfo(fi os.FileInfo) restic.NodeType { - switch fi.Mode() & os.ModeType { +func nodeTypeFromFileInfo(mode os.FileMode) restic.NodeType { + switch mode & os.ModeType { case 0: return restic.NodeTypeFile case os.ModeDir: diff --git a/internal/fs/stat.go b/internal/fs/stat.go index e1006fd61b7..9e5be51e118 100644 --- a/internal/fs/stat.go +++ b/internal/fs/stat.go @@ -26,7 +26,7 @@ type ExtendedFileInfo struct { } // ExtendedStat returns an ExtendedFileInfo constructed from the os.FileInfo. -func ExtendedStat(fi os.FileInfo) ExtendedFileInfo { +func ExtendedStat(fi os.FileInfo) *ExtendedFileInfo { if fi == nil { panic("os.FileInfo is nil") } diff --git a/internal/fs/stat_bsd.go b/internal/fs/stat_bsd.go index 11e075b5098..de2254d2441 100644 --- a/internal/fs/stat_bsd.go +++ b/internal/fs/stat_bsd.go @@ -10,10 +10,10 @@ import ( ) // extendedStat extracts info into an ExtendedFileInfo for unix based operating systems. -func extendedStat(fi os.FileInfo) ExtendedFileInfo { +func extendedStat(fi os.FileInfo) *ExtendedFileInfo { s := fi.Sys().(*syscall.Stat_t) - extFI := ExtendedFileInfo{ + return &ExtendedFileInfo{ FileInfo: fi, DeviceID: uint64(s.Dev), Inode: uint64(s.Ino), @@ -29,6 +29,4 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo { ModTime: time.Unix(s.Mtimespec.Unix()), ChangeTime: time.Unix(s.Ctimespec.Unix()), } - - return extFI } diff --git a/internal/fs/stat_unix.go b/internal/fs/stat_unix.go index c555710312a..46077402f8b 100644 --- a/internal/fs/stat_unix.go +++ b/internal/fs/stat_unix.go @@ -10,10 +10,10 @@ import ( ) // extendedStat extracts info into an ExtendedFileInfo for unix based operating systems. -func extendedStat(fi os.FileInfo) ExtendedFileInfo { +func extendedStat(fi os.FileInfo) *ExtendedFileInfo { s := fi.Sys().(*syscall.Stat_t) - extFI := ExtendedFileInfo{ + return &ExtendedFileInfo{ FileInfo: fi, DeviceID: uint64(s.Dev), Inode: s.Ino, @@ -29,6 +29,4 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo { ModTime: time.Unix(s.Mtim.Unix()), ChangeTime: time.Unix(s.Ctim.Unix()), } - - return extFI } diff --git a/internal/fs/stat_windows.go b/internal/fs/stat_windows.go index 57f330fb54d..0dbc429fba2 100644 --- a/internal/fs/stat_windows.go +++ b/internal/fs/stat_windows.go @@ -11,7 +11,7 @@ import ( ) // extendedStat extracts info into an ExtendedFileInfo for Windows. 
-func extendedStat(fi os.FileInfo) ExtendedFileInfo { +func extendedStat(fi os.FileInfo) *ExtendedFileInfo { s, ok := fi.Sys().(*syscall.Win32FileAttributeData) if !ok { panic(fmt.Sprintf("conversion to syscall.Win32FileAttributeData failed, type is %T", fi.Sys())) @@ -31,5 +31,5 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo { // Windows does not have the concept of a "change time" in the sense Unix uses it, so we're using the LastWriteTime here. extFI.ChangeTime = extFI.ModTime - return extFI + return &extFI } From 847b2efba2ba330b873395d00d7ab34ddd2d30e6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 16 Nov 2024 16:53:34 +0100 Subject: [PATCH 833/893] archiver: remove fs parameter from fileChanged function --- internal/archiver/archiver.go | 4 ++-- internal/archiver/archiver_test.go | 15 ++++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index f730fe0a553..a89663084a0 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -510,7 +510,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous // check if the file has not changed before performing a fopen operation (more expensive, specially // in network filesystems) - if previous != nil && !fileChanged(arch.FS, fi, previous, arch.ChangeIgnoreFlags) { + if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) { if arch.allBlobsPresent(previous) { debug.Log("%v hasn't changed, using old list of blobs", target) arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start)) @@ -618,7 +618,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous // fileChanged tries to detect whether a file's content has changed compared // to the contents of node, which describes the same path in the parent backup. // It should only be run for regular files. 
-func fileChanged(fs fs.FS, fi *fs.ExtendedFileInfo, node *restic.Node, ignoreFlags uint) bool { +func fileChanged(fi *fs.ExtendedFileInfo, node *restic.Node, ignoreFlags uint) bool { switch { case node == nil: return true diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 038afd11d50..0a3fba028e0 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -683,10 +683,11 @@ func TestFileChanged(t *testing.T) { save(t, filename, content) fs := &fs.Local{} - fiBefore := lstat(t, filename) + fiBefore, err := fs.Lstat(filename) + rtest.OK(t, err) node := nodeFromFile(t, fs, filename) - if fileChanged(fs, fiBefore, node, 0) { + if fileChanged(fiBefore, node, 0) { t.Fatalf("unchanged file detected as changed") } @@ -696,12 +697,12 @@ func TestFileChanged(t *testing.T) { if test.SameFile { // file should be detected as unchanged - if fileChanged(fs, fiAfter, node, test.ChangeIgnore) { + if fileChanged(fiAfter, node, test.ChangeIgnore) { t.Fatalf("unmodified file detected as changed") } } else { // file should be detected as changed - if !fileChanged(fs, fiAfter, node, test.ChangeIgnore) && !test.SameFile { + if !fileChanged(fiAfter, node, test.ChangeIgnore) && !test.SameFile { t.Fatalf("modified file detected as unchanged") } } @@ -718,7 +719,7 @@ func TestFilChangedSpecialCases(t *testing.T) { t.Run("nil-node", func(t *testing.T) { fi := lstat(t, filename) - if !fileChanged(&fs.Local{}, fi, nil, 0) { + if !fileChanged(fi, nil, 0) { t.Fatal("nil node detected as unchanged") } }) @@ -727,7 +728,7 @@ func TestFilChangedSpecialCases(t *testing.T) { fi := lstat(t, filename) node := nodeFromFile(t, &fs.Local{}, filename) node.Type = restic.NodeTypeSymlink - if !fileChanged(&fs.Local{}, fi, node, 0) { + if !fileChanged(fi, node, 0) { t.Fatal("node with changed type detected as unchanged") } }) @@ -2304,7 +2305,7 @@ func TestMetadataChanged(t *testing.T) { // modify the mode by wrapping it in a new struct, uses the consts defined above fs.overrideFI = wrapFileInfo(fi) - rtest.Assert(t, !fileChanged(fs, fs.overrideFI, node2, 0), "testfile must not be considered as changed") + rtest.Assert(t, !fileChanged(fs.overrideFI, node2, 0), "testfile must not be considered as changed") // set the override values in the 'want' node which want.Mode = 0400 From 9a99141a5fe188b59bef37795de9ac9582fd541d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Nov 2024 16:58:04 +0100 Subject: [PATCH 834/893] fs: remove os.FileInfo from fs.ExtendedFileInfo Only the `Sys()` value from os.FileInfo is kept as field `sys` to support Windows. The os.FileInfo removal ensures that for values like `ModTime` that existed in both data structures there's no more confusion which value is actually used. 
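For illustration (this example is not part of the commit): call sites switch from `os.FileInfo` method calls such as `fi.Mode().IsDir()`, `fi.Size()` or `fi.ModTime()` to plain field accesses on `*fs.ExtendedFileInfo`. The helper `describe` is invented for this sketch; the field names match the new struct definition below.

    package example

    import (
        "fmt"

        "github.com/restic/restic/internal/fs"
    )

    // describe is a made-up helper showing the field-based access pattern used
    // throughout this commit: Name, Mode, Size and ModTime are now plain fields
    // on ExtendedFileInfo rather than os.FileInfo methods.
    func describe(fi *fs.ExtendedFileInfo) string {
        kind := "file"
        if fi.Mode.IsDir() {
            kind = "dir"
        }
        return fmt.Sprintf("%s %q: %d bytes, modified %v", kind, fi.Name, fi.Size, fi.ModTime)
    }
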
--- internal/archiver/archiver.go | 8 +-- internal/archiver/archiver_test.go | 23 ++++++--- internal/archiver/archiver_unix_test.go | 44 ---------------- internal/archiver/archiver_windows_test.go | 38 -------------- internal/archiver/exclude.go | 4 +- internal/archiver/scanner.go | 4 +- internal/archiver/scanner_test.go | 2 +- internal/fs/fs_local_test.go | 15 +++--- internal/fs/fs_local_vss_test.go | 4 +- internal/fs/fs_reader.go | 59 +++++----------------- internal/fs/fs_reader_test.go | 20 ++++---- internal/fs/node.go | 14 ++--- internal/fs/node_windows.go | 2 +- internal/fs/stat.go | 6 ++- internal/fs/stat_bsd.go | 4 +- internal/fs/stat_unix.go | 4 +- internal/fs/stat_windows.go | 7 ++- 17 files changed, 80 insertions(+), 178 deletions(-) delete mode 100644 internal/archiver/archiver_windows_test.go diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index a89663084a0..55b6ee4b3f2 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -505,7 +505,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous } switch { - case fi.Mode().IsRegular(): + case fi.Mode.IsRegular(): debug.Log(" %v regular file", target) // check if the file has not changed before performing a fopen operation (more expensive, specially @@ -555,7 +555,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous } // make sure it's still a file - if !fi.Mode().IsRegular() { + if !fi.Mode.IsRegular() { err = errors.Errorf("file %q changed type, refusing to archive", target) return filterError(err) } @@ -571,7 +571,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous arch.trackItem(snPath, previous, node, stats, time.Since(start)) }) - case fi.IsDir(): + case fi.Mode.IsDir(): debug.Log(" %v dir", target) snItem := snPath + "/" @@ -592,7 +592,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous return futureNode{}, false, err } - case fi.Mode()&os.ModeSocket > 0: + case fi.Mode&os.ModeSocket > 0: debug.Log(" %v is a socket, ignoring", target) return futureNode{}, true, nil diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 0a3fba028e0..fcc3d465da4 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -2303,19 +2303,26 @@ func TestMetadataChanged(t *testing.T) { t.Fatalf("metadata does not match:\n%v", cmp.Diff(want, node2)) } - // modify the mode by wrapping it in a new struct, uses the consts defined above - fs.overrideFI = wrapFileInfo(fi) + // modify the mode and UID/GID + modFI := *fi + modFI.Mode = mockFileInfoMode + if runtime.GOOS != "windows" { + modFI.UID = mockFileInfoUID + modFI.GID = mockFileInfoGID + } + + fs.overrideFI = &modFI rtest.Assert(t, !fileChanged(fs.overrideFI, node2, 0), "testfile must not be considered as changed") // set the override values in the 'want' node which - want.Mode = 0400 + want.Mode = mockFileInfoMode // ignore UID and GID on Windows if runtime.GOOS != "windows" { - want.UID = 51234 - want.GID = 51235 + want.UID = mockFileInfoUID + want.GID = mockFileInfoGID } // update mock node accordingly - fs.overrideNode.Mode = 0400 + fs.overrideNode.Mode = want.Mode fs.overrideNode.UID = want.UID fs.overrideNode.GID = want.GID @@ -2456,10 +2463,12 @@ func TestIrregularFile(t *testing.T) { tempfile := filepath.Join(tempdir, "testfile") fi := lstat(t, "testfile") + // patch mode to irregular + fi.Mode = (fi.Mode &^ os.ModeType) | os.ModeIrregular 
override := &overrideFS{ FS: fs.Local{}, - overrideFI: wrapIrregularFileInfo(fi), + overrideFI: fi, overrideNode: &restic.Node{ Type: restic.NodeTypeIrregular, }, diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index d3e87b57ec2..b6cc1ba4ecc 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -4,8 +4,6 @@ package archiver import ( - "os" - "syscall" "testing" "github.com/restic/restic/internal/feature" @@ -14,48 +12,6 @@ import ( rtest "github.com/restic/restic/internal/test" ) -type wrappedFileInfo struct { - os.FileInfo - sys interface{} - mode os.FileMode -} - -func (fi wrappedFileInfo) Sys() interface{} { - return fi.sys -} - -func (fi wrappedFileInfo) Mode() os.FileMode { - return fi.mode -} - -// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed. -func wrapFileInfo(fi *fs.ExtendedFileInfo) *fs.ExtendedFileInfo { - // get the underlying stat_t and modify the values - stat := fi.Sys().(*syscall.Stat_t) - stat.Mode = mockFileInfoMode - stat.Uid = mockFileInfoUID - stat.Gid = mockFileInfoGID - - // wrap the os.FileInfo so we can return a modified stat_t - return fs.ExtendedStat(wrappedFileInfo{ - FileInfo: fi.FileInfo, - sys: stat, - mode: mockFileInfoMode, - }) -} - -// wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file -func wrapIrregularFileInfo(fi *fs.ExtendedFileInfo) *fs.ExtendedFileInfo { - // wrap the os.FileInfo so we can return a modified stat_t - return &fs.ExtendedFileInfo{ - FileInfo: wrappedFileInfo{ - FileInfo: fi.FileInfo, - sys: fi.Sys(), - mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, - }, - } -} - func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*restic.Node, *restic.Node) { want := nodeFromFile(t, &fs.Local{}, name) _, node := snapshot(t, repo, &fs.Local{}, nil, name) diff --git a/internal/archiver/archiver_windows_test.go b/internal/archiver/archiver_windows_test.go deleted file mode 100644 index 2e873c1b7e9..00000000000 --- a/internal/archiver/archiver_windows_test.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build windows -// +build windows - -package archiver - -import ( - "os" - - "github.com/restic/restic/internal/fs" -) - -type wrappedFileInfo struct { - os.FileInfo - mode os.FileMode -} - -func (fi wrappedFileInfo) Mode() os.FileMode { - return fi.mode -} - -// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed. 
-func wrapFileInfo(fi *fs.ExtendedFileInfo) *fs.ExtendedFileInfo { - // wrap the os.FileInfo and return the modified mode, uid and gid are ignored on Windows - return fs.ExtendedStat(wrappedFileInfo{ - FileInfo: fi.FileInfo, - mode: mockFileInfoMode, - }) -} - -// wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file -func wrapIrregularFileInfo(fi *fs.ExtendedFileInfo) *fs.ExtendedFileInfo { - return &fs.ExtendedFileInfo{ - FileInfo: wrappedFileInfo{ - FileInfo: fi.FileInfo, - mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, - }, - } -} diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go index e1939d2921a..6db62aa2025 100644 --- a/internal/archiver/exclude.go +++ b/internal/archiver/exclude.go @@ -267,7 +267,7 @@ func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { } // reject everything except directories - if !fi.IsDir() { + if !fi.Mode.IsDir() { return true } @@ -303,7 +303,7 @@ func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { func RejectBySize(maxSize int64) (RejectFunc, error) { return func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { // directory will be ignored - if fi.IsDir() { + if fi.Mode.IsDir() { return false } diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go index ebcca0df1bf..2e6b7210ce3 100644 --- a/internal/archiver/scanner.go +++ b/internal/archiver/scanner.go @@ -118,10 +118,10 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (Sca } switch { - case fi.Mode().IsRegular(): + case fi.Mode.IsRegular(): stats.Files++ stats.Bytes += uint64(fi.Size) - case fi.Mode().IsDir(): + case fi.Mode.IsDir(): names, err := fs.Readdirnames(s.FS, target, fs.O_NOFOLLOW) if err != nil { return stats, s.Error(target, err) diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go index 0504cb8bd8e..a47952388ff 100644 --- a/internal/archiver/scanner_test.go +++ b/internal/archiver/scanner_test.go @@ -57,7 +57,7 @@ func TestScanner(t *testing.T) { }, }, selFn: func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { - if fi.IsDir() { + if fi.Mode.IsDir() { return true } diff --git a/internal/fs/fs_local_test.go b/internal/fs/fs_local_test.go index 74cc8b48c33..8fd8eb136e7 100644 --- a/internal/fs/fs_local_test.go +++ b/internal/fs/fs_local_test.go @@ -84,7 +84,7 @@ func checkMetadata(t *testing.T, f File, path string, follow bool, nodeType rest fi2, err = os.Lstat(path) } rtest.OK(t, err) - assertFIEqual(t, fi2, fi.FileInfo) + assertFIEqual(t, fi2, fi) node, err := f.ToNode(false) rtest.OK(t, err) @@ -94,13 +94,12 @@ func checkMetadata(t *testing.T, f File, path string, follow bool, nodeType rest rtest.Equals(t, nodeType, node.Type, "node Type") } -func assertFIEqual(t *testing.T, want os.FileInfo, got os.FileInfo) { +func assertFIEqual(t *testing.T, want os.FileInfo, got *ExtendedFileInfo) { t.Helper() - rtest.Equals(t, want.Name(), got.Name(), "Name") - rtest.Equals(t, want.IsDir(), got.IsDir(), "IsDir") - rtest.Equals(t, want.ModTime(), got.ModTime(), "ModTime") - rtest.Equals(t, want.Mode(), got.Mode(), "Mode") - rtest.Equals(t, want.Size(), got.Size(), "Size") + rtest.Equals(t, want.Name(), got.Name, "Name") + rtest.Equals(t, want.ModTime(), got.ModTime, "ModTime") + rtest.Equals(t, want.Mode(), got.Mode, "Mode") + rtest.Equals(t, want.Size(), got.Size, "Size") } func TestFSLocalRead(t *testing.T) { @@ -206,7 +205,7 @@ func TestFSLocalTypeChange(t *testing.T) { fi, err := f.Stat() rtest.OK(t, 
err) - if !fi.IsDir() { + if !fi.Mode.IsDir() { // a file handle based implementation should still reference the file checkMetadata(t, f, pathNew, false, restic.NodeTypeFile) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 33c412fe9dc..b64897d1cbe 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -325,7 +325,7 @@ func TestVSSFS(t *testing.T) { lstatFi, err := localVss.Lstat(tempfile) rtest.OK(t, err) - rtest.Equals(t, origFi.Mode(), lstatFi.Mode()) + rtest.Equals(t, origFi.Mode, lstatFi.Mode) f, err := localVss.OpenFile(tempfile, os.O_RDONLY, false) rtest.OK(t, err) @@ -335,7 +335,7 @@ func TestVSSFS(t *testing.T) { node, err := f.ToNode(false) rtest.OK(t, err) - rtest.Equals(t, node.Mode, lstatFi.Mode()) + rtest.Equals(t, node.Mode, lstatFi.Mode) rtest.OK(t, f.Close()) } diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 8b76687305a..bbe5c95abed 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -43,12 +43,10 @@ func (fs *Reader) VolumeName(_ string) string { func (fs *Reader) fi() *ExtendedFileInfo { return &ExtendedFileInfo{ - FileInfo: fakeFileInfo{ - name: fs.Name, - size: fs.Size, - mode: fs.Mode, - modtime: fs.ModTime, - }, + Name: fs.Name, + Mode: fs.Mode, + ModTime: fs.ModTime, + Size: fs.Size, } } @@ -71,7 +69,7 @@ func (fs *Reader) OpenFile(name string, flag int, _ bool) (f File, err error) { return f, nil case "/", ".": f = fakeDir{ - entries: []string{fs.fi().Name()}, + entries: []string{fs.fi().Name}, } return f, nil } @@ -85,13 +83,12 @@ func (fs *Reader) OpenFile(name string, flag int, _ bool) (f File, err error) { // If there is an error, it will be of type *os.PathError. func (fs *Reader) Lstat(name string) (*ExtendedFileInfo, error) { getDirInfo := func(name string) *ExtendedFileInfo { - fi := fakeFileInfo{ - name: fs.Base(name), - size: 0, - mode: os.ModeDir | 0755, - modtime: time.Now(), + return &ExtendedFileInfo{ + Name: fs.Base(name), + Size: 0, + Mode: os.ModeDir | 0755, + ModTime: time.Now(), } - return &ExtendedFileInfo{FileInfo: fi} } switch name { @@ -164,7 +161,7 @@ func newReaderFile(rd io.ReadCloser, fi *ExtendedFileInfo, allowEmptyFile bool) AllowEmptyFile: allowEmptyFile, fakeFile: fakeFile{ fi: fi, - name: fi.Name(), + name: fi.Name, }, } } @@ -233,7 +230,7 @@ func (f fakeFile) Stat() (*ExtendedFileInfo, error) { } func (f fakeFile) ToNode(_ bool) (*restic.Node, error) { - node := buildBasicNode(f.name, f.fi.FileInfo) + node := buildBasicNode(f.name, f.fi) // fill minimal info with current values for uid, gid node.UID = uint32(os.Getuid()) @@ -256,38 +253,6 @@ func (d fakeDir) Readdirnames(n int) ([]string, error) { return slices.Clone(d.entries), nil } -// fakeFileInfo implements the bare minimum of os.FileInfo. 
-type fakeFileInfo struct { - name string - size int64 - mode os.FileMode - modtime time.Time -} - -func (fi fakeFileInfo) Name() string { - return fi.name -} - -func (fi fakeFileInfo) Size() int64 { - return fi.size -} - -func (fi fakeFileInfo) Mode() os.FileMode { - return fi.mode -} - -func (fi fakeFileInfo) ModTime() time.Time { - return fi.modtime -} - -func (fi fakeFileInfo) IsDir() bool { - return fi.mode&os.ModeDir > 0 -} - -func (fi fakeFileInfo) Sys() interface{} { - return nil -} - func pathError(op, name string, err error) *os.PathError { return &os.PathError{Op: op, Path: name, Err: err} } diff --git a/internal/fs/fs_reader_test.go b/internal/fs/fs_reader_test.go index f2e8b201390..257bfbbac76 100644 --- a/internal/fs/fs_reader_test.go +++ b/internal/fs/fs_reader_test.go @@ -61,24 +61,24 @@ func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { } func checkFileInfo(t testing.TB, fi *ExtendedFileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) { - if fi.IsDir() != isdir { - t.Errorf("IsDir returned %t, want %t", fi.IsDir(), isdir) + if fi.Mode.IsDir() != isdir { + t.Errorf("IsDir returned %t, want %t", fi.Mode.IsDir(), isdir) } - if fi.Mode() != mode { - t.Errorf("Mode() returned wrong value, want 0%o, got 0%o", mode, fi.Mode()) + if fi.Mode != mode { + t.Errorf("Mode has wrong value, want 0%o, got 0%o", mode, fi.Mode) } - if !modtime.Equal(time.Time{}) && !fi.FileInfo.ModTime().Equal(modtime) { - t.Errorf("ModTime() returned wrong value, want %v, got %v", modtime, fi.FileInfo.ModTime()) + if !modtime.Equal(time.Time{}) && !fi.ModTime.Equal(modtime) { + t.Errorf("ModTime has wrong value, want %v, got %v", modtime, fi.ModTime) } - if path.Base(fi.Name()) != fi.Name() { - t.Errorf("Name() returned is not base, want %q, got %q", path.Base(fi.Name()), fi.Name()) + if path.Base(fi.Name) != fi.Name { + t.Errorf("Name is not base, want %q, got %q", path.Base(fi.Name), fi.Name) } - if fi.Name() != path.Base(filename) { - t.Errorf("Name() returned wrong value, want %q, got %q", path.Base(filename), fi.Name()) + if fi.Name != path.Base(filename) { + t.Errorf("Name has wrong value, want %q, got %q", path.Base(filename), fi.Name) } } diff --git a/internal/fs/node.go b/internal/fs/node.go index be91562a456..058d9cc7b8b 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -16,7 +16,7 @@ import ( // nodeFromFileInfo returns a new node from the given path and FileInfo. It // returns the first error that is encountered, together with a node. 
func nodeFromFileInfo(path string, fi *ExtendedFileInfo, ignoreXattrListError bool) (*restic.Node, error) { - node := buildBasicNode(path, fi.FileInfo) + node := buildBasicNode(path, fi) if err := nodeFillExtendedStat(node, path, fi); err != nil { return node, err @@ -27,18 +27,18 @@ func nodeFromFileInfo(path string, fi *ExtendedFileInfo, ignoreXattrListError bo return node, err } -func buildBasicNode(path string, fi os.FileInfo) *restic.Node { +func buildBasicNode(path string, fi *ExtendedFileInfo) *restic.Node { mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky node := &restic.Node{ Path: path, - Name: fi.Name(), - Mode: fi.Mode() & mask, - ModTime: fi.ModTime(), + Name: fi.Name, + Mode: fi.Mode & mask, + ModTime: fi.ModTime, } - node.Type = nodeTypeFromFileInfo(fi.Mode()) + node.Type = nodeTypeFromFileInfo(fi.Mode) if node.Type == restic.NodeTypeFile { - node.Size = uint64(fi.Size()) + node.Size = uint64(fi.Size) } return node } diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index c0f8b08b09b..74cf6c0e5c3 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -361,7 +361,7 @@ func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFil } } - winFI := stat.Sys().(*syscall.Win32FileAttributeData) + winFI := stat.sys.(*syscall.Win32FileAttributeData) // Add Windows attributes node.GenericAttributes, err = restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{ diff --git a/internal/fs/stat.go b/internal/fs/stat.go index 9e5be51e118..bd3993f415b 100644 --- a/internal/fs/stat.go +++ b/internal/fs/stat.go @@ -8,7 +8,8 @@ import ( // ExtendedFileInfo is an extended stat_t, filled with attributes that are // supported by most operating systems. The original FileInfo is embedded. type ExtendedFileInfo struct { - os.FileInfo + Name string + Mode os.FileMode DeviceID uint64 // ID of device containing the file Inode uint64 // Inode number @@ -23,6 +24,9 @@ type ExtendedFileInfo struct { AccessTime time.Time // last access time stamp ModTime time.Time // last (content) modification time stamp ChangeTime time.Time // last status change time stamp + + // nolint:unused // only used on Windows + sys any // Value returned by os.FileInfo.Sys() } // ExtendedStat returns an ExtendedFileInfo constructed from the os.FileInfo. 
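A side effect of `nodeTypeFromFileInfo` now taking a plain `os.FileMode` (see the node.go hunk above) is that the mode-to-node-type mapping can be exercised without creating files or fake `os.FileInfo` implementations. A hypothetical test sketch, not part of the commit, assuming the symlink branch behaves as in the previous implementation:

    package fs

    import (
        "os"
        "testing"

        "github.com/restic/restic/internal/restic"
    )

    // TestNodeTypeFromFileMode only illustrates that the mode-based signature
    // makes the mapping trivially testable; it is not part of this commit.
    func TestNodeTypeFromFileMode(t *testing.T) {
        if got := nodeTypeFromFileInfo(0o644); got != restic.NodeTypeFile {
            t.Errorf("regular file mode mapped to %v", got)
        }
        if got := nodeTypeFromFileInfo(os.ModeSymlink | 0o777); got != restic.NodeTypeSymlink {
            t.Errorf("symlink mode mapped to %v", got)
        }
    }
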
diff --git a/internal/fs/stat_bsd.go b/internal/fs/stat_bsd.go index de2254d2441..16506415306 100644 --- a/internal/fs/stat_bsd.go +++ b/internal/fs/stat_bsd.go @@ -14,7 +14,9 @@ func extendedStat(fi os.FileInfo) *ExtendedFileInfo { s := fi.Sys().(*syscall.Stat_t) return &ExtendedFileInfo{ - FileInfo: fi, + Name: fi.Name(), + Mode: fi.Mode(), + DeviceID: uint64(s.Dev), Inode: uint64(s.Ino), Links: uint64(s.Nlink), diff --git a/internal/fs/stat_unix.go b/internal/fs/stat_unix.go index 46077402f8b..723ac8b1978 100644 --- a/internal/fs/stat_unix.go +++ b/internal/fs/stat_unix.go @@ -14,7 +14,9 @@ func extendedStat(fi os.FileInfo) *ExtendedFileInfo { s := fi.Sys().(*syscall.Stat_t) return &ExtendedFileInfo{ - FileInfo: fi, + Name: fi.Name(), + Mode: fi.Mode(), + DeviceID: uint64(s.Dev), Inode: s.Ino, Links: uint64(s.Nlink), diff --git a/internal/fs/stat_windows.go b/internal/fs/stat_windows.go index 0dbc429fba2..a2dfa5f6d5d 100644 --- a/internal/fs/stat_windows.go +++ b/internal/fs/stat_windows.go @@ -18,8 +18,11 @@ func extendedStat(fi os.FileInfo) *ExtendedFileInfo { } extFI := ExtendedFileInfo{ - FileInfo: fi, - Size: int64(s.FileSizeLow) | (int64(s.FileSizeHigh) << 32), + Name: fi.Name(), + Mode: fi.Mode(), + + Size: int64(s.FileSizeLow) | (int64(s.FileSizeHigh) << 32), + sys: fi.Sys(), } atime := syscall.NsecToTimespec(s.LastAccessTime.Nanoseconds()) From 10625465639ab38fee8cc025d6b6fd96ed94f11b Mon Sep 17 00:00:00 2001 From: Daniel Danner Date: Sat, 30 Nov 2024 17:52:29 +0100 Subject: [PATCH 835/893] Mention `size` --- cmd/restic/cmd_prune.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 75a6ffa72c1..2137147996c 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -74,7 +74,7 @@ func init() { func addPruneOptions(c *cobra.Command, pruneOptions *PruneOptions) { f := c.Flags() f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')") - f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "stop after repacking this much data in total (allowed suffixes: k/K, m/M, g/G, t/T)") + f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "stop after repacking this much data in total (allowed suffixes for `size`: k/K, m/M, g/G, t/T)") f.BoolVar(&pruneOptions.RepackCacheableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable") f.BoolVar(&pruneOptions.RepackSmall, "repack-small", false, "repack pack files below 80% of target pack size") f.BoolVar(&pruneOptions.RepackUncompressed, "repack-uncompressed", false, "repack all uncompressed data") From ead57ec5015fddc72f55b8321f4eba6e4ff40da1 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Nov 2024 21:17:06 +0100 Subject: [PATCH 836/893] bump backblaze/blazer to v0.7.1 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 007fbd255dc..47936412ccb 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 - github.com/Backblaze/blazer v0.6.1 + github.com/Backblaze/blazer v0.7.1 github.com/anacrolix/fuse v0.3.1 github.com/cenkalti/backoff/v4 v4.3.0 github.com/cespare/xxhash/v2 v2.3.0 diff --git a/go.sum b/go.sum index 25b0e06988b..1487fdd707f 100644 
--- a/go.sum +++ b/go.sum @@ -29,8 +29,8 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= -github.com/Backblaze/blazer v0.6.1/go.mod h1:7/jrGx4O6OKOto6av+hLwelPR8rwZ+PLxQ5ZOiYAjwY= +github.com/Backblaze/blazer v0.7.1 h1:J43PbFj6hXLg1jvCNr+rQoAsxzKK0IP7ftl1ReCwpcQ= +github.com/Backblaze/blazer v0.7.1/go.mod h1:MhntL1nMpIuoqrPP6TnZu/xTydMgOAe/Xm6KongbjKs= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74= github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk= From 098db935f7f1795a373acf11ba68cfbefa252abf Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Nov 2024 21:22:51 +0100 Subject: [PATCH 837/893] Stabilize `explicit-s3-anonymous-auth` and `safe-forget-keep-tags` flags The features can no longer be disabled. --- changelog/unreleased/pull-5162 | 7 +++++++ cmd/restic/cmd_forget.go | 3 +-- internal/backend/s3/s3.go | 7 +------ internal/feature/registry.go | 4 ++-- 4 files changed, 11 insertions(+), 10 deletions(-) create mode 100644 changelog/unreleased/pull-5162 diff --git a/changelog/unreleased/pull-5162 b/changelog/unreleased/pull-5162 new file mode 100644 index 00000000000..18e6be2b604 --- /dev/null +++ b/changelog/unreleased/pull-5162 @@ -0,0 +1,7 @@ +Change: Promote feature flags + +The `explicit-s3-anonymous-auth` and `safe-forget-keep-tags` features are +now stable and can no longer be disabled. The feature flags will be removed +in restic 0.19.0. + +https://github.com/restic/restic/pull/5162 diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 58a9d25b71d..f770dc36569 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -8,7 +8,6 @@ import ( "strconv" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" @@ -271,7 +270,7 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy) - if feature.Flag.Enabled(feature.SafeForgetKeepTags) && !policy.Empty() && len(keep) == 0 { + if !policy.Empty() && len(keep) == 0 { return fmt.Errorf("refusing to delete last snapshot of snapshot group \"%v\"", key.String()) } if len(keep) != 0 && !gopts.Quiet && !gopts.JSON { diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index 2176d289d87..e3d4cc499dc 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -129,12 +129,7 @@ func getCredentials(cfg Config, tr http.RoundTripper) (*credentials.Credentials, // Fail if no credentials were found to prevent repeated attempts to (unsuccessfully) retrieve new credentials. // The first attempt still has to timeout which slows down restic usage considerably. Thus, migrate towards forcing // users to explicitly decide between authenticated and anonymous access. 
- if feature.Flag.Enabled(feature.ExplicitS3AnonymousAuth) { - return nil, fmt.Errorf("no credentials found. Use `-o s3.unsafe-anonymous-auth=true` for anonymous authentication") - } - - debug.Log("using anonymous access for %#v", cfg.Endpoint) - creds = credentials.New(&credentials.Static{}) + return nil, fmt.Errorf("no credentials found. Use `-o s3.unsafe-anonymous-auth=true` for anonymous authentication") } roleArn := os.Getenv("RESTIC_AWS_ASSUME_ROLE_ARN") diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 7fe7da96514..0773ea13650 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -15,7 +15,7 @@ func init() { Flag.SetFlags(map[FlagName]FlagDesc{ BackendErrorRedesign: {Type: Beta, Description: "enforce timeouts for stuck HTTP requests and use new backend error handling design."}, DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, - ExplicitS3AnonymousAuth: {Type: Beta, Description: "forbid anonymous S3 authentication unless `-o s3.unsafe-anonymous-auth=true` is set"}, - SafeForgetKeepTags: {Type: Beta, Description: "prevent deleting all snapshots if the tag passed to `forget --keep-tags tagname` does not exist"}, + ExplicitS3AnonymousAuth: {Type: Stable, Description: "forbid anonymous S3 authentication unless `-o s3.unsafe-anonymous-auth=true` is set"}, + SafeForgetKeepTags: {Type: Stable, Description: "prevent deleting all snapshots if the tag passed to `forget --keep-tags tagname` does not exist"}, }) } From a4d776ec8fdf6e6f8bed0b7e2547b1f762a23e14 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Dec 2024 01:41:13 +0000 Subject: [PATCH 838/893] build(deps): bump golang.org/x/text from 0.19.0 to 0.20.0 Bumps [golang.org/x/text](https://github.com/golang/text) from 0.19.0 to 0.20.0. - [Release notes](https://github.com/golang/text/releases) - [Commits](https://github.com/golang/text/compare/v0.19.0...v0.20.0) --- updated-dependencies: - dependency-name: golang.org/x/text dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 007fbd255dc..3ed991e2d89 100644 --- a/go.mod +++ b/go.mod @@ -28,10 +28,10 @@ require ( golang.org/x/crypto v0.28.0 golang.org/x/net v0.30.0 golang.org/x/oauth2 v0.23.0 - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.9.0 golang.org/x/sys v0.26.0 golang.org/x/term v0.25.0 - golang.org/x/text v0.19.0 + golang.org/x/text v0.20.0 golang.org/x/time v0.7.0 google.golang.org/api v0.204.0 ) diff --git a/go.sum b/go.sum index 25b0e06988b..556dfa9b964 100644 --- a/go.sum +++ b/go.sum @@ -268,8 +268,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -299,8 +299,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 2dc00cfd368d9d53fdc0e551325686077993573b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Dec 2024 11:45:54 +0000 Subject: [PATCH 839/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/storage/azblob Bumps [github.com/Azure/azure-sdk-for-go/sdk/storage/azblob](https://github.com/Azure/azure-sdk-for-go) from 1.4.0 to 1.5.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.4.0...sdk/azcore/v1.5.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/storage/azblob dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 47936412ccb..df92516b604 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ require ( cloud.google.com/go/storage v1.43.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 github.com/Backblaze/blazer v0.7.1 github.com/anacrolix/fuse v0.3.1 github.com/cenkalti/backoff/v4 v4.3.0 diff --git a/go.sum b/go.sum index 1487fdd707f..e9dcd47011a 100644 --- a/go.sum +++ b/go.sum @@ -23,8 +23,8 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xP github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 h1:Be6KInmFEKV81c0pOAEbRYehLMwmmGI1exuFj248AMk= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod h1:WCPBHsOXfBVnivScjs2ypRfimjEW0qPVLGgJkZlrIOA= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= From 40987a5f8051e4a297934cecc382ee56641fa581 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Dec 2024 11:48:44 +0000 Subject: [PATCH 840/893] build(deps): bump golang.org/x/sys from 0.26.0 to 0.27.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.26.0 to 0.27.0. - [Commits](https://github.com/golang/sys/compare/v0.26.0...v0.27.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index df86021e534..c70b34682fe 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( golang.org/x/net v0.30.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.9.0 - golang.org/x/sys v0.26.0 + golang.org/x/sys v0.27.0 golang.org/x/term v0.25.0 golang.org/x/text v0.20.0 golang.org/x/time v0.7.0 diff --git a/go.sum b/go.sum index ff04127463e..c9b960b359c 100644 --- a/go.sum +++ b/go.sum @@ -286,8 +286,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 770841f95d906dfbba80fff6a16031a0c158364d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Dec 2024 12:02:01 +0000 Subject: [PATCH 841/893] build(deps): bump github.com/pkg/sftp from 1.13.6 to 1.13.7 Bumps [github.com/pkg/sftp](https://github.com/pkg/sftp) from 1.13.6 to 1.13.7. - [Release notes](https://github.com/pkg/sftp/releases) - [Commits](https://github.com/pkg/sftp/compare/v1.13.6...v1.13.7) --- updated-dependencies: - dependency-name: github.com/pkg/sftp dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 20 +++++++++++++++----- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 17a390369e5..54462ea9b1b 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/peterbourgon/unixtransport v0.0.4 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 - github.com/pkg/sftp v1.13.6 + github.com/pkg/sftp v1.13.7 github.com/pkg/xattr v0.4.10 github.com/restic/chunker v0.4.0 github.com/spf13/cobra v1.8.1 diff --git a/go.sum b/go.sum index e100df21697..bb125f0c0dd 100644 --- a/go.sum +++ b/go.sum @@ -170,8 +170,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/pkg/sftp v1.13.7 h1:uv+I3nNJvlKZIQGSr8JVQLNHFU9YhhNpvC14Y6KgmSM= +github.com/pkg/sftp v1.13.7/go.mod h1:KMKI0t3T6hfA+lTR/ssZdunHo+uwq7ghoN09/FSu3DY= github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -232,7 +232,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -245,6 +245,7 @@ golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -255,8 +256,9 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -285,13 +287,17 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -299,6 +305,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= @@ -312,6 +321,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From d7d9af4c9f7a583fc2b479f90f8ae244cc20e45c Mon Sep 17 00:00:00 2001 From: Srigovind Nayak <5201843+konidev20@users.noreply.github.com> Date: Sun, 1 Dec 2024 19:59:11 +0530 Subject: [PATCH 842/893] ui: restore --delete indicates number of deleted files (#5100) * ui: restore --delete indicates number of deleted files * adds new field `FilesDeleted` to the State struct, JSON and text progress updaters * increment FilesDeleted count when ReportedDeletedFile * ui: collect the files to be deleted, delete, then update the count post deletion * docs: update scripting output fields for restore command ui: report deleted directories and refactor function name to ReportDeletion --- changelog/unreleased/issue-5092 | 8 ++++++++ doc/075_scripting.rst | 4 ++++ internal/restorer/restorer.go | 20 +++++++++++++++++++- internal/ui/restore/json.go | 4 ++++ internal/ui/restore/json_test.go | 10 +++++----- internal/ui/restore/progress.go | 5 ++++- internal/ui/restore/progress_test.go | 18 +++++++++--------- internal/ui/restore/text.go | 6 ++++++ internal/ui/restore/text_test.go | 10 +++++----- 9 files changed, 64 insertions(+), 21 deletions(-) create mode 100644 changelog/unreleased/issue-5092 diff --git a/changelog/unreleased/issue-5092 b/changelog/unreleased/issue-5092 new file mode 100644 index 00000000000..b6a32b68bfc --- /dev/null +++ b/changelog/unreleased/issue-5092 @@ -0,0 +1,8 @@ +Enhancement: Indicate the number of deleted files/directories during restore + +Restic now indicates the number of deleted files/directories during restore. +The `--json` output now includes a `files_deleted` field that shows the number +of files and directories that were deleted during restore.
+ +https://github.com/restic/restic/issues/5092 +https://github.com/restic/restic/pull/5100 diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 88fc1f35b0f..39a6dbc7fcb 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -563,6 +563,8 @@ Status +----------------------+------------------------------------------------------------+ |``files_skipped`` | Files skipped due to overwrite setting | +----------------------+------------------------------------------------------------+ +|``files_deleted`` | Files deleted | ++----------------------+------------------------------------------------------------+ |``total_bytes`` | Total number of bytes in restore set | +----------------------+------------------------------------------------------------+ |``bytes_restored`` | Number of bytes restored | @@ -615,6 +617,8 @@ Summary +----------------------+------------------------------------------------------------+ |``files_skipped`` | Files skipped due to overwrite setting | +----------------------+------------------------------------------------------------+ +|``files_deleted`` | Files deleted | ++----------------------+------------------------------------------------------------+ |``total_bytes`` | Total number of bytes in restore set | +----------------------+------------------------------------------------------------+ |``bytes_restored`` | Number of bytes restored | diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index f28cd0ba3f7..14a8edeaca7 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -511,12 +511,30 @@ func (res *Restorer) removeUnexpectedFiles(ctx context.Context, target, location selectedForRestore, _ := res.SelectFilter(nodeLocation, false) // only delete files that were selected for restore if selectedForRestore { - res.opts.Progress.ReportDeletedFile(nodeLocation) + // First collect all files that will be deleted + var filesToDelete []string + err := filepath.Walk(nodeTarget, func(path string, _ os.FileInfo, err error) error { + if err != nil { + return err + } + filesToDelete = append(filesToDelete, path) + return nil + }) + if err != nil { + return err + } + if !res.opts.DryRun { + // Perform the deletion if err := fs.RemoveAll(nodeTarget); err != nil { return err } } + + // Report paths as deleted only after successful removal + for i := len(filesToDelete) - 1; i >= 0; i-- { + res.opts.Progress.ReportDeletion(filesToDelete[i]) + } } } diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go index 72cc38a6e13..f7f7bdd1f52 100644 --- a/internal/ui/restore/json.go +++ b/internal/ui/restore/json.go @@ -33,6 +33,7 @@ func (t *jsonPrinter) Update(p State, duration time.Duration) { TotalFiles: p.FilesTotal, FilesRestored: p.FilesFinished, FilesSkipped: p.FilesSkipped, + FilesDeleted: p.FilesDeleted, TotalBytes: p.AllBytesTotal, BytesRestored: p.AllBytesWritten, BytesSkipped: p.AllBytesSkipped, @@ -94,6 +95,7 @@ func (t *jsonPrinter) Finish(p State, duration time.Duration) { TotalFiles: p.FilesTotal, FilesRestored: p.FilesFinished, FilesSkipped: p.FilesSkipped, + FilesDeleted: p.FilesDeleted, TotalBytes: p.AllBytesTotal, BytesRestored: p.AllBytesWritten, BytesSkipped: p.AllBytesSkipped, @@ -108,6 +110,7 @@ type statusUpdate struct { TotalFiles uint64 `json:"total_files,omitempty"` FilesRestored uint64 `json:"files_restored,omitempty"` FilesSkipped uint64 `json:"files_skipped,omitempty"` + FilesDeleted uint64 `json:"files_deleted,omitempty"` TotalBytes uint64 `json:"total_bytes,omitempty"` 
BytesRestored uint64 `json:"bytes_restored,omitempty"` BytesSkipped uint64 `json:"bytes_skipped,omitempty"` @@ -137,6 +140,7 @@ type summaryOutput struct { TotalFiles uint64 `json:"total_files,omitempty"` FilesRestored uint64 `json:"files_restored,omitempty"` FilesSkipped uint64 `json:"files_skipped,omitempty"` + FilesDeleted uint64 `json:"files_deleted,omitempty"` TotalBytes uint64 `json:"total_bytes,omitempty"` BytesRestored uint64 `json:"bytes_restored,omitempty"` BytesSkipped uint64 `json:"bytes_skipped,omitempty"` diff --git a/internal/ui/restore/json_test.go b/internal/ui/restore/json_test.go index 917a48070dd..c7096c24658 100644 --- a/internal/ui/restore/json_test.go +++ b/internal/ui/restore/json_test.go @@ -17,31 +17,31 @@ func createJSONProgress() (*ui.MockTerminal, ProgressPrinter) { func TestJSONPrintUpdate(t *testing.T) { term, printer := createJSONProgress() - printer.Update(State{3, 11, 0, 29, 47, 0}, 5*time.Second) + printer.Update(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.Output) } func TestJSONPrintUpdateWithSkipped(t *testing.T) { term, printer := createJSONProgress() - printer.Update(State{3, 11, 2, 29, 47, 59}, 5*time.Second) + printer.Update(State{3, 11, 2, 0, 29, 47, 59}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":29,\"bytes_skipped\":59}\n"}, term.Output) } func TestJSONPrintSummaryOnSuccess(t *testing.T) { term, printer := createJSONProgress() - printer.Finish(State{11, 11, 0, 47, 47, 0}, 5*time.Second) + printer.Finish(State{11, 11, 0, 0, 47, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.Output) } func TestJSONPrintSummaryOnErrors(t *testing.T) { term, printer := createJSONProgress() - printer.Finish(State{3, 11, 0, 29, 47, 0}, 5*time.Second) + printer.Finish(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.Output) } func TestJSONPrintSummaryOnSuccessWithSkipped(t *testing.T) { term, printer := createJSONProgress() - printer.Finish(State{11, 11, 2, 47, 47, 59}, 5*time.Second) + printer.Finish(State{11, 11, 2, 0, 47, 47, 59}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":47,\"bytes_skipped\":59}\n"}, term.Output) } diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index 06f4c86aa11..41367f34651 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -11,6 +11,7 @@ type State struct { FilesFinished uint64 FilesTotal uint64 FilesSkipped uint64 + FilesDeleted uint64 AllBytesWritten uint64 AllBytesTotal uint64 AllBytesSkipped uint64 @@ -124,11 +125,13 @@ func (p *Progress) AddSkippedFile(name string, size uint64) { p.printer.CompleteItem(ActionFileUnchanged, name, size) } -func (p *Progress) ReportDeletedFile(name string) { +func (p *Progress) ReportDeletion(name string) { if p 
== nil { return } + p.s.FilesDeleted++ + p.m.Lock() defer p.m.Unlock() diff --git a/internal/ui/restore/progress_test.go b/internal/ui/restore/progress_test.go index b01440bee91..b6f72726cb4 100644 --- a/internal/ui/restore/progress_test.go +++ b/internal/ui/restore/progress_test.go @@ -72,7 +72,7 @@ func TestNew(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{0, 0, 0, 0, 0, 0}, 0, false}, + printerTraceEntry{State{0, 0, 0, 0, 0, 0, 0}, 0, false}, }, result) test.Equals(t, itemTrace{}, items) } @@ -85,7 +85,7 @@ func TestAddFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{0, 1, 0, 0, fileSize, 0}, 0, false}, + printerTraceEntry{State{0, 1, 0, 0, 0, fileSize, 0}, 0, false}, }, result) test.Equals(t, itemTrace{}, items) } @@ -100,7 +100,7 @@ func TestFirstProgressOnAFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{0, 1, 0, expectedBytesWritten, expectedBytesTotal, 0}, 0, false}, + printerTraceEntry{State{0, 1, 0, 0, expectedBytesWritten, expectedBytesTotal, 0}, 0, false}, }, result) test.Equals(t, itemTrace{}, items) } @@ -116,7 +116,7 @@ func TestLastProgressOnAFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{1, 1, 0, fileSize, fileSize, 0}, 0, false}, + printerTraceEntry{State{1, 1, 0, 0, fileSize, fileSize, 0}, 0, false}, }, result) test.Equals(t, itemTrace{ itemTraceEntry{action: ActionFileUpdated, item: "test", size: fileSize}, @@ -135,7 +135,7 @@ func TestLastProgressOnLastFile(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{2, 2, 0, 50 + fileSize, 50 + fileSize, 0}, 0, false}, + printerTraceEntry{State{2, 2, 0, 0, 50 + fileSize, 50 + fileSize, 0}, 0, false}, }, result) test.Equals(t, itemTrace{ itemTraceEntry{action: ActionFileUpdated, item: "test1", size: 50}, @@ -154,7 +154,7 @@ func TestSummaryOnSuccess(t *testing.T) { return true }) test.Equals(t, printerTrace{ - printerTraceEntry{State{2, 2, 0, 50 + fileSize, 50 + fileSize, 0}, mockFinishDuration, true}, + printerTraceEntry{State{2, 2, 0, 0, 50 + fileSize, 50 + fileSize, 0}, mockFinishDuration, true}, }, result) } @@ -169,7 +169,7 @@ func TestSummaryOnErrors(t *testing.T) { return true }) test.Equals(t, printerTrace{ - printerTraceEntry{State{1, 2, 0, 50 + fileSize/2, 50 + fileSize, 0}, mockFinishDuration, true}, + printerTraceEntry{State{1, 2, 0, 0, 50 + fileSize/2, 50 + fileSize, 0}, mockFinishDuration, true}, }, result) } @@ -181,7 +181,7 @@ func TestSkipFile(t *testing.T) { return true }) test.Equals(t, printerTrace{ - printerTraceEntry{State{0, 0, 1, 0, 0, fileSize}, mockFinishDuration, true}, + printerTraceEntry{State{0, 0, 1, 0, 0, 0, fileSize}, mockFinishDuration, true}, }, result) test.Equals(t, itemTrace{ itemTraceEntry{ActionFileUnchanged, "test", fileSize}, @@ -196,7 +196,7 @@ func TestProgressTypes(t *testing.T) { progress.AddFile(0) progress.AddProgress("dir", ActionDirRestored, fileSize, fileSize) progress.AddProgress("new", ActionFileRestored, 0, 0) - progress.ReportDeletedFile("del") + progress.ReportDeletion("del") return true }) test.Equals(t, itemTrace{ diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go index ba0dcd00743..35c9db029f7 100644 --- a/internal/ui/restore/text.go +++ b/internal/ui/restore/text.go @@ -30,6 +30,9 @@ func (t *textPrinter) Update(p State, duration time.Duration) { if p.FilesSkipped > 0 { progress += fmt.Sprintf(", skipped %v files/dirs %v", p.FilesSkipped, 
ui.FormatBytes(p.AllBytesSkipped)) } + if p.FilesDeleted > 0 { + progress += fmt.Sprintf(", deleted %v files/dirs", p.FilesDeleted) + } t.terminal.SetStatus([]string{progress}) } @@ -82,6 +85,9 @@ func (t *textPrinter) Finish(p State, duration time.Duration) { if p.FilesSkipped > 0 { summary += fmt.Sprintf(", skipped %v files/dirs %v", p.FilesSkipped, ui.FormatBytes(p.AllBytesSkipped)) } + if p.FilesDeleted > 0 { + summary += fmt.Sprintf(", deleted %v files/dirs", p.FilesDeleted) + } t.terminal.Print(summary) } diff --git a/internal/ui/restore/text_test.go b/internal/ui/restore/text_test.go index 4ffb1615d83..746700cd897 100644 --- a/internal/ui/restore/text_test.go +++ b/internal/ui/restore/text_test.go @@ -17,31 +17,31 @@ func createTextProgress() (*ui.MockTerminal, ProgressPrinter) { func TestPrintUpdate(t *testing.T) { term, printer := createTextProgress() - printer.Update(State{3, 11, 0, 29, 47, 0}, 5*time.Second) + printer.Update(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B"}, term.Output) } func TestPrintUpdateWithSkipped(t *testing.T) { term, printer := createTextProgress() - printer.Update(State{3, 11, 2, 29, 47, 59}, 5*time.Second) + printer.Update(State{3, 11, 2, 0, 29, 47, 59}, 5*time.Second) test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B, skipped 2 files/dirs 59 B"}, term.Output) } func TestPrintSummaryOnSuccess(t *testing.T) { term, printer := createTextProgress() - printer.Finish(State{11, 11, 0, 47, 47, 0}, 5*time.Second) + printer.Finish(State{11, 11, 0, 0, 47, 47, 0}, 5*time.Second) test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05"}, term.Output) } func TestPrintSummaryOnErrors(t *testing.T) { term, printer := createTextProgress() - printer.Finish(State{3, 11, 0, 29, 47, 0}, 5*time.Second) + printer.Finish(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"Summary: Restored 3 / 11 files/dirs (29 B / 47 B) in 0:05"}, term.Output) } func TestPrintSummaryOnSuccessWithSkipped(t *testing.T) { term, printer := createTextProgress() - printer.Finish(State{11, 11, 2, 47, 47, 59}, 5*time.Second) + printer.Finish(State{11, 11, 2, 0, 47, 47, 59}, 5*time.Second) test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05, skipped 2 files/dirs 59 B"}, term.Output) } From 144221b430e3e4f9d2e7431925e1c4714cd38dde Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 15 Sep 2024 22:44:00 +0530 Subject: [PATCH 843/893] docker: add SLSA provenance to .github workflow * the id-token of the GitHub Actions workflow will be used for image signing * replace branch-based tagging with SHA-based tagging since, branch names are mutable, SLSA provenance requires immutable tagging * use official SLSA framework Github Reusable workflow docker: fix incorrect registry name in image output step * use REGISTRY environment variable instead of IMAGE_REGISTRY docker: revert change to remove branch tag --- .github/workflows/docker.yml | 32 +++++++++++++++++++++++++++++++- changelog/unreleased/issue-4983 | 8 ++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-4983 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a24660b4553..56a39aaecad 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -19,6 +19,11 @@ jobs: permissions: contents: read packages: write + id-token: write # needed for signing the images with GitHub OIDC Token + + 
outputs: + image: ${{ steps.image.outputs.image }} + digest: ${{ steps.build-and-push.outputs.digest }} steps: - name: Checkout repository @@ -37,7 +42,8 @@ jobs: with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} tags: | - type=ref,event=branch + type=sha + type=ref, event=branch type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} @@ -55,6 +61,7 @@ jobs: if: github.ref != 'refs/heads/master' - name: Build and push Docker image + id: build-and-push uses: docker/build-push-action@15560696de535e4014efeff63c48f16952e52dd1 with: push: true @@ -64,3 +71,26 @@ jobs: pull: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} + + - name: Output image + id: image + run: | + # NOTE: Set the image as an output because the `env` context is not + # available to the inputs of a reusable workflow call. + image_name="${REGISTRY}/${IMAGE_NAME}" + echo "image=$image_name" >> "$GITHUB_OUTPUT" + + provenance: + needs: [build-and-push-image] + permissions: + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + packages: write # for uploading attestations. + if: github.repository == 'restic/restic' + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0 + with: + image: ${{ needs.build-and-push-image.outputs.image }} + digest: ${{ needs.build-and-push-image.outputs.digest }} + registry-username: ${{ github.actor }} + secrets: + registry-password: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/changelog/unreleased/issue-4983 b/changelog/unreleased/issue-4983 new file mode 100644 index 00000000000..6649504bb1b --- /dev/null +++ b/changelog/unreleased/issue-4983 @@ -0,0 +1,8 @@ +Enhancement: add SLSA provenance to the Docker images + +Restic's Docker image build workflow now includes SLSA provenance generation. +This enhancement improves the security and traceability of the Docker images' +build process. + +https://github.com/restic/restic/issues/4983 +https://github.com/restic/restic/pull/4999 \ No newline at end of file From d422e75e0884882a8f18eeae8398c4dc721a2c21 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 29 Sep 2024 15:56:17 +0530 Subject: [PATCH 844/893] docs: add instructions for verifying SLSA provenance of Docker images docs: update the documentation --- doc/developer_information.rst | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/doc/developer_information.rst b/doc/developer_information.rst index f0fe28c32b2..f1eae3a678e 100644 --- a/doc/developer_information.rst +++ b/doc/developer_information.rst @@ -113,6 +113,34 @@ The following steps are necessary to build the binaries: restic/builder \ go run helpers/build-release-binaries/main.go --version 0.14.0 --verbose +Verifying SLSA Provenance for Docker Images +******************************************* + +Our Docker images are built with SLSA (Supply-chain Levels for Software Artifacts) +provenance. + +To verify this provenance: + +1. Install the `slsa-verifier` tool from https://github.com/slsa-framework/slsa-verifier + +2. Run the following command: + + .. code-block:: console + + $ slsa-verifier verify-image \ + --source-uri github.com/restic/restic \ + @ + + Replace `` with the Git tag of the release you're verifying, `` + with the full name of the Docker image (including the registry), and `` + with the SHA256 digest of the image. + +3. 
If the verification is successful, you'll see output indicating that the provenance +is valid. + +This verification ensures that the Docker image was built by our official GitHub +Actions workflow and has not been tampered with since its creation. + Verifying the Official Binaries ******************************* From 4fb9aa4351ab27d90972900a095f31613b60b096 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak <5201843+konidev20@users.noreply.github.com> Date: Mon, 2 Dec 2024 00:16:19 +0530 Subject: [PATCH 845/893] docker: fix typos and permissions for jobs --- .github/workflows/docker.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 56a39aaecad..1a75604e628 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -19,7 +19,6 @@ jobs: permissions: contents: read packages: write - id-token: write # needed for signing the images with GitHub OIDC Token outputs: image: ${{ steps.image.outputs.image }} @@ -43,7 +42,7 @@ jobs: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} tags: | type=sha - type=ref, event=branch + type=ref,event=branch type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} From 8d45a4b2833c209d41368df60e65d096df3fb1d3 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak <5201843+konidev20@users.noreply.github.com> Date: Mon, 2 Dec 2024 01:11:26 +0530 Subject: [PATCH 846/893] changelog: update to indicate change applies only for GHCR images. --- changelog/unreleased/issue-4983 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/changelog/unreleased/issue-4983 b/changelog/unreleased/issue-4983 index 6649504bb1b..e5292cf5c5c 100644 --- a/changelog/unreleased/issue-4983 +++ b/changelog/unreleased/issue-4983 @@ -1,8 +1,8 @@ -Enhancement: add SLSA provenance to the Docker images +Enhancement: add SLSA provenance to the GHCR Container images -Restic's Docker image build workflow now includes SLSA provenance generation. -This enhancement improves the security and traceability of the Docker images' -build process. +Restic's GitHub Container Registry (GHCR) image build workflow now includes +SLSA provenance generation. This enhancement improves the security and +traceability of images built and pushed to GHCR. https://github.com/restic/restic/issues/4983 https://github.com/restic/restic/pull/4999 \ No newline at end of file From 6808004ad1b3c5ad52bcc79d9584d98df177abed Mon Sep 17 00:00:00 2001 From: Aneesh N <99904+aneesh-n@users.noreply.github.com> Date: Tue, 10 Dec 2024 02:18:38 +0530 Subject: [PATCH 847/893] Refactor extended attributes and security descriptor helpers to use go-winio (#5040) * Refactor ea and sd helpers to use go-winio Import go-winio and instead of copying the functions to encode/decode extended attributes and enable process privileges for security descriptors, call the functions defined in go-winio. 
--- go.mod | 1 + go.sum | 2 + internal/fs/ea_windows.go | 153 +++-------------------- internal/fs/sd_windows.go | 256 +------------------------------------- 4 files changed, 23 insertions(+), 389 deletions(-) diff --git a/go.mod b/go.mod index 54462ea9b1b..a13ec2fb19b 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 github.com/Backblaze/blazer v0.7.1 + github.com/Microsoft/go-winio v0.6.2 github.com/anacrolix/fuse v0.3.1 github.com/cenkalti/backoff/v4 v4.3.0 github.com/cespare/xxhash/v2 v2.3.0 diff --git a/go.sum b/go.sum index bb125f0c0dd..6aa35aa8e3f 100644 --- a/go.sum +++ b/go.sum @@ -33,6 +33,8 @@ github.com/Backblaze/blazer v0.7.1 h1:J43PbFj6hXLg1jvCNr+rQoAsxzKK0IP7ftl1ReCwpc github.com/Backblaze/blazer v0.7.1/go.mod h1:MhntL1nMpIuoqrPP6TnZu/xTydMgOAe/Xm6KongbjKs= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk= github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0= github.com/anacrolix/fuse v0.3.1 h1:oT8s3B5HFkBdLe/WKJO5MNo9iIyEtc+BhvTZYp4jhDM= diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go index fe9a3c42a63..27c2fcdf151 100644 --- a/internal/fs/ea_windows.go +++ b/internal/fs/ea_windows.go @@ -4,17 +4,28 @@ package fs import ( - "bytes" - "encoding/binary" - "errors" "fmt" "syscall" "unsafe" + "github.com/Microsoft/go-winio" "golang.org/x/sys/windows" ) -// The code below was adapted from https://github.com/microsoft/go-winio under MIT license. +// extendedAttribute is a type alias for winio.ExtendedAttribute +type extendedAttribute = winio.ExtendedAttribute + +// encodeExtendedAttributes encodes the extended attributes to a byte slice. +func encodeExtendedAttributes(attrs []extendedAttribute) ([]byte, error) { + return winio.EncodeExtendedAttributes(attrs) +} + +// decodeExtendedAttributes decodes the extended attributes from a byte slice. +func decodeExtendedAttributes(data []byte) ([]extendedAttribute, error) { + return winio.DecodeExtendedAttributes(data) +} + +// The code below was copied over from https://github.com/microsoft/go-winio/blob/main/pipe.go under MIT license. // The MIT License (MIT) @@ -38,140 +49,6 @@ import ( // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -// The code below was copied over from https://github.com/microsoft/go-winio/blob/main/ea.go under MIT license. - -type fileFullEaInformation struct { - NextEntryOffset uint32 - Flags uint8 - NameLength uint8 - ValueLength uint16 -} - -var ( - fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) - - errInvalidEaBuffer = errors.New("invalid extended attribute buffer") - errEaNameTooLarge = errors.New("extended attribute name too large") - errEaValueTooLarge = errors.New("extended attribute value too large") -) - -// extendedAttribute represents a single Windows EA. 
-type extendedAttribute struct { - Name string - Value []byte - Flags uint8 -} - -func parseEa(b []byte) (ea extendedAttribute, nb []byte, err error) { - var info fileFullEaInformation - err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) - if err != nil { - err = errInvalidEaBuffer - return ea, nb, err - } - - nameOffset := fileFullEaInformationSize - nameLen := int(info.NameLength) - valueOffset := nameOffset + int(info.NameLength) + 1 - valueLen := int(info.ValueLength) - nextOffset := int(info.NextEntryOffset) - if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { - err = errInvalidEaBuffer - return ea, nb, err - } - - ea.Name = string(b[nameOffset : nameOffset+nameLen]) - ea.Value = b[valueOffset : valueOffset+valueLen] - ea.Flags = info.Flags - if info.NextEntryOffset != 0 { - nb = b[info.NextEntryOffset:] - } - return ea, nb, err -} - -// decodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION -// buffer retrieved from BackupRead, ZwQueryEaFile, etc. -func decodeExtendedAttributes(b []byte) (eas []extendedAttribute, err error) { - for len(b) != 0 { - ea, nb, err := parseEa(b) - if err != nil { - return nil, err - } - - eas = append(eas, ea) - b = nb - } - return eas, err -} - -func writeEa(buf *bytes.Buffer, ea *extendedAttribute, last bool) error { - if int(uint8(len(ea.Name))) != len(ea.Name) { - return errEaNameTooLarge - } - if int(uint16(len(ea.Value))) != len(ea.Value) { - return errEaValueTooLarge - } - entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) - withPadding := (entrySize + 3) &^ 3 - nextOffset := uint32(0) - if !last { - nextOffset = withPadding - } - info := fileFullEaInformation{ - NextEntryOffset: nextOffset, - Flags: ea.Flags, - NameLength: uint8(len(ea.Name)), - ValueLength: uint16(len(ea.Value)), - } - - err := binary.Write(buf, binary.LittleEndian, &info) - if err != nil { - return err - } - - _, err = buf.Write([]byte(ea.Name)) - if err != nil { - return err - } - - err = buf.WriteByte(0) - if err != nil { - return err - } - - _, err = buf.Write(ea.Value) - if err != nil { - return err - } - - _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) - if err != nil { - return err - } - - return nil -} - -// encodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION -// buffer for use with BackupWrite, ZwSetEaFile, etc. -func encodeExtendedAttributes(eas []extendedAttribute) ([]byte, error) { - var buf bytes.Buffer - for i := range eas { - last := false - if i == len(eas)-1 { - last = true - } - - err := writeEa(&buf, &eas[i], last) - if err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} - -// The code below was copied over from https://github.com/microsoft/go-winio/blob/main/pipe.go under MIT license. 
- type ntStatus int32 func (status ntStatus) Err() error { diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 6bffa4fe26e..04623e8d3dd 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -1,15 +1,13 @@ package fs import ( - "bytes" - "encoding/binary" "fmt" "sync" "sync/atomic" "syscall" - "unicode/utf16" "unsafe" + "github.com/Microsoft/go-winio" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "golang.org/x/sys/windows" @@ -161,6 +159,10 @@ func setNamedSecurityInfoLow(filePath string, dacl *windows.ACL) error { return windows.SetNamedSecurityInfo(fixpath(filePath), windows.SE_FILE_OBJECT, lowRestoreSecurityFlags, nil, nil, dacl, nil) } +func enableProcessPrivileges(privileges []string) error { + return winio.EnableProcessPrivileges(privileges) +} + // enableBackupPrivilege enables privilege for backing up security descriptors func enableBackupPrivilege() { err := enableProcessPrivileges([]string{seBackupPrivilege}) @@ -212,251 +214,3 @@ func securityDescriptorStructToBytes(sd *windows.SECURITY_DESCRIPTOR) ([]byte, e b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) return b, nil } - -// The code below was adapted from -// https://github.com/microsoft/go-winio/blob/3c9576c9346a1892dee136329e7e15309e82fb4f/privilege.go -// under MIT license. - -// The MIT License (MIT) - -// Copyright (c) 2015 Microsoft - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") -) - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoErrorIOPending = 997 - - //revive:disable-next-line:var-naming ALL_CAPS - SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED - - //revive:disable-next-line:var-naming ALL_CAPS - ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED -) - -var ( - errErrorIOPending error = syscall.Errno(errnoErrorIOPending) - errErrorEinval error = syscall.EINVAL - - privNames = make(map[string]uint64) - privNameMutex sync.Mutex -) - -// privilegeError represents an error enabling privileges. 
-type privilegeError struct { - privileges []uint64 -} - -// Error returns the string message for the error. -func (e *privilegeError) Error() string { - s := "Could not enable privilege " - if len(e.privileges) > 1 { - s = "Could not enable privileges " - } - for i, p := range e.privileges { - if i != 0 { - s += ", " - } - s += `"` - s += getPrivilegeName(p) - s += `"` - } - return s -} - -func mapPrivileges(names []string) ([]uint64, error) { - privileges := make([]uint64, 0, len(names)) - privNameMutex.Lock() - defer privNameMutex.Unlock() - for _, name := range names { - p, ok := privNames[name] - if !ok { - err := lookupPrivilegeValue("", name, &p) - if err != nil { - return nil, err - } - privNames[name] = p - } - privileges = append(privileges, p) - } - return privileges, nil -} - -// enableProcessPrivileges enables privileges globally for the process. -func enableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) -} - -func enableDisableProcessPrivilege(names []string, action uint32) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - - p := windows.CurrentProcess() - var token windows.Token - err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) - if err != nil { - return err - } - - defer func() { - _ = token.Close() - }() - return adjustPrivileges(token, privileges, action) -} - -func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { - var b bytes.Buffer - _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) - for _, p := range privileges { - _ = binary.Write(&b, binary.LittleEndian, p) - _ = binary.Write(&b, binary.LittleEndian, action) - } - prevState := make([]byte, b.Len()) - reqSize := uint32(0) - success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) - if !success { - return err - } - if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno - debug.Log("Not all requested privileges were fully set: %v. AdjustTokenPrivileges returned warning: %v", privileges, err) - } - return nil -} - -func getPrivilegeName(luid uint64) string { - var nameBuffer [256]uint16 - bufSize := uint32(len(nameBuffer)) - err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) - if err != nil { - return fmt.Sprintf("", luid) - } - - var displayNameBuffer [256]uint16 - displayBufSize := uint32(len(displayNameBuffer)) - var langID uint32 - err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) - if err != nil { - return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) - } - - return string(utf16.Decode(displayNameBuffer[:displayBufSize])) -} - -// The functions below are copied over from https://github.com/microsoft/go-winio/blob/main/zsyscall_windows.go under MIT license. - -// This windows api always returns an error even in case of success, warnings (partial success) and error cases. -// -// Full success - When we call this with admin permissions, it returns DNS_ERROR_RCODE_NO_ERROR (0). -// This gets translated to errErrorEinval and ultimately in adjustTokenPrivileges, it gets ignored. 
-// -// Partial success - If we call this api without admin privileges, privileges related to SACLs do not get set and -// though the api returns success, it returns an error - golang.org/x/sys/windows.ERROR_NOT_ALL_ASSIGNED (1300) -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } - r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageID *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageID) -} - -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageID *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageID))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) -} - -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -// The code below was copied from https://github.com/microsoft/go-winio/blob/main/tools/mkwinsyscall/mkwinsyscall.go under MIT license. - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. 
-func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errErrorEinval - case errnoErrorIOPending: - return errErrorIOPending - } - return e -} From 37969ae8e3725404d7d0a3ba04c3164a35f8fc29 Mon Sep 17 00:00:00 2001 From: Tyson Key Date: Sun, 15 Dec 2024 23:59:17 +0000 Subject: [PATCH 848/893] Enable xattr support, on NetBSD 10+ --- internal/fs/node_noxattr.go | 4 ++-- internal/fs/node_xattr.go | 4 ++-- internal/fs/node_xattr_all_test.go | 4 ++-- internal/fs/node_xattr_test.go | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/fs/node_noxattr.go b/internal/fs/node_noxattr.go index 281a16dbdcb..726827f6294 100644 --- a/internal/fs/node_noxattr.go +++ b/internal/fs/node_noxattr.go @@ -1,5 +1,5 @@ -//go:build aix || dragonfly || netbsd || openbsd -// +build aix dragonfly netbsd openbsd +//go:build aix || dragonfly || openbsd +// +build aix dragonfly openbsd package fs diff --git a/internal/fs/node_xattr.go b/internal/fs/node_xattr.go index e1ddf9826e7..1bdafc57598 100644 --- a/internal/fs/node_xattr.go +++ b/internal/fs/node_xattr.go @@ -1,5 +1,5 @@ -//go:build darwin || freebsd || linux || solaris -// +build darwin freebsd linux solaris +//go:build darwin || freebsd || netbsd || linux || solaris +// +build darwin freebsd netbsd linux solaris package fs diff --git a/internal/fs/node_xattr_all_test.go b/internal/fs/node_xattr_all_test.go index 690302f707c..81c931e2403 100644 --- a/internal/fs/node_xattr_all_test.go +++ b/internal/fs/node_xattr_all_test.go @@ -1,5 +1,5 @@ -//go:build darwin || freebsd || linux || solaris || windows -// +build darwin freebsd linux solaris windows +//go:build darwin || freebsd || netbsd || linux || solaris || windows +// +build darwin freebsd netbsd linux solaris windows package fs diff --git a/internal/fs/node_xattr_test.go b/internal/fs/node_xattr_test.go index 3784dba45df..7205e1fbec1 100644 --- a/internal/fs/node_xattr_test.go +++ b/internal/fs/node_xattr_test.go @@ -1,5 +1,5 @@ -//go:build darwin || freebsd || linux || solaris -// +build darwin freebsd linux solaris +//go:build darwin || freebsd || netbsd || linux || solaris +// +build darwin freebsd netbsd linux solaris package fs From 9e85119d730edc1a2e1d290ffd8bc6563c69ea89 Mon Sep 17 00:00:00 2001 From: Tyson Key Date: Thu, 19 Dec 2024 14:32:16 +0000 Subject: [PATCH 849/893] Update changelog, for issue 5174 (Enable xattr support, on NetBSD 10+) --- changelog/unreleased/issue-5174 | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 changelog/unreleased/issue-5174 diff --git a/changelog/unreleased/issue-5174 b/changelog/unreleased/issue-5174 new file mode 100644 index 00000000000..7f7922a7aa9 --- /dev/null +++ b/changelog/unreleased/issue-5174 @@ -0,0 +1,6 @@ +Enhancement: Enable xattr support, on NetBSD 10+ + +Restic now supports backing up, and restoring extended attributes, on systems running NetBSD 10, or later. + +https://github.com/restic/restic/issues/5174 +https://github.com/restic/restic/pull/5180 From caa59bb81b5afaac43636e1656774e384611e97e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Jan 2025 01:54:05 +0000 Subject: [PATCH 850/893] build(deps): bump github.com/klauspost/compress from 1.17.9 to 1.17.11 Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.17.9 to 1.17.11. 
- [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.17.9...v1.17.11) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a13ec2fb19b..e6bde442a0b 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/go-ole/go-ole v1.3.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/klauspost/compress v1.17.9 + github.com/klauspost/compress v1.17.11 github.com/minio/minio-go/v7 v7.0.77 github.com/ncw/swift/v2 v2.0.3 github.com/peterbourgon/unixtransport v0.0.4 diff --git a/go.sum b/go.sum index 6aa35aa8e3f..5db78034b04 100644 --- a/go.sum +++ b/go.sum @@ -136,8 +136,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= From 01405f1e1be7212be19c6790c2cfcbe55a62f0d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Jan 2025 01:54:17 +0000 Subject: [PATCH 851/893] build(deps): bump golang.org/x/sys from 0.27.0 to 0.28.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.27.0 to 0.28.0. - [Commits](https://github.com/golang/sys/compare/v0.27.0...v0.28.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a13ec2fb19b..14396a4c348 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( golang.org/x/net v0.30.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.9.0 - golang.org/x/sys v0.27.0 + golang.org/x/sys v0.28.0 golang.org/x/term v0.25.0 golang.org/x/text v0.20.0 golang.org/x/time v0.7.0 diff --git a/go.sum b/go.sum index 6aa35aa8e3f..9bf973326ab 100644 --- a/go.sum +++ b/go.sum @@ -292,8 +292,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= From bbb492ee650fe9afa23b43b2766b849e5eb463df Mon Sep 17 00:00:00 2001 From: knbr13 Date: Sun, 5 Jan 2025 13:53:20 +0200 Subject: [PATCH 852/893] remove duplicate imports --- internal/fs/node_test.go | 3 +-- internal/repository/lock_test.go | 29 ++++++++++++------------- internal/repository/repair_pack_test.go | 7 +++--- internal/repository/repository_test.go | 11 +++++----- 4 files changed, 23 insertions(+), 27 deletions(-) diff --git a/internal/fs/node_test.go b/internal/fs/node_test.go index 65098e30473..15f19adc7f2 100644 --- a/internal/fs/node_test.go +++ b/internal/fs/node_test.go @@ -13,7 +13,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" ) @@ -293,5 +292,5 @@ func TestNodeRestoreMetadataError(t *testing.T) { // This will fail because the target file does not exist err := NodeRestoreMetadata(node, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }) - test.Assert(t, errors.Is(err, os.ErrNotExist), "failed for an unexpected reason") + rtest.Assert(t, errors.Is(err, os.ErrNotExist), "failed for an unexpected reason") } diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index bd7cbd5e2ca..a9ff369c206 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -13,7 +13,6 @@ import ( "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" ) @@ -36,8 +35,8 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*Repository, backen func checkedLockRepo(ctx context.Context, t *testing.T, repo *Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { lock, wrappedCtx, err := lockerInst.Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format 
string, args ...interface{}) {}) - test.OK(t, err) - test.OK(t, wrappedCtx.Err()) + rtest.OK(t, err) + rtest.OK(t, wrappedCtx.Err()) if lock.info.lock.Stale() { t.Fatal("lock returned stale lock") } @@ -77,13 +76,13 @@ func TestLockConflict(t *testing.T) { repo2 := TestOpenBackend(t, be) lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) - test.OK(t, err) + rtest.OK(t, err) defer lock.Unlock() _, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {}) if err == nil { t.Fatal("second lock should have failed") } - test.Assert(t, restic.IsAlreadyLocked(err), "unexpected error %v", err) + rtest.Assert(t, restic.IsAlreadyLocked(err), "unexpected error %v", err) } type writeOnceBackend struct { @@ -241,7 +240,7 @@ func TestLockWaitTimeout(t *testing.T) { repo, _ := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) - test.OK(t, err) + rtest.OK(t, err) defer elock.Unlock() retryLock := 200 * time.Millisecond @@ -250,11 +249,11 @@ func TestLockWaitTimeout(t *testing.T) { _, _, err = Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) - test.Assert(t, err != nil, + rtest.Assert(t, err != nil, "create normal lock with exclusively locked repo didn't return an error") - test.Assert(t, strings.Contains(err.Error(), "repository is already locked exclusively"), + rtest.Assert(t, strings.Contains(err.Error(), "repository is already locked exclusively"), "create normal lock with exclusively locked repo didn't return the correct error") - test.Assert(t, retryLock <= duration && duration < retryLock*3/2, + rtest.Assert(t, retryLock <= duration && duration < retryLock*3/2, "create normal lock with exclusively locked repo didn't wait for the specified timeout") } @@ -263,7 +262,7 @@ func TestLockWaitCancel(t *testing.T) { repo, _ := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) - test.OK(t, err) + rtest.OK(t, err) defer elock.Unlock() retryLock := 200 * time.Millisecond @@ -276,11 +275,11 @@ func TestLockWaitCancel(t *testing.T) { _, _, err = Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) - test.Assert(t, err != nil, + rtest.Assert(t, err != nil, "create normal lock with exclusively locked repo didn't return an error") - test.Assert(t, strings.Contains(err.Error(), "context canceled"), + rtest.Assert(t, strings.Contains(err.Error(), "context canceled"), "create normal lock with exclusively locked repo didn't return the correct error") - test.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond, + rtest.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond, "create normal lock with exclusively locked repo didn't return in time, duration %v", duration) } @@ -289,7 +288,7 @@ func TestLockWaitSuccess(t *testing.T) { repo, _ := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) - test.OK(t, err) + rtest.OK(t, err) retryLock := 200 * time.Millisecond unlockAfter := 40 * time.Millisecond @@ -299,6 +298,6 @@ func TestLockWaitSuccess(t *testing.T) { }) lock, _, err := Lock(context.TODO(), 
repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) - test.OK(t, err) + rtest.OK(t, err) lock.Unlock() } diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index e5f7a7f2265..5f02e7d6184 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -10,7 +10,6 @@ import ( backendtest "github.com/restic/restic/internal/backend/test" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/progress" ) @@ -25,10 +24,10 @@ func listBlobs(repo restic.Repository) restic.BlobSet { func replaceFile(t *testing.T, be backend.Backend, h backend.Handle, damage func([]byte) []byte) { buf, err := backendtest.LoadAll(context.TODO(), be, h) - test.OK(t, err) + rtest.OK(t, err) buf = damage(buf) - test.OK(t, be.Remove(context.TODO(), h)) - test.OK(t, be.Save(context.TODO(), h, backend.NewByteReader(buf, be.Hasher()))) + rtest.OK(t, be.Remove(context.TODO(), h)) + rtest.OK(t, be.Save(context.TODO(), h, backend.NewByteReader(buf, be.Hasher()))) } func TestRepairBrokenPack(t *testing.T) { diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 3467a9cfade..5a6897f8f44 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -21,7 +21,6 @@ import ( "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" ) @@ -145,7 +144,7 @@ func testLoadBlob(t *testing.T, version uint) { func TestLoadBlobBroken(t *testing.T) { be := mem.New() repo, _ := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}) - buf := test.Random(42, 1000) + buf := rtest.Random(42, 1000) var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) @@ -421,7 +420,7 @@ func TestInvalidCompression(t *testing.T) { func TestListPack(t *testing.T) { be := mem.New() repo, _ := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}) - buf := test.Random(42, 1000) + buf := rtest.Random(42, 1000) var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) @@ -460,12 +459,12 @@ func TestNoDoubleInit(t *testing.T) { rtest.OK(t, err) pol := r.Config().ChunkerPolynomial - err = repo.Init(context.TODO(), r.Config().Version, test.TestPassword, &pol) + err = repo.Init(context.TODO(), r.Config().Version, rtest.TestPassword, &pol) rtest.Assert(t, strings.Contains(err.Error(), "repository master key and config already initialized"), "expected config exist error, got %q", err) // must also prevent init if only keys exist rtest.OK(t, be.Remove(context.TODO(), backend.Handle{Type: backend.ConfigFile})) - err = repo.Init(context.TODO(), r.Config().Version, test.TestPassword, &pol) + err = repo.Init(context.TODO(), r.Config().Version, rtest.TestPassword, &pol) rtest.Assert(t, strings.Contains(err.Error(), "repository already contains keys"), "expected already contains keys error, got %q", err) // must also prevent init if a snapshot exists and keys were deleted @@ -475,6 +474,6 @@ func TestNoDoubleInit(t *testing.T) { rtest.OK(t, be.List(context.TODO(), 
restic.KeyFile, func(fi backend.FileInfo) error { return be.Remove(context.TODO(), backend.Handle{Type: restic.KeyFile, Name: fi.Name}) })) - err = repo.Init(context.TODO(), r.Config().Version, test.TestPassword, &pol) + err = repo.Init(context.TODO(), r.Config().Version, rtest.TestPassword, &pol) rtest.Assert(t, strings.Contains(err.Error(), "repository already contains snapshots"), "expected already contains snapshots error, got %q", err) } From af839f9548c3086c29a2b299a90722e849f5de77 Mon Sep 17 00:00:00 2001 From: Tesshu Flower Date: Mon, 4 Nov 2024 02:14:45 -0500 Subject: [PATCH 853/893] restore: exclude/include xattrs For: https://github.com/restic/restic/issues/5089 Signed-off-by: Tesshu Flower --- cmd/restic/cmd_restore.go | 48 ++++++++++++++++++++++++++---- internal/fs/node.go | 8 ++--- internal/fs/node_test.go | 9 ++++-- internal/fs/node_xattr.go | 20 ++++++++----- internal/fs/node_xattr_all_test.go | 2 +- internal/restorer/restorer.go | 4 ++- 6 files changed, 70 insertions(+), 21 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 82dd408a854..fc3148ce1ab 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -54,11 +54,13 @@ type RestoreOptions struct { filter.IncludePatternOptions Target string restic.SnapshotFilter - DryRun bool - Sparse bool - Verify bool - Overwrite restorer.OverwriteBehavior - Delete bool + DryRun bool + Sparse bool + Verify bool + Overwrite restorer.OverwriteBehavior + Delete bool + ExcludeXattrPattern []string + IncludeXattrPattern []string } var restoreOptions RestoreOptions @@ -72,6 +74,9 @@ func init() { restoreOptions.ExcludePatternOptions.Add(flags) restoreOptions.IncludePatternOptions.Add(flags) + flags.StringArrayVar(&restoreOptions.ExcludeXattrPattern, "exclude-xattr", nil, "exclude xattr by `pattern` (can be specified multiple times)") + flags.StringArrayVar(&restoreOptions.IncludeXattrPattern, "include-xattr", nil, "include xattr by `pattern` (can be specified multiple times)") + initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter) flags.BoolVar(&restoreOptions.DryRun, "dry-run", false, "do not write any data, just show what would be done") flags.BoolVar(&restoreOptions.Sparse, "sparse", false, "restore files as sparse") @@ -96,6 +101,9 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, hasExcludes := len(excludePatternFns) > 0 hasIncludes := len(includePatternFns) > 0 + hasXattrExcludes := len(opts.ExcludeXattrPattern) > 0 + hasXattrIncludes := len(opts.IncludeXattrPattern) > 0 + switch { case len(args) == 0: return errors.Fatal("no snapshot ID specified") @@ -110,6 +118,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, if hasExcludes && hasIncludes { return errors.Fatal("exclude and include patterns are mutually exclusive") } + + if hasXattrExcludes && hasXattrIncludes { + return errors.Fatal("exclude and include xattr patterns are mutually exclusive") + } + if opts.DryRun && opts.Verify { return errors.Fatal("--dry-run and --verify are mutually exclusive") } @@ -219,6 +232,31 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, res.SelectFilter = selectIncludeFilter } + if !hasXattrExcludes && !hasXattrIncludes { + // set default of including xattrs from the 'user' namespace + opts.IncludeXattrPattern = []string{"user.*"} + } + if hasXattrExcludes { + if err := filter.ValidatePatterns(opts.ExcludeXattrPattern); err != nil { + return errors.Fatalf("--exclude-xattr: %s", err) + } + + 
res.XattrSelectFilter = func(xattrName string) bool { + shouldReject := filter.RejectByPattern(opts.ExcludeXattrPattern, Warnf)(xattrName) + return !shouldReject + } + } else { + // User has either input include xattr pattern(s) or we're using our default include pattern + if err := filter.ValidatePatterns(opts.IncludeXattrPattern); err != nil { + return errors.Fatalf("--include-xattr: %s", err) + } + + res.XattrSelectFilter = func(xattrName string) bool { + shouldInclude, _ := filter.IncludeByPattern(opts.IncludeXattrPattern, Warnf)(xattrName) + return shouldInclude + } + } + if !gopts.JSON { msg.P("restoring %s to %s\n", res.Snapshot(), opts.Target) } diff --git a/internal/fs/node.go b/internal/fs/node.go index 058d9cc7b8b..ab2aca957d0 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -230,8 +230,8 @@ func mkfifo(path string, mode uint32) (err error) { } // NodeRestoreMetadata restores node metadata -func NodeRestoreMetadata(node *restic.Node, path string, warn func(msg string)) error { - err := nodeRestoreMetadata(node, path, warn) +func NodeRestoreMetadata(node *restic.Node, path string, warn func(msg string), xattrSelectFilter func(xattrName string) bool) error { + err := nodeRestoreMetadata(node, path, warn, xattrSelectFilter) if err != nil { // It is common to have permission errors for folders like /home // unless you're running as root, so ignore those. @@ -246,14 +246,14 @@ func NodeRestoreMetadata(node *restic.Node, path string, warn func(msg string)) return err } -func nodeRestoreMetadata(node *restic.Node, path string, warn func(msg string)) error { +func nodeRestoreMetadata(node *restic.Node, path string, warn func(msg string), xattrSelectFilter func(xattrName string) bool) error { var firsterr error if err := lchown(path, int(node.UID), int(node.GID)); err != nil { firsterr = errors.WithStack(err) } - if err := nodeRestoreExtendedAttributes(node, path); err != nil { + if err := nodeRestoreExtendedAttributes(node, path, xattrSelectFilter); err != nil { debug.Log("error restoring extended attributes for %v: %v", path, err) if firsterr == nil { firsterr = err diff --git a/internal/fs/node_test.go b/internal/fs/node_test.go index 15f19adc7f2..b67295f68bc 100644 --- a/internal/fs/node_test.go +++ b/internal/fs/node_test.go @@ -13,6 +13,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" ) @@ -216,7 +217,8 @@ func TestNodeRestoreAt(t *testing.T) { nodePath = filepath.Join(tempdir, test.Name) } rtest.OK(t, NodeCreateAt(&test, nodePath)) - rtest.OK(t, NodeRestoreMetadata(&test, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) + rtest.OK(t, NodeRestoreMetadata(&test, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }, + func(_ string) bool { return true } /* restore all xattrs */)) fs := &Local{} meta, err := fs.OpenFile(nodePath, O_NOFOLLOW, true) @@ -291,6 +293,7 @@ func TestNodeRestoreMetadataError(t *testing.T) { nodePath := filepath.Join(tempdir, node.Name) // This will fail because the target file does not exist - err := NodeRestoreMetadata(node, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }) - rtest.Assert(t, errors.Is(err, os.ErrNotExist), "failed for an unexpected reason") + err := NodeRestoreMetadata(node, nodePath, 
func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }, + func(_ string) bool { return true }) + test.Assert(t, errors.Is(err, os.ErrNotExist), "failed for an unexpected reason") } diff --git a/internal/fs/node_xattr.go b/internal/fs/node_xattr.go index 1bdafc57598..2a2b5c0fba4 100644 --- a/internal/fs/node_xattr.go +++ b/internal/fs/node_xattr.go @@ -65,14 +65,17 @@ func handleXattrErr(err error) error { } } -func nodeRestoreExtendedAttributes(node *restic.Node, path string) error { +func nodeRestoreExtendedAttributes(node *restic.Node, path string, xattrSelectFilter func(xattrName string) bool) error { expectedAttrs := map[string]struct{}{} for _, attr := range node.ExtendedAttributes { - err := setxattr(path, attr.Name, attr.Value) - if err != nil { - return err + // Only restore xattrs that match the filter + if xattrSelectFilter(attr.Name) { + err := setxattr(path, attr.Name, attr.Value) + if err != nil { + return err + } + expectedAttrs[attr.Name] = struct{}{} } - expectedAttrs[attr.Name] = struct{}{} } // remove unexpected xattrs @@ -84,8 +87,11 @@ func nodeRestoreExtendedAttributes(node *restic.Node, path string) error { if _, ok := expectedAttrs[name]; ok { continue } - if err := removexattr(path, name); err != nil { - return err + // Only attempt to remove xattrs that match the filter + if xattrSelectFilter(name) { + if err := removexattr(path, name); err != nil { + return err + } } } diff --git a/internal/fs/node_xattr_all_test.go b/internal/fs/node_xattr_all_test.go index 81c931e2403..469f140d7d9 100644 --- a/internal/fs/node_xattr_all_test.go +++ b/internal/fs/node_xattr_all_test.go @@ -26,7 +26,7 @@ func setAndVerifyXattr(t *testing.T, file string, attrs []restic.ExtendedAttribu Type: restic.NodeTypeFile, ExtendedAttributes: attrs, } - rtest.OK(t, nodeRestoreExtendedAttributes(node, file)) + rtest.OK(t, nodeRestoreExtendedAttributes(node, file, func(_ string) bool { return true } /*restore all xattrs*/)) nodeActual := &restic.Node{ Type: restic.NodeTypeFile, diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 14a8edeaca7..536958d4f42 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -31,6 +31,8 @@ type Restorer struct { // SelectFilter determines whether the item is selectedForRestore or whether a childMayBeSelected. // selectedForRestore must not depend on isDir as `removeUnexpectedFiles` always passes false to isDir. 
SelectFilter func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) + + XattrSelectFilter func(xattrName string) (xattrSelectedForRestore bool) } var restorerAbortOnAllErrors = func(_ string, err error) error { return err } @@ -288,7 +290,7 @@ func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location s return nil } debug.Log("restoreNodeMetadata %v %v %v", node.Name, target, location) - err := fs.NodeRestoreMetadata(node, target, res.Warn) + err := fs.NodeRestoreMetadata(node, target, res.Warn, res.XattrSelectFilter) if err != nil { debug.Log("node.RestoreMetadata(%s) error %v", target, err) } From f457b16b23f691c23ce0cabcd04ed257ef790b4e Mon Sep 17 00:00:00 2001 From: Tesshu Flower Date: Fri, 15 Nov 2024 15:55:29 -0500 Subject: [PATCH 854/893] update nodeRestoreExtendedAttributes() for win - also other platforms - move xattr include/exclude filter parsing into separate func Signed-off-by: Tesshu Flower --- cmd/restic/cmd_restore.go | 68 +++++++++++++++++------------- internal/fs/node_noxattr.go | 2 +- internal/fs/node_test.go | 3 +- internal/fs/node_windows.go | 17 +++++--- internal/fs/node_windows_test.go | 2 +- internal/fs/node_xattr_all_test.go | 3 +- internal/restorer/restorer.go | 13 +++--- 7 files changed, 62 insertions(+), 46 deletions(-) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index fc3148ce1ab..3bc6ac5c5b6 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -101,9 +101,6 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, hasExcludes := len(excludePatternFns) > 0 hasIncludes := len(includePatternFns) > 0 - hasXattrExcludes := len(opts.ExcludeXattrPattern) > 0 - hasXattrIncludes := len(opts.IncludeXattrPattern) > 0 - switch { case len(args) == 0: return errors.Fatal("no snapshot ID specified") @@ -119,10 +116,6 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, return errors.Fatal("exclude and include patterns are mutually exclusive") } - if hasXattrExcludes && hasXattrIncludes { - return errors.Fatal("exclude and include xattr patterns are mutually exclusive") - } - if opts.DryRun && opts.Verify { return errors.Fatal("--dry-run and --verify are mutually exclusive") } @@ -232,29 +225,9 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, res.SelectFilter = selectIncludeFilter } - if !hasXattrExcludes && !hasXattrIncludes { - // set default of including xattrs from the 'user' namespace - opts.IncludeXattrPattern = []string{"user.*"} - } - if hasXattrExcludes { - if err := filter.ValidatePatterns(opts.ExcludeXattrPattern); err != nil { - return errors.Fatalf("--exclude-xattr: %s", err) - } - - res.XattrSelectFilter = func(xattrName string) bool { - shouldReject := filter.RejectByPattern(opts.ExcludeXattrPattern, Warnf)(xattrName) - return !shouldReject - } - } else { - // User has either input include xattr pattern(s) or we're using our default include pattern - if err := filter.ValidatePatterns(opts.IncludeXattrPattern); err != nil { - return errors.Fatalf("--include-xattr: %s", err) - } - - res.XattrSelectFilter = func(xattrName string) bool { - shouldInclude, _ := filter.IncludeByPattern(opts.IncludeXattrPattern, Warnf)(xattrName) - return shouldInclude - } + res.XattrSelectFilter, err = getXattrSelectFilter(opts) + if err != nil { + return err } if !gopts.JSON { @@ -295,3 +268,38 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, return nil } + +func 
getXattrSelectFilter(opts RestoreOptions) (func(xattrName string) bool, error) { + hasXattrExcludes := len(opts.ExcludeXattrPattern) > 0 + hasXattrIncludes := len(opts.IncludeXattrPattern) > 0 + + if hasXattrExcludes && hasXattrIncludes { + return nil, errors.Fatal("exclude and include xattr patterns are mutually exclusive") + } + + if hasXattrExcludes { + if err := filter.ValidatePatterns(opts.ExcludeXattrPattern); err != nil { + return nil, errors.Fatalf("--exclude-xattr: %s", err) + } + + return func(xattrName string) bool { + shouldReject := filter.RejectByPattern(opts.ExcludeXattrPattern, Warnf)(xattrName) + return !shouldReject + }, nil + } + + if hasXattrIncludes { + // User has either input include xattr pattern(s) or we're using our default include pattern + if err := filter.ValidatePatterns(opts.IncludeXattrPattern); err != nil { + return nil, errors.Fatalf("--include-xattr: %s", err) + } + + return func(xattrName string) bool { + shouldInclude, _ := filter.IncludeByPattern(opts.IncludeXattrPattern, Warnf)(xattrName) + return shouldInclude + }, nil + } + + // no includes or excludes, set default of including all xattrs + return func(_ string) bool { return true }, nil +} diff --git a/internal/fs/node_noxattr.go b/internal/fs/node_noxattr.go index 726827f6294..2dbd72c9d2d 100644 --- a/internal/fs/node_noxattr.go +++ b/internal/fs/node_noxattr.go @@ -8,7 +8,7 @@ import ( ) // nodeRestoreExtendedAttributes is a no-op -func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { +func nodeRestoreExtendedAttributes(_ *restic.Node, _ string, _ func(xattrName string) bool) error { return nil } diff --git a/internal/fs/node_test.go b/internal/fs/node_test.go index b67295f68bc..490ab7e409e 100644 --- a/internal/fs/node_test.go +++ b/internal/fs/node_test.go @@ -217,8 +217,9 @@ func TestNodeRestoreAt(t *testing.T) { nodePath = filepath.Join(tempdir, test.Name) } rtest.OK(t, NodeCreateAt(&test, nodePath)) + // Restore metadata, restoring all xattrs rtest.OK(t, NodeRestoreMetadata(&test, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) }, - func(_ string) bool { return true } /* restore all xattrs */)) + func(_ string) bool { return true })) fs := &Local{} meta, err := fs.OpenFile(nodePath, O_NOFOLLOW, true) diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index 74cf6c0e5c3..df0a7ea651f 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -69,15 +69,20 @@ func utimesNano(path string, atime, mtime int64, _ restic.NodeType) error { } // restore extended attributes for windows -func nodeRestoreExtendedAttributes(node *restic.Node, path string) (err error) { +func nodeRestoreExtendedAttributes(node *restic.Node, path string, xattrSelectFilter func(xattrName string) bool) error { count := len(node.ExtendedAttributes) if count > 0 { - eas := make([]extendedAttribute, count) - for i, attr := range node.ExtendedAttributes { - eas[i] = extendedAttribute{Name: attr.Name, Value: attr.Value} + eas := []extendedAttribute{} + for _, attr := range node.ExtendedAttributes { + // Filter for xattrs we want to include/exclude + if xattrSelectFilter(attr.Name) { + eas = append(eas, extendedAttribute{Name: attr.Name, Value: attr.Value}) + } } - if errExt := restoreExtendedAttributes(node.Type, path, eas); errExt != nil { - return errExt + if len(eas) > 0 { + if errExt := restoreExtendedAttributes(node.Type, path, eas); errExt != nil { + return errExt + } } } return nil diff --git 
a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index f75df54d3c3..458a7bcb179 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -218,7 +218,7 @@ func restoreAndGetNode(t *testing.T, tempDir string, testNode *restic.Node, warn // If warning is not expected, this code should not get triggered. test.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", testPath, msg)) } - }) + }, func(_ string) bool { return true }) test.OK(t, errors.Wrapf(err, "Failed to restore metadata for: %s", testPath)) fs := &Local{} diff --git a/internal/fs/node_xattr_all_test.go b/internal/fs/node_xattr_all_test.go index 469f140d7d9..65ac5c5806a 100644 --- a/internal/fs/node_xattr_all_test.go +++ b/internal/fs/node_xattr_all_test.go @@ -26,7 +26,8 @@ func setAndVerifyXattr(t *testing.T, file string, attrs []restic.ExtendedAttribu Type: restic.NodeTypeFile, ExtendedAttributes: attrs, } - rtest.OK(t, nodeRestoreExtendedAttributes(node, file, func(_ string) bool { return true } /*restore all xattrs*/)) + /* restore all xattrs */ + rtest.OK(t, nodeRestoreExtendedAttributes(node, file, func(_ string) bool { return true })) nodeActual := &restic.Node{ Type: restic.NodeTypeFile, diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index 536958d4f42..cce175ebc1a 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -99,12 +99,13 @@ func (c *OverwriteBehavior) Type() string { // NewRestorer creates a restorer preloaded with the content from the snapshot id. func NewRestorer(repo restic.Repository, sn *restic.Snapshot, opts Options) *Restorer { r := &Restorer{ - repo: repo, - opts: opts, - fileList: make(map[string]bool), - Error: restorerAbortOnAllErrors, - SelectFilter: func(string, bool) (bool, bool) { return true, true }, - sn: sn, + repo: repo, + opts: opts, + fileList: make(map[string]bool), + Error: restorerAbortOnAllErrors, + SelectFilter: func(string, bool) (bool, bool) { return true, true }, + XattrSelectFilter: func(string) bool { return true }, + sn: sn, } return r From 24422e20a6abb804f0c132137730285f01026ef4 Mon Sep 17 00:00:00 2001 From: Tesshu Flower Date: Mon, 2 Dec 2024 19:38:43 -0500 Subject: [PATCH 855/893] restore: xattr restore filter tests Signed-off-by: Tesshu Flower --- internal/fs/node_xattr_all_test.go | 138 +++++++++++++++++++++++++++++ 1 file changed, 138 insertions(+) diff --git a/internal/fs/node_xattr_all_test.go b/internal/fs/node_xattr_all_test.go index 65ac5c5806a..cf3738722bd 100644 --- a/internal/fs/node_xattr_all_test.go +++ b/internal/fs/node_xattr_all_test.go @@ -4,12 +4,14 @@ package fs import ( + "bytes" "os" "path/filepath" "runtime" "strings" "testing" + "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -37,6 +39,56 @@ func setAndVerifyXattr(t *testing.T, file string, attrs []restic.ExtendedAttribu rtest.Assert(t, nodeActual.Equals(*node), "xattr mismatch got %v expected %v", nodeActual.ExtendedAttributes, node.ExtendedAttributes) } +func setAndVerifyXattrWithSelectFilter(t *testing.T, file string, testAttr []testXattrToRestore, xattrSelectFilter func(_ string) bool) { + attrs := make([]restic.ExtendedAttribute, len(testAttr)) + for i := range testAttr { + attrs[i] = testAttr[i].xattr + } + + if runtime.GOOS == "windows" { + // windows seems to convert the xattr name to upper case + for i := range attrs { + attrs[i].Name = strings.ToUpper(attrs[i].Name) + } + } + + node := &restic.Node{ 
+ Type: restic.NodeTypeFile, + ExtendedAttributes: attrs, + } + + rtest.OK(t, nodeRestoreExtendedAttributes(node, file, xattrSelectFilter)) + + nodeActual := &restic.Node{ + Type: restic.NodeTypeFile, + } + rtest.OK(t, nodeFillExtendedAttributes(nodeActual, file, false)) + + // Check nodeActual to make sure only xattrs we expect are there + for _, testAttr := range testAttr { + xattrFound := false + xattrRestored := false + for _, restoredAttr := range nodeActual.ExtendedAttributes { + if restoredAttr.Name == testAttr.xattr.Name { + xattrFound = true + xattrRestored = bytes.Equal(restoredAttr.Value, testAttr.xattr.Value) + break + } + } + if testAttr.shouldRestore { + rtest.Assert(t, xattrFound, "xattr %s not restored", testAttr.xattr.Name) + rtest.Assert(t, xattrRestored, "xattr %v value not restored", testAttr.xattr) + } else { + rtest.Assert(t, !xattrFound, "xattr %v should not have been restored", testAttr.xattr) + } + } +} + +type testXattrToRestore struct { + xattr restic.ExtendedAttribute + shouldRestore bool +} + func TestOverwriteXattr(t *testing.T) { dir := t.TempDir() file := filepath.Join(dir, "file") @@ -47,6 +99,10 @@ func TestOverwriteXattr(t *testing.T) { Name: "user.foo", Value: []byte("bar"), }, + { + Name: "abc.test", + Value: []byte("testxattr"), + }, }) setAndVerifyXattr(t, file, []restic.ExtendedAttribute{ @@ -56,3 +112,85 @@ func TestOverwriteXattr(t *testing.T) { }, }) } + +func TestOverwriteXattrWithSelectFilter(t *testing.T) { + dir := t.TempDir() + file := filepath.Join(dir, "file2") + rtest.OK(t, os.WriteFile(file, []byte("hello world"), 0o600)) + + noopWarnf := func(_ string, _ ...interface{}) {} + + // Set a filter as if the user passed in --include-xattr user.* + xattrSelectFilter1 := func(xattrName string) bool { + shouldInclude, _ := filter.IncludeByPattern([]string{"user.*"}, noopWarnf)(xattrName) + return shouldInclude + } + + setAndVerifyXattrWithSelectFilter(t, file, []testXattrToRestore{ + { + xattr: restic.ExtendedAttribute{ + Name: "user.foo", + Value: []byte("bar"), + }, + shouldRestore: true, + }, + { + xattr: restic.ExtendedAttribute{ + Name: "user.test", + Value: []byte("testxattr"), + }, + shouldRestore: true, + }, + { + xattr: restic.ExtendedAttribute{ + Name: "security.other", + Value: []byte("testing"), + }, + shouldRestore: false, + }, + }, xattrSelectFilter1) + + // Set a filter as if the user passed in --include-xattr user.* + xattrSelectFilter2 := func(xattrName string) bool { + shouldInclude, _ := filter.IncludeByPattern([]string{"user.o*", "user.comm*"}, noopWarnf)(xattrName) + return shouldInclude + } + + setAndVerifyXattrWithSelectFilter(t, file, []testXattrToRestore{ + { + xattr: restic.ExtendedAttribute{ + Name: "user.other", + Value: []byte("some"), + }, + shouldRestore: true, + }, + { + xattr: restic.ExtendedAttribute{ + Name: "security.other", + Value: []byte("testing"), + }, + shouldRestore: false, + }, + { + xattr: restic.ExtendedAttribute{ + Name: "user.open", + Value: []byte("door"), + }, + shouldRestore: true, + }, + { + xattr: restic.ExtendedAttribute{ + Name: "user.common", + Value: []byte("testing"), + }, + shouldRestore: true, + }, + { + xattr: restic.ExtendedAttribute{ + Name: "user.bad", + Value: []byte("dontincludeme"), + }, + shouldRestore: false, + }, + }, xattrSelectFilter2) +} From 3ac697d03d8460b774cadf133ef378174c598ea9 Mon Sep 17 00:00:00 2001 From: Tesshu Flower Date: Mon, 2 Dec 2024 23:33:15 -0500 Subject: [PATCH 856/893] linux default restore only user xattrs, doc update * On Linux restore only user.* xattrs 
by default * restore all for other OSs * Update docs and changelog about the new restore flags --exclude-xattr and --include-xattr Signed-off-by: Tesshu Flower --- changelog/unreleased/issue-5089 | 22 ++++++++++++++++++++++ cmd/restic/cmd_restore.go | 11 ++++++++++- doc/050_restore.rst | 16 ++++++++++++++++ 3 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-5089 diff --git a/changelog/unreleased/issue-5089 b/changelog/unreleased/issue-5089 new file mode 100644 index 00000000000..51b5a679c3f --- /dev/null +++ b/changelog/unreleased/issue-5089 @@ -0,0 +1,22 @@ +Enhancement: Allow including or excluding extended file attributes +during restore. + +# Describe the problem in the past tense, the new behavior in the present +# tense. Mention the affected commands, backends, operating systems, etc. +# If the problem description just says that a feature was missing, then +# only explain the new behavior. +# Focus on user-facing behavior, not the implementation. +# Use "Restic now ..." instead of "We have changed ...". +# +Restic restore used to attempt to restore all extended file attributes. +Now two new command line flags are added to restore to control which +extended file attributes will be restored. + +The new flags are `--exclude-xattr` and `--include-xattr`. + +If the flags are not provided, restic will default to restoring +only `user` namespaced extended file attributes on Linux, and all +extended file attributes on other operating systems. + +https://github.com/restic/restic/issues/5089 +https://github.com/restic/restic/pull/5129 diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 3bc6ac5c5b6..870d496c94b 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -3,6 +3,7 @@ package main import ( "context" "path/filepath" + "runtime" "time" "github.com/restic/restic/internal/debug" @@ -300,6 +301,14 @@ func getXattrSelectFilter(opts RestoreOptions) (func(xattrName string) bool, err }, nil } - // no includes or excludes, set default of including all xattrs + // User has not specified any xattr includes or excludes + if runtime.GOOS == "linux" { + // For Linux, set default of including only user.* xattrs + return func(xattrName string) bool { + shouldInclude, _ := filter.IncludeByPattern([]string{"user.*"}, Warnf)(xattrName) + return shouldInclude + }, nil + } + // Not linux, default to including all xattrs return func(_ string) bool { return true }, nil } diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 9558ab1d4a0..4ca738a3fdc 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -88,6 +88,22 @@ disk space. Note that the exact location of the holes can differ from those in the original file, as their location is determined while restoring and is not stored explicitly. +Restoring extended file attributes +---------------------------------- + +By default, user namespaced extended attributes for files are restored on Linux, +and all extended attributes are restored for other operating systems. + +Use ``--exclude-xattr`` and ``--include-xattr`` to control which extended +attributes are restored for files in the snapshot. For example, to restore +user and security namespaced extended attributes for files: + +.. 
code-block:: console + + $ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work --include-xattr user.* --include-xattr security.* + enter password for repository: + restoring to /tmp/restore-work + Restoring in-place ------------------ From cd84fe085368785cb2e4c8b2b8f3dfe67141dc3c Mon Sep 17 00:00:00 2001 From: Tesshu Flower Date: Fri, 10 Jan 2025 15:25:09 -0500 Subject: [PATCH 857/893] xattrs - restore all by default, doc/chglog update Signed-off-by: Tesshu Flower --- changelog/unreleased/issue-5089 | 15 +++------------ cmd/restic/cmd_restore.go | 11 +---------- doc/050_restore.rst | 5 ++--- 3 files changed, 6 insertions(+), 25 deletions(-) diff --git a/changelog/unreleased/issue-5089 b/changelog/unreleased/issue-5089 index 51b5a679c3f..43c5c83667d 100644 --- a/changelog/unreleased/issue-5089 +++ b/changelog/unreleased/issue-5089 @@ -1,22 +1,13 @@ -Enhancement: Allow including or excluding extended file attributes -during restore. +Enhancement: Allow including/excluding extended file attributes during restore -# Describe the problem in the past tense, the new behavior in the present -# tense. Mention the affected commands, backends, operating systems, etc. -# If the problem description just says that a feature was missing, then -# only explain the new behavior. -# Focus on user-facing behavior, not the implementation. -# Use "Restic now ..." instead of "We have changed ...". -# -Restic restore used to attempt to restore all extended file attributes. +Restic restore attempts to restore all extended file attributes. Now two new command line flags are added to restore to control which extended file attributes will be restored. The new flags are `--exclude-xattr` and `--include-xattr`. If the flags are not provided, restic will default to restoring -only `user` namespaced extended file attributes on Linux, and all -extended file attributes on other operating systems. +all extended file attributes. https://github.com/restic/restic/issues/5089 https://github.com/restic/restic/pull/5129 diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 870d496c94b..7a3b029daab 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -3,7 +3,6 @@ package main import ( "context" "path/filepath" - "runtime" "time" "github.com/restic/restic/internal/debug" @@ -301,14 +300,6 @@ func getXattrSelectFilter(opts RestoreOptions) (func(xattrName string) bool, err }, nil } - // User has not specified any xattr includes or excludes - if runtime.GOOS == "linux" { - // For Linux, set default of including only user.* xattrs - return func(xattrName string) bool { - shouldInclude, _ := filter.IncludeByPattern([]string{"user.*"}, Warnf)(xattrName) - return shouldInclude - }, nil - } - // Not linux, default to including all xattrs + // default to including all xattrs return func(_ string) bool { return true }, nil } diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 4ca738a3fdc..b37f3c4fb5a 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -91,10 +91,9 @@ stored explicitly. Restoring extended file attributes ---------------------------------- -By default, user namespaced extended attributes for files are restored on Linux, -and all extended attributes are restored for other operating systems. +By default, all extended attributes for files are restored. -Use ``--exclude-xattr`` and ``--include-xattr`` to control which extended +Use only ``--exclude-xattr`` or ``--include-xattr`` to control which extended attributes are restored for files in the snapshot. 
For example, to restore user and security namespaced extended attributes for files: From 44cef250778a2f57dbf39a1508d7d2f4b56c5f78 Mon Sep 17 00:00:00 2001 From: Tesshu Flower Date: Fri, 10 Jan 2025 21:12:03 -0500 Subject: [PATCH 858/893] remove bad test xattr Signed-off-by: Tesshu Flower --- internal/fs/node_xattr_all_test.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/internal/fs/node_xattr_all_test.go b/internal/fs/node_xattr_all_test.go index cf3738722bd..79fde63e130 100644 --- a/internal/fs/node_xattr_all_test.go +++ b/internal/fs/node_xattr_all_test.go @@ -99,10 +99,6 @@ func TestOverwriteXattr(t *testing.T) { Name: "user.foo", Value: []byte("bar"), }, - { - Name: "abc.test", - Value: []byte("testxattr"), - }, }) setAndVerifyXattr(t, file, []restic.ExtendedAttribute{ From 99646fdf6292080769fa907045de4c4d2ce4c25c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 11 Jan 2025 18:31:06 +0000 Subject: [PATCH 859/893] build(deps): bump golang.org/x/net from 0.30.0 to 0.34.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.30.0 to 0.34.0. - [Commits](https://github.com/golang/net/compare/v0.30.0...v0.34.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 14396a4c348..0164bf95470 100644 --- a/go.mod +++ b/go.mod @@ -26,13 +26,13 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.6.0 - golang.org/x/crypto v0.28.0 - golang.org/x/net v0.30.0 + golang.org/x/crypto v0.32.0 + golang.org/x/net v0.34.0 golang.org/x/oauth2 v0.23.0 - golang.org/x/sync v0.9.0 - golang.org/x/sys v0.28.0 - golang.org/x/term v0.25.0 - golang.org/x/text v0.20.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.29.0 + golang.org/x/term v0.28.0 + golang.org/x/text v0.21.0 golang.org/x/time v0.7.0 google.golang.org/api v0.204.0 ) diff --git a/go.sum b/go.sum index 9bf973326ab..132181b05ca 100644 --- a/go.sum +++ b/go.sum @@ -235,8 +235,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= @@ -261,8 +261,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net 
v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -272,8 +272,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -292,16 +292,16 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -310,8 +310,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From da71e77b289c74f1307a770e5036cd58289b16ba Mon Sep 17 00:00:00 2001 From: Michael Wildman Date: Thu, 8 Aug 2024 22:48:03 +1200 Subject: [PATCH 860/893] backup: allow excluding online-only cloud files --- changelog/unreleased/issue-3697 | 12 +++++ cmd/restic/cmd_backup.go | 13 ++++++ doc/040_backup.rst | 3 +- internal/archiver/exclude.go | 18 +++++++ internal/fs/stat_bsd.go | 5 ++ internal/fs/stat_unix.go | 5 ++ internal/fs/stat_windows.go | 19 ++++++++ internal/fs/stat_windows_test.go | 80 ++++++++++++++++++++++++++++++++ 8 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-3697 create mode 100644 internal/fs/stat_windows_test.go diff --git a/changelog/unreleased/issue-3697 b/changelog/unreleased/issue-3697 new file mode 100644 index 00000000000..514f9d70864 --- /dev/null +++ b/changelog/unreleased/issue-3697 @@ -0,0 +1,12 @@ +Enhancement: Allow excluding online-only cloud files (e.g. OneDrive) + +Restic treated OneDrive Files On-Demand as though they were regular files +for the purpose of backup which caused issues with VSS, could make backup +incredibly slow (as OneDrive attempted to download files), or could fill +the source disk (e.g. 1TB of files in OneDrive on a 500GB disk). +Restic now allows the user to exclude these files when backing up with +the `--exclude-cloud-files` switch. 
+ +https://github.com/restic/restic/issues/3697 +https://github.com/restic/restic/issues/4935 +https://github.com/restic/restic/pull/4990 \ No newline at end of file diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index b7eed13184c..42908557ed9 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -77,6 +77,7 @@ type BackupOptions struct { ExcludeIfPresent []string ExcludeCaches bool ExcludeLargerThan string + ExcludeCloudFiles bool Stdin bool StdinFilename string StdinCommand bool @@ -140,6 +141,7 @@ func init() { f.BoolVar(&backupOptions.NoScan, "no-scan", false, "do not run scanner to estimate size of backup") if runtime.GOOS == "windows" { f.BoolVar(&backupOptions.UseFsSnapshot, "use-fs-snapshot", false, "use filesystem snapshot where possible (currently only Windows VSS)") + f.BoolVar(&backupOptions.ExcludeCloudFiles, "exclude-cloud-files", false, "excludes online-only cloud files (such as OneDrive Files On-Demand)") } f.BoolVar(&backupOptions.SkipIfUnchanged, "skip-if-unchanged", false, "skip snapshot creation if identical to parent snapshot") @@ -347,6 +349,17 @@ func collectRejectFuncs(opts BackupOptions, targets []string, fs fs.FS) (funcs [ funcs = append(funcs, f) } + if opts.ExcludeCloudFiles && !opts.Stdin && !opts.StdinCommand { + if runtime.GOOS != "windows" { + return nil, errors.Fatalf("exclude-cloud-files is only supported on Windows") + } + f, err := archiver.RejectCloudFiles(Warnf) + if err != nil { + return nil, err + } + funcs = append(funcs, f) + } + if opts.ExcludeCaches { opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55") } diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 696b235cce6..a30d80402be 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -297,7 +297,8 @@ the exclude options are: - ``--exclude-file`` Specified one or more times to exclude items listed in a given file - ``--iexclude-file`` Same as ``exclude-file`` but ignores cases like in ``--iexclude`` - ``--exclude-if-present foo`` Specified one or more times to exclude a folder's content if it contains a file called ``foo`` (optionally having a given header, no wildcards for the file name supported) -- ``--exclude-larger-than size`` Specified once to excludes files larger than the given size +- ``--exclude-larger-than size`` Specified once to exclude files larger than the given size +- ``--exclude-cloud-files`` Specified once to exclude online-only cloud files (such as OneDrive Files On-Demand), currently only supported on Windows Please see ``restic help backup`` for more specific information about each exclude option. 
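For readers who have not met these placeholders before, the check boils down to a single Windows file attribute. The following standalone sketch is illustrative only: it is not restic code, it assumes `golang.org/x/sys/windows`, and it builds only on Windows. It tests the same `FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS` attribute that cloud sync clients such as OneDrive set on online-only files and that the implementation in the diff below inspects.

//go:build windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/windows"
)

// isOnlineOnly reports whether path is a cloud placeholder whose contents
// would have to be downloaded ("recalled") from the sync provider when read.
func isOnlineOnly(path string) (bool, error) {
	p, err := windows.UTF16PtrFromString(path)
	if err != nil {
		return false, err
	}
	attrs, err := windows.GetFileAttributes(p)
	if err != nil {
		return false, err
	}
	return attrs&windows.FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS != 0, nil
}

func main() {
	onlineOnly, err := isOnlineOnly(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("online-only placeholder:", onlineOnly)
}

Restic itself does not call `GetFileAttributes` again; as shown in `internal/fs/stat_windows.go` below, it reads the attribute from the `ExtendedFileInfo` it already has.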
diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go index 6db62aa2025..c7dff0acb1a 100644 --- a/internal/archiver/exclude.go +++ b/internal/archiver/exclude.go @@ -316,3 +316,21 @@ func RejectBySize(maxSize int64) (RejectFunc, error) { return false }, nil } + +// RejectCloudFiles returns a func which rejects files which are online-only cloud files +func RejectCloudFiles(warnf func(msg string, args ...interface{})) (RejectFunc, error) { + return func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { + recall, err := fi.RecallOnDataAccess() + if err != nil { + warnf("item %v: error checking online-only status: %v", item, err) + return false + } + + if recall { + debug.Log("rejecting online-only cloud file %s", item) + return true + } + + return false + }, nil +} diff --git a/internal/fs/stat_bsd.go b/internal/fs/stat_bsd.go index 16506415306..95238be777c 100644 --- a/internal/fs/stat_bsd.go +++ b/internal/fs/stat_bsd.go @@ -32,3 +32,8 @@ func extendedStat(fi os.FileInfo) *ExtendedFileInfo { ChangeTime: time.Unix(s.Ctimespec.Unix()), } } + +// RecallOnDataAccess checks windows-specific attributes to determine if a file is a cloud-only placeholder. +func (*ExtendedFileInfo) RecallOnDataAccess() (bool, error) { + return false, nil +} diff --git a/internal/fs/stat_unix.go b/internal/fs/stat_unix.go index 723ac8b1978..70124658f44 100644 --- a/internal/fs/stat_unix.go +++ b/internal/fs/stat_unix.go @@ -32,3 +32,8 @@ func extendedStat(fi os.FileInfo) *ExtendedFileInfo { ChangeTime: time.Unix(s.Ctim.Unix()), } } + +// RecallOnDataAccess checks windows-specific attributes to determine if a file is a cloud-only placeholder. +func (*ExtendedFileInfo) RecallOnDataAccess() (bool, error) { + return false, nil +} diff --git a/internal/fs/stat_windows.go b/internal/fs/stat_windows.go index a2dfa5f6d5d..a62ddf87fd5 100644 --- a/internal/fs/stat_windows.go +++ b/internal/fs/stat_windows.go @@ -8,6 +8,8 @@ import ( "os" "syscall" "time" + + "golang.org/x/sys/windows" ) // extendedStat extracts info into an ExtendedFileInfo for Windows. @@ -36,3 +38,20 @@ func extendedStat(fi os.FileInfo) *ExtendedFileInfo { return &extFI } + +// RecallOnDataAccess checks if a file is available locally on the disk or if the file is +// just a placeholder which must be downloaded from a remote server. This is typically used +// in cloud syncing services (e.g. OneDrive) to prevent downloading files from cloud storage +// until they are accessed. 
+func (fi *ExtendedFileInfo) RecallOnDataAccess() (bool, error) { + attrs, ok := fi.sys.(*syscall.Win32FileAttributeData) + if !ok { + return false, fmt.Errorf("could not determine file attributes: %s", fi.Name) + } + + if attrs.FileAttributes&windows.FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS > 0 { + return true, nil + } + + return false, nil +} diff --git a/internal/fs/stat_windows_test.go b/internal/fs/stat_windows_test.go new file mode 100644 index 00000000000..4f258d836c1 --- /dev/null +++ b/internal/fs/stat_windows_test.go @@ -0,0 +1,80 @@ +package fs_test + +import ( + iofs "io/fs" + "os" + "path/filepath" + "syscall" + "testing" + "time" + + "github.com/restic/restic/internal/fs" + rtest "github.com/restic/restic/internal/test" + "golang.org/x/sys/windows" +) + +func TestRecallOnDataAccessRealFile(t *testing.T) { + // create a temp file for testing + tempdir := rtest.TempDir(t) + filename := filepath.Join(tempdir, "regular-file") + err := os.WriteFile(filename, []byte("foobar"), 0640) + rtest.OK(t, err) + + fi, err := os.Stat(filename) + rtest.OK(t, err) + + xs := fs.ExtendedStat(fi) + + // ensure we can check attrs without error + recall, err := xs.RecallOnDataAccess() + rtest.Assert(t, err == nil, "err should be nil", err) + rtest.Assert(t, recall == false, "RecallOnDataAccess should be false") +} + +// mockFileInfo implements os.FileInfo for mocking file attributes +type mockFileInfo struct { + FileAttributes uint32 +} + +func (m mockFileInfo) IsDir() bool { + return false +} +func (m mockFileInfo) ModTime() time.Time { + return time.Now() +} +func (m mockFileInfo) Mode() iofs.FileMode { + return 0 +} +func (m mockFileInfo) Name() string { + return "test" +} +func (m mockFileInfo) Size() int64 { + return 0 +} +func (m mockFileInfo) Sys() any { + return &syscall.Win32FileAttributeData{ + FileAttributes: m.FileAttributes, + } +} + +func TestRecallOnDataAccessMockCloudFile(t *testing.T) { + fi := mockFileInfo{ + FileAttributes: windows.FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS, + } + xs := fs.ExtendedStat(fi) + + recall, err := xs.RecallOnDataAccess() + rtest.Assert(t, err == nil, "err should be nil", err) + rtest.Assert(t, recall, "RecallOnDataAccess should be true") +} + +func TestRecallOnDataAccessMockRegularFile(t *testing.T) { + fi := mockFileInfo{ + FileAttributes: windows.FILE_ATTRIBUTE_ARCHIVE, + } + xs := fs.ExtendedStat(fi) + + recall, err := xs.RecallOnDataAccess() + rtest.Assert(t, err == nil, "err should be nil", err) + rtest.Assert(t, recall == false, "RecallOnDataAccess should be false") +} From 5bf0204caf74c31133d01f858a59daedac979385 Mon Sep 17 00:00:00 2001 From: Albin Vass Date: Mon, 13 Jan 2025 22:08:38 +0100 Subject: [PATCH 861/893] Do not skip root tree when searching for trees (#5153) This fixes an issue where restic cannot find the tree when trying to find the tree id of a snapshot. --------- Co-authored-by: Albin Vass Co-authored-by: Michael Eischer --- changelog/unreleased/pull-5153 | 6 ++++ cmd/restic/cmd_find.go | 45 +++++++++++++++---------- cmd/restic/cmd_find_integration_test.go | 22 +++++++----- 3 files changed, 48 insertions(+), 25 deletions(-) create mode 100644 changelog/unreleased/pull-5153 diff --git a/changelog/unreleased/pull-5153 b/changelog/unreleased/pull-5153 new file mode 100644 index 00000000000..e76d7f2fc84 --- /dev/null +++ b/changelog/unreleased/pull-5153 @@ -0,0 +1,6 @@ +Bugfix: Include root tree when searching using `find --tree` + +`restic find --tree` didn't find trees referenced by `restic snapshot --json`. 
+It now correctly includes the root tree when searching. + +https://github.com/restic/restic/pull/5153 diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 92e88203fbb..2e06fa00c60 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -336,6 +336,26 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error }}) } +func (f *Finder) findTree(treeID restic.ID, nodepath string) error { + found := false + if _, ok := f.treeIDs[treeID.String()]; ok { + found = true + } else if _, ok := f.treeIDs[treeID.Str()]; ok { + found = true + } + if found { + f.out.PrintObject("tree", treeID.String(), nodepath, "", f.out.newsn) + f.itemsFound++ + // Terminate if we have found all trees (and we are not + // looking for blobs) + if f.itemsFound >= len(f.treeIDs) && f.blobIDs == nil { + // Return an error to terminate the Walk + return errors.New("OK") + } + } + return nil +} + func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error { debug.Log("searching IDs in snapshot %s", sn.ID()) @@ -354,26 +374,17 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error { } if node == nil { + if nodepath == "/" { + if err := f.findTree(parentTreeID, "/"); err != nil { + return err + } + } return nil } - if node.Type == restic.NodeTypeDir && f.treeIDs != nil { - treeID := node.Subtree - found := false - if _, ok := f.treeIDs[treeID.Str()]; ok { - found = true - } else if _, ok := f.treeIDs[treeID.String()]; ok { - found = true - } - if found { - f.out.PrintObject("tree", treeID.String(), nodepath, "", sn) - f.itemsFound++ - // Terminate if we have found all trees (and we are not - // looking for blobs) - if f.itemsFound >= len(f.treeIDs) && f.blobIDs == nil { - // Return an error to terminate the Walk - return errors.New("OK") - } + if node.Type == "dir" && f.treeIDs != nil { + if err := f.findTree(*node.Subtree, nodepath); err != nil { + return err } } diff --git a/cmd/restic/cmd_find_integration_test.go b/cmd/restic/cmd_find_integration_test.go index dd8ab87fd7f..7e35cb141ac 100644 --- a/cmd/restic/cmd_find_integration_test.go +++ b/cmd/restic/cmd_find_integration_test.go @@ -10,11 +10,10 @@ import ( rtest "github.com/restic/restic/internal/test" ) -func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte { +func testRunFind(t testing.TB, wantJSON bool, opts FindOptions, gopts GlobalOptions, pattern string) []byte { buf, err := withCaptureStdout(func() error { gopts.JSON = wantJSON - opts := FindOptions{} return runFind(context.TODO(), opts, gopts, []string{pattern}) }) rtest.OK(t, err) @@ -31,14 +30,14 @@ func TestFind(t *testing.T) { testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testRunCheck(t, env.gopts) - results := testRunFind(t, false, env.gopts, "unexistingfile") + results := testRunFind(t, false, FindOptions{}, env.gopts, "unexistingfile") rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile) - results = testRunFind(t, false, env.gopts, "testfile") + results = testRunFind(t, false, FindOptions{}, env.gopts, "testfile") lines := strings.Split(string(results), "\n") rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile) - results = testRunFind(t, false, env.gopts, "testfile*") + results = testRunFind(t, false, FindOptions{}, env.gopts, "testfile*") lines = strings.Split(string(results), "\n") rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile) } @@ -67,21 +66,28 @@ func TestFindJSON(t 
*testing.T) { testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testRunCheck(t, env.gopts) + snapshot, _ := testRunSnapshots(t, env.gopts) - results := testRunFind(t, true, env.gopts, "unexistingfile") + results := testRunFind(t, true, FindOptions{}, env.gopts, "unexistingfile") matches := []testMatches{} rtest.OK(t, json.Unmarshal(results, &matches)) rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile) - results = testRunFind(t, true, env.gopts, "testfile") + results = testRunFind(t, true, FindOptions{}, env.gopts, "testfile") rtest.OK(t, json.Unmarshal(results, &matches)) rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile) rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile) rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile) - results = testRunFind(t, true, env.gopts, "testfile*") + results = testRunFind(t, true, FindOptions{}, env.gopts, "testfile*") rtest.OK(t, json.Unmarshal(results, &matches)) rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile) rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile) rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile) + + results = testRunFind(t, true, FindOptions{TreeID: true}, env.gopts, snapshot.Tree.String()) + rtest.OK(t, json.Unmarshal(results, &matches)) + rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", matches) + rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", matches[0].Matches) + rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile) } From 99e105eeb6bf7a4e5e2aac44f1dab7d66c0da35a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 1 Dec 2024 12:19:16 +0100 Subject: [PATCH 862/893] repository: restrict SaveUnpacked and RemoveUnpacked Those methods now only allow modifying snapshots. Internal data types used by the repository are now read-only. The repository-internal code can bypass the restrictions by wrapping the repository in an `internalRepository` type. The restriction itself is implemented by using a new datatype WriteableFileType in the SaveUnpacked and RemoveUnpacked methods. This statically ensures that code cannot bypass the access restrictions. The test changes are somewhat noisy as some of them modify repository internals and therefore require some way to bypass the access restrictions. This works by capturing an `internalRepository` or `Backend` when creating the Repository using a test helper function. 
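To make the compile-time restriction concrete, here is a rough, self-contained sketch of the idea. The names are simplified stand-ins rather than the actual restic definitions, and the real change expresses the constraint through generic interfaces such as `restic.SaverUnpacked[restic.WriteableFileType]`, but the effect is the same: only snapshot files can be written or removed through the regular repository API, while an `internalRepository` wrapper keeps full access.

package main

import "fmt"

// FileType enumerates every repository file type.
type FileType string

const (
	SnapshotFile FileType = "snapshot"
	IndexFile    FileType = "index"
	LockFile     FileType = "lock"
)

// WriteableFileType is a distinct type covering only the file types that
// regular callers may create or delete.
type WriteableFileType string

// WriteableSnapshotFile is the only value of the restricted type.
const WriteableSnapshotFile WriteableFileType = "snapshot"

// ToFileType converts the restricted type back into a plain FileType.
func (t WriteableFileType) ToFileType() FileType { return FileType(t) }

// Repository exposes the restricted API: passing IndexFile or LockFile to
// RemoveUnpacked is rejected by the compiler, not by a runtime check.
type Repository struct{}

func (Repository) RemoveUnpacked(t WriteableFileType, id string) {
	fmt.Printf("removing %v/%v\n", t.ToFileType(), id)
}

// internalRepository wraps Repository and accepts any FileType; this is how
// repository-internal code (e.g. prune or lock handling) bypasses the restriction.
type internalRepository struct{ Repository }

func (internalRepository) removeUnpacked(t FileType, id string) {
	fmt.Printf("removing %v/%v (internal)\n", t, id)
}

func main() {
	var repo Repository
	repo.RemoveUnpacked(WriteableSnapshotFile, "c3ab8ff1")
	// repo.RemoveUnpacked(IndexFile, "4355a46b") // does not compile

	internalRepository{repo}.removeUnpacked(LockFile, "d4735e3a")
}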
--- cmd/restic/cmd_forget.go | 2 +- cmd/restic/cmd_recover.go | 2 +- cmd/restic/cmd_rewrite.go | 4 +- cmd/restic/cmd_tag.go | 2 +- cmd/restic/cmd_unlock.go | 6 +- cmd/restic/integration_helpers_test.go | 19 ++- internal/archiver/archiver.go | 2 +- internal/checker/checker_test.go | 6 +- internal/migrations/upgrade_repo_v2_test.go | 2 +- internal/repository/fuzz_test.go | 2 +- internal/repository/index/index.go | 2 +- internal/repository/index/master_index.go | 10 +- .../repository/index/master_index_test.go | 26 ++--- internal/repository/lock.go | 51 +++++++-- internal/repository/lock_test.go | 81 +++++++++++++ internal/repository/packer_manager.go | 2 +- internal/repository/prune.go | 10 +- internal/repository/prune_test.go | 2 +- internal/repository/repack_test.go | 14 +-- internal/repository/repair_index.go | 2 +- internal/repository/repair_index_test.go | 2 +- internal/repository/repair_pack.go | 2 +- internal/repository/repository.go | 30 ++++- .../repository/repository_internal_test.go | 48 ++++++++ internal/repository/repository_test.go | 57 ++------- internal/repository/testing.go | 24 ++-- internal/repository/upgrade_repo.go | 2 +- internal/repository/upgrade_repo_test.go | 2 +- internal/restic/config.go | 2 +- internal/restic/json.go | 2 +- internal/restic/lock.go | 61 ++-------- internal/restic/lock_test.go | 108 +++--------------- internal/restic/parallel.go | 2 +- internal/restic/repository.go | 48 +++++--- internal/restic/snapshot.go | 4 +- internal/restic/snapshot_test.go | 2 +- internal/restic/tree_test.go | 4 +- 37 files changed, 353 insertions(+), 294 deletions(-) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index f770dc36569..f9ae85cd158 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -304,7 +304,7 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption if len(removeSnIDs) > 0 { if !opts.DryRun { bar := printer.NewCounter("files deleted") - err := restic.ParallelRemove(ctx, repo, removeSnIDs, restic.SnapshotFile, func(id restic.ID, err error) error { + err := restic.ParallelRemove(ctx, repo, removeSnIDs, restic.WriteableSnapshotFile, func(id restic.ID, err error) error { if err != nil { printer.E("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) } else { diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index 133f779783a..78fc2d14859 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -168,7 +168,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { } -func createSnapshot(ctx context.Context, name, hostname string, tags []string, repo restic.SaverUnpacked, tree *restic.ID) error { +func createSnapshot(ctx context.Context, name, hostname string, tags []string, repo restic.SaverUnpacked[restic.WriteableFileType], tree *restic.ID) error { sn, err := restic.NewSnapshot([]string{name}, tags, hostname, time.Now()) if err != nil { return errors.Fatalf("unable to save snapshot: %v", err) diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index b62d1ed9564..707f8af9baa 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -194,7 +194,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r if dryRun { Verbosef("would delete empty snapshot\n") } else { - if err = repo.RemoveUnpacked(ctx, restic.SnapshotFile, *sn.ID()); err != nil { + if err = repo.RemoveUnpacked(ctx, restic.WriteableSnapshotFile, *sn.ID()); err != nil { return false, err } debug.Log("removed empty 
snapshot %v", sn.ID()) @@ -253,7 +253,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r Verbosef("saved new snapshot %v\n", id.Str()) if forget { - if err = repo.RemoveUnpacked(ctx, restic.SnapshotFile, *sn.ID()); err != nil { + if err = repo.RemoveUnpacked(ctx, restic.WriteableSnapshotFile, *sn.ID()); err != nil { return false, err } debug.Log("removed old snapshot %v", sn.ID()) diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 8a2a83678c7..539a0cc59ec 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -90,7 +90,7 @@ func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Sna debug.Log("new snapshot saved as %v", id) // Remove the old snapshot. - if err = repo.RemoveUnpacked(ctx, restic.SnapshotFile, *sn.ID()); err != nil { + if err = repo.RemoveUnpacked(ctx, restic.WriteableSnapshotFile, *sn.ID()); err != nil { return false, err } diff --git a/cmd/restic/cmd_unlock.go b/cmd/restic/cmd_unlock.go index d87cde065e6..825eb815c10 100644 --- a/cmd/restic/cmd_unlock.go +++ b/cmd/restic/cmd_unlock.go @@ -3,7 +3,7 @@ package main import ( "context" - "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/repository" "github.com/spf13/cobra" ) @@ -45,9 +45,9 @@ func runUnlock(ctx context.Context, opts UnlockOptions, gopts GlobalOptions) err return err } - fn := restic.RemoveStaleLocks + fn := repository.RemoveStaleLocks if opts.RemoveAll { - fn = restic.RemoveAllLocks + fn = repository.RemoveAllLocks } processed, err := fn(ctx, repo) diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 8ae3bb78a61..21944a9ce3a 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -275,17 +275,30 @@ func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet { return treePacks } +func captureBackend(gopts *GlobalOptions) func() backend.Backend { + var be backend.Backend + gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { + be = r + return r, nil + } + return func() backend.Backend { + return be + } +} + func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { - ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) + be := captureBackend(&gopts) + ctx, _, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) rtest.OK(t, err) defer unlock() for id := range remove { - rtest.OK(t, r.RemoveUnpacked(ctx, restic.PackFile, id)) + rtest.OK(t, be().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()})) } } func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) { + be := captureBackend(&gopts) ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) rtest.OK(t, err) defer unlock() @@ -305,7 +318,7 @@ func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, rem if treePacks.Has(id) != removeTreePacks || keep.Has(id) { return nil } - return r.RemoveUnpacked(ctx, restic.PackFile, id) + return be().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}) })) } diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 55b6ee4b3f2..0b71cbacf0d 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -74,7 +74,7 @@ type ToNoder interface { type archiverRepo interface { restic.Loader restic.BlobSaver - restic.SaverUnpacked + restic.SaverUnpacked[restic.WriteableFileType] Config() restic.Config 
StartPackUploader(ctx context.Context, wg *errgroup.Group) diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index 0e2125bba43..92bbb1da660 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -145,11 +145,11 @@ func TestUnreferencedPack(t *testing.T) { } func TestUnreferencedBlobs(t *testing.T) { - repo, _, cleanup := repository.TestFromFixture(t, checkerTestData) + repo, be, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() snapshotID := restic.TestParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02") - test.OK(t, repo.RemoveUnpacked(context.TODO(), restic.SnapshotFile, snapshotID)) + test.OK(t, be.Remove(context.TODO(), backend.Handle{Type: restic.SnapshotFile, Name: snapshotID.String()})) unusedBlobsBySnapshot := restic.BlobHandles{ restic.TestParseHandle("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849", restic.DataBlob), @@ -334,7 +334,7 @@ func (b *errorOnceBackend) Load(ctx context.Context, h backend.Handle, length in } func TestCheckerModifiedData(t *testing.T) { - repo, be := repository.TestRepositoryWithVersion(t, 0) + repo, _, be := repository.TestRepositoryWithVersion(t, 0) sn := archiver.TestSnapshot(t, repo, ".", nil) t.Logf("archived as %v", sn.ID().Str()) diff --git a/internal/migrations/upgrade_repo_v2_test.go b/internal/migrations/upgrade_repo_v2_test.go index 44a39b6c56c..1f4cba4e57a 100644 --- a/internal/migrations/upgrade_repo_v2_test.go +++ b/internal/migrations/upgrade_repo_v2_test.go @@ -8,7 +8,7 @@ import ( ) func TestUpgradeRepoV2(t *testing.T) { - repo, _ := repository.TestRepositoryWithVersion(t, 1) + repo, _, _ := repository.TestRepositoryWithVersion(t, 1) if repo.Config().Version != 1 { t.Fatal("test repo has wrong version") } diff --git a/internal/repository/fuzz_test.go b/internal/repository/fuzz_test.go index f1fb061575a..c20f9a710ad 100644 --- a/internal/repository/fuzz_test.go +++ b/internal/repository/fuzz_test.go @@ -18,7 +18,7 @@ func FuzzSaveLoadBlob(f *testing.F) { } id := restic.Hash(blob) - repo, _ := TestRepositoryWithVersion(t, 2) + repo, _, _ := TestRepositoryWithVersion(t, 2) var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) diff --git a/internal/repository/index/index.go b/internal/repository/index/index.go index 14e4543bd41..c62c1c462df 100644 --- a/internal/repository/index/index.go +++ b/internal/repository/index/index.go @@ -351,7 +351,7 @@ func (idx *Index) Encode(w io.Writer) error { } // SaveIndex saves an index in the repository. -func (idx *Index) SaveIndex(ctx context.Context, repo restic.SaverUnpacked) (restic.ID, error) { +func (idx *Index) SaveIndex(ctx context.Context, repo restic.SaverUnpacked[restic.FileType]) (restic.ID, error) { buf := bytes.NewBuffer(nil) err := idx.Encode(buf) diff --git a/internal/repository/index/master_index.go b/internal/repository/index/master_index.go index ce9afcde43c..16923090b9d 100644 --- a/internal/repository/index/master_index.go +++ b/internal/repository/index/master_index.go @@ -321,7 +321,7 @@ type MasterIndexRewriteOpts struct { // This is used by repair index to only rewrite and delete the old indexes. // // Must not be called concurrently to any other MasterIndex operation. 
-func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, excludePacks restic.IDSet, oldIndexes restic.IDSet, extraObsolete restic.IDs, opts MasterIndexRewriteOpts) error { +func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked[restic.FileType], excludePacks restic.IDSet, oldIndexes restic.IDSet, extraObsolete restic.IDs, opts MasterIndexRewriteOpts) error { for _, idx := range mi.idx { if !idx.Final() { panic("internal error - index must be saved before calling MasterIndex.Rewrite") @@ -499,7 +499,7 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud // It is only intended for use by prune with the UnsafeRecovery option. // // Must not be called concurrently to any other MasterIndex operation. -func (mi *MasterIndex) SaveFallback(ctx context.Context, repo restic.SaverRemoverUnpacked, excludePacks restic.IDSet, p *progress.Counter) error { +func (mi *MasterIndex) SaveFallback(ctx context.Context, repo restic.SaverRemoverUnpacked[restic.FileType], excludePacks restic.IDSet, p *progress.Counter) error { p.SetMax(uint64(len(mi.Packs(excludePacks)))) mi.idxMutex.Lock() @@ -574,7 +574,7 @@ func (mi *MasterIndex) SaveFallback(ctx context.Context, repo restic.SaverRemove } // saveIndex saves all indexes in the backend. -func (mi *MasterIndex) saveIndex(ctx context.Context, r restic.SaverUnpacked, indexes ...*Index) error { +func (mi *MasterIndex) saveIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType], indexes ...*Index) error { for i, idx := range indexes { debug.Log("Saving index %d", i) @@ -590,12 +590,12 @@ func (mi *MasterIndex) saveIndex(ctx context.Context, r restic.SaverUnpacked, in } // SaveIndex saves all new indexes in the backend. -func (mi *MasterIndex) SaveIndex(ctx context.Context, r restic.SaverUnpacked) error { +func (mi *MasterIndex) SaveIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType]) error { return mi.saveIndex(ctx, r, mi.finalizeNotFinalIndexes()...) } // SaveFullIndex saves all full indexes in the backend. -func (mi *MasterIndex) SaveFullIndex(ctx context.Context, r restic.SaverUnpacked) error { +func (mi *MasterIndex) SaveFullIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType]) error { return mi.saveIndex(ctx, r, mi.finalizeFullIndexes()...) 
} diff --git a/internal/repository/index/master_index_test.go b/internal/repository/index/master_index_test.go index 23185962e7b..516ef045c28 100644 --- a/internal/repository/index/master_index_test.go +++ b/internal/repository/index/master_index_test.go @@ -346,13 +346,13 @@ var ( depth = 3 ) -func createFilledRepo(t testing.TB, snapshots int, version uint) restic.Repository { - repo, _ := repository.TestRepositoryWithVersion(t, version) +func createFilledRepo(t testing.TB, snapshots int, version uint) (restic.Repository, restic.Unpacked[restic.FileType]) { + repo, unpacked, _ := repository.TestRepositoryWithVersion(t, version) for i := 0; i < snapshots; i++ { restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth) } - return repo + return repo, unpacked } func TestIndexSave(t *testing.T) { @@ -362,15 +362,15 @@ func TestIndexSave(t *testing.T) { func testIndexSave(t *testing.T, version uint) { for _, test := range []struct { name string - saver func(idx *index.MasterIndex, repo restic.Repository) error + saver func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error }{ - {"rewrite no-op", func(idx *index.MasterIndex, repo restic.Repository) error { + {"rewrite no-op", func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error { return idx.Rewrite(context.TODO(), repo, nil, nil, nil, index.MasterIndexRewriteOpts{}) }}, - {"rewrite skip-all", func(idx *index.MasterIndex, repo restic.Repository) error { + {"rewrite skip-all", func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error { return idx.Rewrite(context.TODO(), repo, nil, restic.NewIDSet(), nil, index.MasterIndexRewriteOpts{}) }}, - {"SaveFallback", func(idx *index.MasterIndex, repo restic.Repository) error { + {"SaveFallback", func(idx *index.MasterIndex, repo restic.Unpacked[restic.FileType]) error { err := restic.ParallelRemove(context.TODO(), repo, idx.IDs(), restic.IndexFile, nil, nil) if err != nil { return nil @@ -379,7 +379,7 @@ func testIndexSave(t *testing.T, version uint) { }}, } { t.Run(test.name, func(t *testing.T) { - repo := createFilledRepo(t, 3, version) + repo, unpacked := createFilledRepo(t, 3, version) idx := index.NewMasterIndex() rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) @@ -388,7 +388,7 @@ func testIndexSave(t *testing.T, version uint) { blobs[pb] = struct{}{} })) - rtest.OK(t, test.saver(idx, repo)) + rtest.OK(t, test.saver(idx, unpacked)) idx = index.NewMasterIndex() rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) @@ -411,7 +411,7 @@ func TestIndexSavePartial(t *testing.T) { } func testIndexSavePartial(t *testing.T, version uint) { - repo := createFilledRepo(t, 3, version) + repo, unpacked := createFilledRepo(t, 3, version) // capture blob list before adding fourth snapshot idx := index.NewMasterIndex() @@ -424,14 +424,14 @@ func testIndexSavePartial(t *testing.T, version uint) { // add+remove new snapshot and track its pack files packsBefore := listPacks(t, repo) sn := restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(4)*time.Second), depth) - rtest.OK(t, repo.RemoveUnpacked(context.TODO(), restic.SnapshotFile, *sn.ID())) + rtest.OK(t, repo.RemoveUnpacked(context.TODO(), restic.WriteableSnapshotFile, *sn.ID())) packsAfter := listPacks(t, repo) newPacks := packsAfter.Sub(packsBefore) // rewrite index and remove pack files of new snapshot idx = index.NewMasterIndex() rtest.OK(t, idx.Load(context.TODO(), repo, nil, nil)) - rtest.OK(t, idx.Rewrite(context.TODO(), repo, newPacks, nil, nil, 
index.MasterIndexRewriteOpts{})) + rtest.OK(t, idx.Rewrite(context.TODO(), unpacked, newPacks, nil, nil, index.MasterIndexRewriteOpts{})) // check blobs idx = index.NewMasterIndex() @@ -446,7 +446,7 @@ func testIndexSavePartial(t *testing.T, version uint) { rtest.Equals(t, 0, len(blobs), "saved index is missing blobs") // remove pack files to make check happy - rtest.OK(t, restic.ParallelRemove(context.TODO(), repo, newPacks, restic.PackFile, nil, nil)) + rtest.OK(t, restic.ParallelRemove(context.TODO(), unpacked, newPacks, restic.PackFile, nil, nil)) checker.TestCheckRepo(t, repo, false) } diff --git a/internal/repository/lock.go b/internal/repository/lock.go index fd46066d1f4..a5019523343 100644 --- a/internal/repository/lock.go +++ b/internal/repository/lock.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "sync" + "sync/atomic" "time" "github.com/restic/restic/internal/backend" @@ -42,13 +43,7 @@ func Lock(ctx context.Context, repo *Repository, exclusive bool, retryLock time. // Lock wraps the ctx such that it is cancelled when the repository is unlocked // cancelling the original context also stops the lock refresh -func (l *locker) Lock(ctx context.Context, repo *Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { - - lockFn := restic.NewLock - if exclusive { - lockFn = restic.NewExclusiveLock - } - +func (l *locker) Lock(ctx context.Context, r *Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { var lock *restic.Lock var err error @@ -56,9 +51,11 @@ func (l *locker) Lock(ctx context.Context, repo *Repository, exclusive bool, ret retryMessagePrinted := false retryTimeout := time.After(retryLock) + repo := &internalRepository{r} + retryLoop: for { - lock, err = lockFn(ctx, repo) + lock, err = restic.NewLock(ctx, repo, exclusive) if err != nil && restic.IsAlreadyLocked(err) { if !retryMessagePrinted { @@ -75,7 +72,7 @@ retryLoop: case <-retryTimeout: debug.Log("repo already locked, timeout expired") // Last lock attempt - lock, err = lockFn(ctx, repo) + lock, err = restic.NewLock(ctx, repo, exclusive) break retryLoop case <-retrySleepCh: retrySleep = minDuration(retrySleep*2, l.retrySleepMax) @@ -272,3 +269,39 @@ func (l *Unlocker) Unlock() { l.info.cancel() l.info.refreshWG.Wait() } + +// RemoveStaleLocks deletes all locks detected as stale from the repository. +func RemoveStaleLocks(ctx context.Context, repo *Repository) (uint, error) { + var processed uint + err := restic.ForAllLocks(ctx, repo, nil, func(id restic.ID, lock *restic.Lock, err error) error { + if err != nil { + // ignore locks that cannot be loaded + debug.Log("ignore lock %v: %v", id, err) + return nil + } + + if lock.Stale() { + err = (&internalRepository{repo}).RemoveUnpacked(ctx, restic.LockFile, id) + if err == nil { + processed++ + } + return err + } + + return nil + }) + return processed, err +} + +// RemoveAllLocks removes all locks forcefully. 
+func RemoveAllLocks(ctx context.Context, repo *Repository) (uint, error) { + var processed uint32 + err := restic.ParallelList(ctx, repo, restic.LockFile, repo.Connections(), func(ctx context.Context, id restic.ID, _ int64) error { + err := (&internalRepository{repo}).RemoveUnpacked(ctx, restic.LockFile, id) + if err == nil { + atomic.AddUint32(&processed, 1) + } + return err + }) + return uint(processed), err +} diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index a9ff369c206..c31221e429a 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -3,6 +3,7 @@ package repository import ( "context" "fmt" + "os" "runtime" "strings" "sync" @@ -301,3 +302,83 @@ func TestLockWaitSuccess(t *testing.T) { rtest.OK(t, err) lock.Unlock() } + +func createFakeLock(repo *Repository, t time.Time, pid int) (restic.ID, error) { + hostname, err := os.Hostname() + if err != nil { + return restic.ID{}, err + } + + newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname} + return restic.SaveJSONUnpacked(context.TODO(), &internalRepository{repo}, restic.LockFile, &newLock) +} + +func lockExists(repo restic.Lister, t testing.TB, lockID restic.ID) bool { + var exists bool + rtest.OK(t, repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error { + if id == lockID { + exists = true + } + return nil + })) + + return exists +} + +func removeLock(repo *Repository, id restic.ID) error { + return (&internalRepository{repo}).RemoveUnpacked(context.TODO(), restic.LockFile, id) +} + +func TestLockWithStaleLock(t *testing.T) { + repo := TestRepository(t) + + id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) + rtest.OK(t, err) + + id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()) + rtest.OK(t, err) + + id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000) + rtest.OK(t, err) + + processed, err := RemoveStaleLocks(context.TODO(), repo) + rtest.OK(t, err) + + rtest.Assert(t, lockExists(repo, t, id1) == false, + "stale lock still exists after RemoveStaleLocks was called") + rtest.Assert(t, lockExists(repo, t, id2) == true, + "non-stale lock was removed by RemoveStaleLocks") + rtest.Assert(t, lockExists(repo, t, id3) == false, + "stale lock still exists after RemoveStaleLocks was called") + rtest.Assert(t, processed == 2, + "number of locks removed does not match: expected %d, got %d", + 2, processed) + + rtest.OK(t, removeLock(repo, id2)) +} + +func TestRemoveAllLocks(t *testing.T) { + repo := TestRepository(t) + + id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) + rtest.OK(t, err) + + id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()) + rtest.OK(t, err) + + id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000) + rtest.OK(t, err) + + processed, err := RemoveAllLocks(context.TODO(), repo) + rtest.OK(t, err) + + rtest.Assert(t, lockExists(repo, t, id1) == false, + "lock still exists after RemoveAllLocks was called") + rtest.Assert(t, lockExists(repo, t, id2) == false, + "lock still exists after RemoveAllLocks was called") + rtest.Assert(t, lockExists(repo, t, id3) == false, + "lock still exists after RemoveAllLocks was called") + rtest.Assert(t, processed == 3, + "number of locks removed does not match: expected %d, got %d", + 3, processed) +} diff --git a/internal/repository/packer_manager.go b/internal/repository/packer_manager.go index 731ad9a6ad0..9d53c911b24 100644 --- 
a/internal/repository/packer_manager.go +++ b/internal/repository/packer_manager.go @@ -190,5 +190,5 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *packe r.idx.StorePack(id, p.Packer.Blobs()) // Save index if full - return r.idx.SaveFullIndex(ctx, r) + return r.idx.SaveFullIndex(ctx, &internalRepository{r}) } diff --git a/internal/repository/prune.go b/internal/repository/prune.go index d5fdbba0725..3803b6f337a 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -544,7 +544,7 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) er // unreferenced packs can be safely deleted first if len(plan.removePacksFirst) != 0 { printer.P("deleting unreferenced packs\n") - _ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer) + _ = deleteFiles(ctx, true, &internalRepository{repo}, plan.removePacksFirst, restic.PackFile, printer) // forget unused data plan.removePacksFirst = nil } @@ -588,7 +588,7 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) er if plan.opts.UnsafeRecovery { printer.P("deleting index files\n") indexFiles := repo.idx.IDs() - err := deleteFiles(ctx, false, repo, indexFiles, restic.IndexFile, printer) + err := deleteFiles(ctx, false, &internalRepository{repo}, indexFiles, restic.IndexFile, printer) if err != nil { return errors.Fatalf("%s", err) } @@ -601,14 +601,14 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) er if len(plan.removePacks) != 0 { printer.P("removing %d old packs\n", len(plan.removePacks)) - _ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer) + _ = deleteFiles(ctx, true, &internalRepository{repo}, plan.removePacks, restic.PackFile, printer) } if ctx.Err() != nil { return ctx.Err() } if plan.opts.UnsafeRecovery { - err := repo.idx.SaveFallback(ctx, repo, plan.ignorePacks, printer.NewCounter("packs processed")) + err := repo.idx.SaveFallback(ctx, &internalRepository{repo}, plan.ignorePacks, printer.NewCounter("packs processed")) if err != nil { return errors.Fatalf("%s", err) } @@ -623,7 +623,7 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) er // deleteFiles deletes the given fileList of fileType in parallel // if ignoreError=true, it will print a warning if there was an error, else it will abort. 
-func deleteFiles(ctx context.Context, ignoreError bool, repo restic.RemoverUnpacked, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { +func deleteFiles(ctx context.Context, ignoreError bool, repo restic.RemoverUnpacked[restic.FileType], fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { bar := printer.NewCounter("files deleted") defer bar.Done() diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index 94d0dfa94ee..cc569aa4333 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -20,7 +20,7 @@ func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { random := rand.New(rand.NewSource(seed)) t.Logf("rand initialized with seed %d", seed) - repo, be := repository.TestRepositoryWithVersion(t, 0) + repo, _, be := repository.TestRepositoryWithVersion(t, 0) createRandomBlobs(t, random, repo, 4, 0.5, true) createRandomBlobs(t, random, repo, 5, 0.5, true) keep, _ := selectBlobs(t, random, repo, 0.5) diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index 59bafe84cb7..0691cdbbb04 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -159,14 +159,14 @@ func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSe return packs } -func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs restic.BlobSet) { +func repack(t *testing.T, repo restic.Repository, be backend.Backend, packs restic.IDSet, blobs restic.BlobSet) { repackedBlobs, err := repository.Repack(context.TODO(), repo, repo, packs, blobs, nil) if err != nil { t.Fatal(err) } for id := range repackedBlobs { - err = repo.RemoveUnpacked(context.TODO(), restic.PackFile, id) + err = be.Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}) if err != nil { t.Fatal(err) } @@ -186,7 +186,7 @@ func TestRepack(t *testing.T) { } func testRepack(t *testing.T, version uint) { - repo, _ := repository.TestRepositoryWithVersion(t, version) + repo, _, be := repository.TestRepositoryWithVersion(t, version) seed := time.Now().UnixNano() random := rand.New(rand.NewSource(seed)) @@ -199,7 +199,7 @@ func testRepack(t *testing.T, version uint) { packsBefore := listPacks(t, repo) // Running repack on empty ID sets should not do anything at all. 
- repack(t, repo, nil, nil) + repack(t, repo, be, nil, nil) packsAfter := listPacks(t, repo) @@ -212,7 +212,7 @@ func testRepack(t *testing.T, version uint) { removePacks := findPacksForBlobs(t, repo, removeBlobs) - repack(t, repo, removePacks, keepBlobs) + repack(t, repo, be, removePacks, keepBlobs) rebuildAndReloadIndex(t, repo) packsAfter = listPacks(t, repo) @@ -261,8 +261,8 @@ func (r oneConnectionRepo) Connections() uint { } func testRepackCopy(t *testing.T, version uint) { - repo, _ := repository.TestRepositoryWithVersion(t, version) - dstRepo, _ := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) + dstRepo, _, _ := repository.TestRepositoryWithVersion(t, version) // test with minimal possible connection count repoWrapped := &oneConnectionRepo{repo} diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index bff7ec5da4f..cc08206d523 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -123,7 +123,7 @@ func rewriteIndexFiles(ctx context.Context, repo *Repository, removePacks restic printer.P("rebuilding index\n") bar := printer.NewCounter("indexes processed") - return repo.idx.Rewrite(ctx, repo, removePacks, oldIndexes, extraObsolete, index.MasterIndexRewriteOpts{ + return repo.idx.Rewrite(ctx, &internalRepository{repo}, removePacks, oldIndexes, extraObsolete, index.MasterIndexRewriteOpts{ SaveProgress: bar, DeleteProgress: func() *progress.Counter { return printer.NewCounter("old indexes deleted") diff --git a/internal/repository/repair_index_test.go b/internal/repository/repair_index_test.go index 3b0af4e224b..0fc89c79a65 100644 --- a/internal/repository/repair_index_test.go +++ b/internal/repository/repair_index_test.go @@ -23,7 +23,7 @@ func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, random := rand.New(rand.NewSource(seed)) t.Logf("rand initialized with seed %d", seed) - repo, be := repository.TestRepositoryWithVersion(t, 0) + repo, _, be := repository.TestRepositoryWithVersion(t, 0) createRandomBlobs(t, random, repo, 4, 0.5, true) createRandomBlobs(t, random, repo, 5, 0.5, true) indexes := listIndex(t, repo) diff --git a/internal/repository/repair_pack.go b/internal/repository/repair_pack.go index 811388cc9ad..a9f8413e477 100644 --- a/internal/repository/repair_pack.go +++ b/internal/repository/repair_pack.go @@ -65,7 +65,7 @@ func RepairPacks(ctx context.Context, repo *Repository, ids restic.IDSet, printe printer.P("removing salvaged pack files") // if we fail to delete the damaged pack files, then prune will remove them later on bar = printer.NewCounter("files deleted") - _ = restic.ParallelRemove(ctx, repo, ids, restic.PackFile, nil, bar) + _ = restic.ParallelRemove(ctx, &internalRepository{repo}, ids, restic.PackFile, nil, bar) bar.Done() return nil diff --git a/internal/repository/repository.go b/internal/repository/repository.go index d408e310570..119cb44ca9d 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -53,6 +53,11 @@ type Repository struct { dec *zstd.Decoder } +// internalRepository allows using SaveUnpacked and RemoveUnpacked with all FileTypes +type internalRepository struct { + *Repository +} + type Options struct { Compression CompressionMode PackSize uint @@ -446,7 +451,15 @@ func (r *Repository) decompressUnpacked(p []byte) ([]byte, error) { // SaveUnpacked encrypts data and stores it in the backend. Returned is the // storage hash. 
-func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (id restic.ID, err error) { +func (r *Repository) SaveUnpacked(ctx context.Context, t restic.WriteableFileType, buf []byte) (id restic.ID, err error) { + return r.saveUnpacked(ctx, t.ToFileType(), buf) +} + +func (r *internalRepository) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (id restic.ID, err error) { + return r.Repository.saveUnpacked(ctx, t, buf) +} + +func (r *Repository) saveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (id restic.ID, err error) { p := buf if t != restic.ConfigFile { p, err = r.compressUnpacked(p) @@ -507,8 +520,15 @@ func (r *Repository) verifyUnpacked(buf []byte, t restic.FileType, expected []by return nil } -func (r *Repository) RemoveUnpacked(ctx context.Context, t restic.FileType, id restic.ID) error { - // TODO prevent everything except removing snapshots for non-repository code +func (r *Repository) RemoveUnpacked(ctx context.Context, t restic.WriteableFileType, id restic.ID) error { + return r.removeUnpacked(ctx, t.ToFileType(), id) +} + +func (r *internalRepository) RemoveUnpacked(ctx context.Context, t restic.FileType, id restic.ID) error { + return r.Repository.removeUnpacked(ctx, t, id) +} + +func (r *Repository) removeUnpacked(ctx context.Context, t restic.FileType, id restic.ID) error { return r.be.Remove(ctx, backend.Handle{Type: t, Name: id.String()}) } @@ -518,7 +538,7 @@ func (r *Repository) Flush(ctx context.Context) error { return err } - return r.idx.SaveIndex(ctx, r) + return r.idx.SaveIndex(ctx, &internalRepository{r}) } func (r *Repository) StartPackUploader(ctx context.Context, wg *errgroup.Group) { @@ -803,7 +823,7 @@ func (r *Repository) init(ctx context.Context, password string, cfg restic.Confi r.key = key.master r.keyID = key.ID() r.setConfig(cfg) - return restic.SaveConfig(ctx, r, cfg) + return restic.SaveConfig(ctx, &internalRepository{r}, cfg) } // Key returns the current master key. 
diff --git a/internal/repository/repository_internal_test.go b/internal/repository/repository_internal_test.go index 35082774c47..edec4aa482e 100644 --- a/internal/repository/repository_internal_test.go +++ b/internal/repository/repository_internal_test.go @@ -16,6 +16,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -84,6 +85,53 @@ func BenchmarkSortCachedPacksFirst(b *testing.B) { } } +func BenchmarkLoadIndex(b *testing.B) { + BenchmarkAllVersions(b, benchmarkLoadIndex) +} + +func benchmarkLoadIndex(b *testing.B, version uint) { + TestUseLowSecurityKDFParameters(b) + + repo, _, be := TestRepositoryWithVersion(b, version) + idx := index.NewIndex() + + for i := 0; i < 5000; i++ { + idx.StorePack(restic.NewRandomID(), []restic.Blob{ + { + BlobHandle: restic.NewRandomBlobHandle(), + Length: 1234, + Offset: 1235, + }, + }) + } + idx.Finalize() + + id, err := idx.SaveIndex(context.TODO(), &internalRepository{repo}) + rtest.OK(b, err) + + b.Logf("index saved as %v", id.Str()) + fi, err := be.Stat(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: id.String()}) + rtest.OK(b, err) + b.Logf("filesize is %v", fi.Size) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := loadIndex(context.TODO(), repo, id) + rtest.OK(b, err) + } +} + +// loadIndex loads the index id from backend and returns it. +func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (*index.Index, error) { + buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id) + if err != nil { + return nil, err + } + + return index.DecodeIndex(buf, id) +} + // buildPackfileWithoutHeader returns a manually built pack file without a header. 
func buildPackfileWithoutHeader(blobSizes []int, key *crypto.Key, compress bool) (blobs []restic.Blob, packfile []byte) { opts := []zstd.EOption{ diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 5a6897f8f44..1b0d47c8f46 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -43,7 +43,7 @@ func testSaveCalculateID(t *testing.T, version uint) { } func testSave(t *testing.T, version uint, calculateID bool) { - repo, _ := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) for _, size := range testSizes { data := make([]byte, size) @@ -86,7 +86,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) { } func benchmarkSaveAndEncrypt(t *testing.B, version uint) { - repo, _ := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) size := 4 << 20 // 4MiB data := make([]byte, size) @@ -112,7 +112,7 @@ func TestLoadBlob(t *testing.T) { } func testLoadBlob(t *testing.T, version uint) { - repo, _ := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) length := 1000000 buf := crypto.NewBlobBuffer(length) _, err := io.ReadFull(rnd, buf) @@ -168,7 +168,7 @@ func BenchmarkLoadBlob(b *testing.B) { } func benchmarkLoadBlob(b *testing.B, version uint) { - repo, _ := repository.TestRepositoryWithVersion(b, version) + repo, _, _ := repository.TestRepositoryWithVersion(b, version) length := 1000000 buf := crypto.NewBlobBuffer(length) _, err := io.ReadFull(rnd, buf) @@ -209,7 +209,7 @@ func BenchmarkLoadUnpacked(b *testing.B) { } func benchmarkLoadUnpacked(b *testing.B, version uint) { - repo, _ := repository.TestRepositoryWithVersion(b, version) + repo, _, _ := repository.TestRepositoryWithVersion(b, version) length := 1000000 buf := crypto.NewBlobBuffer(length) _, err := io.ReadFull(rnd, buf) @@ -217,7 +217,7 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) { dataID := restic.Hash(buf) - storageID, err := repo.SaveUnpacked(context.TODO(), restic.PackFile, buf) + storageID, err := repo.SaveUnpacked(context.TODO(), restic.WriteableSnapshotFile, buf) rtest.OK(b, err) // rtest.OK(b, repo.Flush()) @@ -225,7 +225,7 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) { b.SetBytes(int64(length)) for i := 0; i < b.N; i++ { - data, err := repo.LoadUnpacked(context.TODO(), restic.PackFile, storageID) + data, err := repo.LoadUnpacked(context.TODO(), restic.SnapshotFile, storageID) rtest.OK(b, err) // See comment in BenchmarkLoadBlob. 
@@ -262,7 +262,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (* } func TestRepositoryLoadUnpackedBroken(t *testing.T) { - repo, be := repository.TestRepositoryWithVersion(t, 0) + repo, _, be := repository.TestRepositoryWithVersion(t, 0) data := rtest.Random(23, 12345) id := restic.Hash(data) @@ -309,43 +309,6 @@ func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) { rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } -func BenchmarkLoadIndex(b *testing.B) { - repository.BenchmarkAllVersions(b, benchmarkLoadIndex) -} - -func benchmarkLoadIndex(b *testing.B, version uint) { - repository.TestUseLowSecurityKDFParameters(b) - - repo, be := repository.TestRepositoryWithVersion(b, version) - idx := index.NewIndex() - - for i := 0; i < 5000; i++ { - idx.StorePack(restic.NewRandomID(), []restic.Blob{ - { - BlobHandle: restic.NewRandomBlobHandle(), - Length: 1234, - Offset: 1235, - }, - }) - } - idx.Finalize() - - id, err := idx.SaveIndex(context.TODO(), repo) - rtest.OK(b, err) - - b.Logf("index saved as %v", id.Str()) - fi, err := be.Stat(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: id.String()}) - rtest.OK(b, err) - b.Logf("filesize is %v", fi.Size) - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, err := loadIndex(context.TODO(), repo, id) - rtest.OK(b, err) - } -} - // saveRandomDataBlobs generates random data blobs and saves them to the repository. func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) { var wg errgroup.Group @@ -368,7 +331,7 @@ func TestRepositoryIncrementalIndex(t *testing.T) { } func testRepositoryIncrementalIndex(t *testing.T, version uint) { - repo, _ := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) index.IndexFull = func(*index.Index) bool { return true } @@ -453,7 +416,7 @@ func TestListPack(t *testing.T) { } func TestNoDoubleInit(t *testing.T) { - r, be := repository.TestRepositoryWithVersion(t, restic.StableRepoVersion) + r, _, be := repository.TestRepositoryWithVersion(t, restic.StableRepoVersion) repo, err := repository.New(be, repository.Options{}) rtest.OK(t, err) diff --git a/internal/repository/testing.go b/internal/repository/testing.go index 2155cad166d..a8321faadff 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -78,30 +78,31 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o // instead. The directory is not removed, but left there for inspection. 
func TestRepository(t testing.TB) *Repository { t.Helper() - repo, _ := TestRepositoryWithVersion(t, 0) + repo, _, _ := TestRepositoryWithVersion(t, 0) return repo } -func TestRepositoryWithVersion(t testing.TB, version uint) (*Repository, backend.Backend) { +func TestRepositoryWithVersion(t testing.TB, version uint) (*Repository, restic.Unpacked[restic.FileType], backend.Backend) { t.Helper() dir := os.Getenv("RESTIC_TEST_REPO") opts := Options{} + var repo *Repository + var be backend.Backend if dir != "" { _, err := os.Stat(dir) if err != nil { - be, err := local.Create(context.TODO(), local.Config{Path: dir}) + lbe, err := local.Create(context.TODO(), local.Config{Path: dir}) if err != nil { t.Fatalf("error creating local backend at %v: %v", dir, err) } - return TestRepositoryWithBackend(t, be, version, opts) - } - - if err == nil { + repo, be = TestRepositoryWithBackend(t, lbe, version, opts) + } else { t.Logf("directory at %v already exists, using mem backend", dir) } + } else { + repo, be = TestRepositoryWithBackend(t, nil, version, opts) } - - return TestRepositoryWithBackend(t, nil, version, opts) + return repo, &internalRepository{repo}, be } func TestFromFixture(t testing.TB, repoFixture string) (*Repository, backend.Backend, func()) { @@ -156,3 +157,8 @@ func BenchmarkAllVersions(b *testing.B, bench VersionedBenchmark) { }) } } + +func TestNewLock(t *testing.T, repo *Repository, exclusive bool) (*restic.Lock, error) { + // TODO get rid of this test helper + return restic.NewLock(context.TODO(), &internalRepository{repo}, exclusive) +} diff --git a/internal/repository/upgrade_repo.go b/internal/repository/upgrade_repo.go index ea3ae2c0e0f..0a91b109320 100644 --- a/internal/repository/upgrade_repo.go +++ b/internal/repository/upgrade_repo.go @@ -45,7 +45,7 @@ func upgradeRepository(ctx context.Context, repo *Repository) error { cfg := repo.Config() cfg.Version = 2 - err := restic.SaveConfig(ctx, repo, cfg) + err := restic.SaveConfig(ctx, &internalRepository{repo}, cfg) if err != nil { return fmt.Errorf("save new config file failed: %w", err) } diff --git a/internal/repository/upgrade_repo_test.go b/internal/repository/upgrade_repo_test.go index 61ca6ef9564..c6bc574cf4e 100644 --- a/internal/repository/upgrade_repo_test.go +++ b/internal/repository/upgrade_repo_test.go @@ -13,7 +13,7 @@ import ( ) func TestUpgradeRepoV2(t *testing.T) { - repo, _ := TestRepositoryWithVersion(t, 1) + repo, _, _ := TestRepositoryWithVersion(t, 1) if repo.Config().Version != 1 { t.Fatal("test repo has wrong version") } diff --git a/internal/restic/config.go b/internal/restic/config.go index 3fb61cc137f..264792e11eb 100644 --- a/internal/restic/config.go +++ b/internal/restic/config.go @@ -87,7 +87,7 @@ func LoadConfig(ctx context.Context, r LoaderUnpacked) (Config, error) { return cfg, nil } -func SaveConfig(ctx context.Context, r SaverUnpacked, cfg Config) error { +func SaveConfig(ctx context.Context, r SaverUnpacked[FileType], cfg Config) error { _, err := SaveJSONUnpacked(ctx, r, ConfigFile, cfg) return err } diff --git a/internal/restic/json.go b/internal/restic/json.go index 05d049b5949..ec64ff15344 100644 --- a/internal/restic/json.go +++ b/internal/restic/json.go @@ -21,7 +21,7 @@ func LoadJSONUnpacked(ctx context.Context, repo LoaderUnpacked, t FileType, id I // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the // backend as type t, without a pack. It returns the storage hash. 
-func SaveJSONUnpacked(ctx context.Context, repo SaverUnpacked, t FileType, item interface{}) (ID, error) { +func SaveJSONUnpacked[FT FileTypes](ctx context.Context, repo SaverUnpacked[FT], t FT, item interface{}) (ID, error) { debug.Log("save new blob %v", t) plaintext, err := json.Marshal(item) if err != nil { diff --git a/internal/restic/lock.go b/internal/restic/lock.go index 8ad84091af9..20fa1e20e8c 100644 --- a/internal/restic/lock.go +++ b/internal/restic/lock.go @@ -7,7 +7,6 @@ import ( "os/signal" "os/user" "sync" - "sync/atomic" "syscall" "testing" "time" @@ -39,7 +38,7 @@ type Lock struct { UID uint32 `json:"uid,omitempty"` GID uint32 `json:"gid,omitempty"` - repo Unpacked + repo Unpacked[FileType] lockID *ID } @@ -87,20 +86,6 @@ func IsInvalidLock(err error) bool { var ErrRemovedLock = errors.New("lock file was removed in the meantime") -// NewLock returns a new, non-exclusive lock for the repository. If an -// exclusive lock is already held by another process, it returns an error -// that satisfies IsAlreadyLocked. -func NewLock(ctx context.Context, repo Unpacked) (*Lock, error) { - return newLock(ctx, repo, false) -} - -// NewExclusiveLock returns a new, exclusive lock for the repository. If -// another lock (normal and exclusive) is already held by another process, -// it returns an error that satisfies IsAlreadyLocked. -func NewExclusiveLock(ctx context.Context, repo Unpacked) (*Lock, error) { - return newLock(ctx, repo, true) -} - var waitBeforeLockCheck = 200 * time.Millisecond // delay increases by factor 2 on each retry @@ -113,11 +98,15 @@ func TestSetLockTimeout(t testing.TB, d time.Duration) { initialWaitBetweenLockRetries = d } -func newLock(ctx context.Context, repo Unpacked, excl bool) (*Lock, error) { +// NewLock returns a new lock for the repository. If an +// exclusive lock is already held by another process, it returns an error +// that satisfies IsAlreadyLocked. If the new lock is exclusive, then other +// non-exclusive locks also result in an IsAlreadyLocked error. +func NewLock(ctx context.Context, repo Unpacked[FileType], exclusive bool) (*Lock, error) { lock := &Lock{ Time: time.Now(), PID: os.Getpid(), - Exclusive: excl, + Exclusive: exclusive, repo: repo, } @@ -444,42 +433,6 @@ func LoadLock(ctx context.Context, repo LoaderUnpacked, id ID) (*Lock, error) { return lock, nil } -// RemoveStaleLocks deletes all locks detected as stale from the repository. -func RemoveStaleLocks(ctx context.Context, repo Unpacked) (uint, error) { - var processed uint - err := ForAllLocks(ctx, repo, nil, func(id ID, lock *Lock, err error) error { - if err != nil { - // ignore locks that cannot be loaded - debug.Log("ignore lock %v: %v", id, err) - return nil - } - - if lock.Stale() { - err = repo.RemoveUnpacked(ctx, LockFile, id) - if err == nil { - processed++ - } - return err - } - - return nil - }) - return processed, err -} - -// RemoveAllLocks removes all locks forcefully. -func RemoveAllLocks(ctx context.Context, repo Unpacked) (uint, error) { - var processed uint32 - err := ParallelList(ctx, repo, LockFile, repo.Connections(), func(ctx context.Context, id ID, _ int64) error { - err := repo.RemoveUnpacked(ctx, LockFile, id) - if err == nil { - atomic.AddUint32(&processed, 1) - } - return err - }) - return uint(processed), err -} - // ForAllLocks reads all locks in parallel and calls the given callback. // It is guaranteed that the function is not run concurrently. If the // callback returns an error, this function is cancelled and also returns that error.
diff --git a/internal/restic/lock_test.go b/internal/restic/lock_test.go index 606ed210de6..67d2b9a465c 100644 --- a/internal/restic/lock_test.go +++ b/internal/restic/lock_test.go @@ -19,7 +19,7 @@ func TestLock(t *testing.T) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - lock, err := restic.NewLock(context.TODO(), repo) + lock, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) rtest.OK(t, lock.Unlock(context.TODO())) @@ -29,7 +29,7 @@ func TestDoubleUnlock(t *testing.T) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - lock, err := restic.NewLock(context.TODO(), repo) + lock, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) rtest.OK(t, lock.Unlock(context.TODO())) @@ -43,10 +43,10 @@ func TestMultipleLock(t *testing.T) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - lock1, err := restic.NewLock(context.TODO(), repo) + lock1, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) - lock2, err := restic.NewLock(context.TODO(), repo) + lock2, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) rtest.OK(t, lock1.Unlock(context.TODO())) @@ -69,10 +69,10 @@ func TestMultipleLockFailure(t *testing.T) { repo, _ := repository.TestRepositoryWithBackend(t, be, 0, repository.Options{}) restic.TestSetLockTimeout(t, 5*time.Millisecond) - lock1, err := restic.NewLock(context.TODO(), repo) + lock1, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) - _, err = restic.NewLock(context.TODO(), repo) + _, err = repository.TestNewLock(t, repo, false) rtest.Assert(t, err != nil, "unreadable lock file did not result in an error") rtest.OK(t, lock1.Unlock(context.TODO())) @@ -81,7 +81,7 @@ func TestMultipleLockFailure(t *testing.T) { func TestLockExclusive(t *testing.T) { repo := repository.TestRepository(t) - elock, err := restic.NewExclusiveLock(context.TODO(), repo) + elock, err := repository.TestNewLock(t, repo, true) rtest.OK(t, err) rtest.OK(t, elock.Unlock(context.TODO())) } @@ -90,10 +90,10 @@ func TestLockOnExclusiveLockedRepo(t *testing.T) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - elock, err := restic.NewExclusiveLock(context.TODO(), repo) + elock, err := repository.TestNewLock(t, repo, true) rtest.OK(t, err) - lock, err := restic.NewLock(context.TODO(), repo) + lock, err := repository.TestNewLock(t, repo, false) rtest.Assert(t, err != nil, "create normal lock with exclusively locked repo didn't return an error") rtest.Assert(t, restic.IsAlreadyLocked(err), @@ -107,10 +107,10 @@ func TestExclusiveLockOnLockedRepo(t *testing.T) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - elock, err := restic.NewLock(context.TODO(), repo) + elock, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) - lock, err := restic.NewExclusiveLock(context.TODO(), repo) + lock, err := repository.TestNewLock(t, repo, true) rtest.Assert(t, err != nil, "create normal lock with exclusively locked repo didn't return an error") rtest.Assert(t, restic.IsAlreadyLocked(err), @@ -120,20 +120,6 @@ func TestExclusiveLockOnLockedRepo(t *testing.T) { rtest.OK(t, elock.Unlock(context.TODO())) } -func createFakeLock(repo restic.SaverUnpacked, t time.Time, pid int) (restic.ID, error) { - hostname, err := os.Hostname() - if err != nil { - return restic.ID{}, err - } - - newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname} - return 
restic.SaveJSONUnpacked(context.TODO(), repo, restic.LockFile, &newLock) -} - -func removeLock(repo restic.RemoverUnpacked, id restic.ID) error { - return repo.RemoveUnpacked(context.TODO(), restic.LockFile, id) -} - var staleLockTests = []struct { timestamp time.Time stale bool @@ -190,72 +176,6 @@ func TestLockStale(t *testing.T) { } } -func lockExists(repo restic.Lister, t testing.TB, lockID restic.ID) bool { - var exists bool - rtest.OK(t, repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error { - if id == lockID { - exists = true - } - return nil - })) - - return exists -} - -func TestLockWithStaleLock(t *testing.T) { - repo := repository.TestRepository(t) - - id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) - rtest.OK(t, err) - - id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()) - rtest.OK(t, err) - - id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000) - rtest.OK(t, err) - - processed, err := restic.RemoveStaleLocks(context.TODO(), repo) - rtest.OK(t, err) - - rtest.Assert(t, lockExists(repo, t, id1) == false, - "stale lock still exists after RemoveStaleLocks was called") - rtest.Assert(t, lockExists(repo, t, id2) == true, - "non-stale lock was removed by RemoveStaleLocks") - rtest.Assert(t, lockExists(repo, t, id3) == false, - "stale lock still exists after RemoveStaleLocks was called") - rtest.Assert(t, processed == 2, - "number of locks removed does not match: expected %d, got %d", - 2, processed) - - rtest.OK(t, removeLock(repo, id2)) -} - -func TestRemoveAllLocks(t *testing.T) { - repo := repository.TestRepository(t) - - id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) - rtest.OK(t, err) - - id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()) - rtest.OK(t, err) - - id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000) - rtest.OK(t, err) - - processed, err := restic.RemoveAllLocks(context.TODO(), repo) - rtest.OK(t, err) - - rtest.Assert(t, lockExists(repo, t, id1) == false, - "lock still exists after RemoveAllLocks was called") - rtest.Assert(t, lockExists(repo, t, id2) == false, - "lock still exists after RemoveAllLocks was called") - rtest.Assert(t, lockExists(repo, t, id3) == false, - "lock still exists after RemoveAllLocks was called") - rtest.Assert(t, processed == 3, - "number of locks removed does not match: expected %d, got %d", - 3, processed) -} - func checkSingleLock(t *testing.T, repo restic.Lister) restic.ID { t.Helper() var lockID *restic.ID @@ -279,7 +199,7 @@ func testLockRefresh(t *testing.T, refresh func(lock *restic.Lock) error) { repo := repository.TestRepository(t) restic.TestSetLockTimeout(t, 5*time.Millisecond) - lock, err := restic.NewLock(context.TODO(), repo) + lock, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) time0 := lock.Time @@ -312,10 +232,10 @@ func TestLockRefreshStale(t *testing.T) { } func TestLockRefreshStaleMissing(t *testing.T) { - repo, be := repository.TestRepositoryWithVersion(t, 0) + repo, _, be := repository.TestRepositoryWithVersion(t, 0) restic.TestSetLockTimeout(t, 5*time.Millisecond) - lock, err := restic.NewLock(context.TODO(), repo) + lock, err := repository.TestNewLock(t, repo, false) rtest.OK(t, err) lockID := checkSingleLock(t, repo) diff --git a/internal/restic/parallel.go b/internal/restic/parallel.go index 0c2215325bd..1c56f684861 100644 --- a/internal/restic/parallel.go +++ b/internal/restic/parallel.go @@ -54,7 +54,7 @@ 
func ParallelList(ctx context.Context, r Lister, t FileType, parallelism uint, f // ParallelRemove deletes the given fileList of fileType in parallel // if callback returns an error, then it will abort. -func ParallelRemove(ctx context.Context, repo RemoverUnpacked, fileList IDSet, fileType FileType, report func(id ID, err error) error, bar *progress.Counter) error { +func ParallelRemove[FT FileTypes](ctx context.Context, repo RemoverUnpacked[FT], fileList IDSet, fileType FT, report func(id ID, err error) error, bar *progress.Counter) error { fileChan := make(chan ID) wg, ctx := errgroup.WithContext(ctx) wg.Go(func() error { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index b18b036a733..07ef9cbc085 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -57,14 +57,16 @@ type Repository interface { LoadRaw(ctx context.Context, t FileType, id ID) (data []byte, err error) // LoadUnpacked loads and decrypts the file with the given type and ID. LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error) - SaveUnpacked(ctx context.Context, t FileType, buf []byte) (ID, error) + SaveUnpacked(ctx context.Context, t WriteableFileType, buf []byte) (ID, error) // RemoveUnpacked removes a file from the repository. This will eventually be restricted to deleting only snapshots. - RemoveUnpacked(ctx context.Context, t FileType, id ID) error + RemoveUnpacked(ctx context.Context, t WriteableFileType, id ID) error } type FileType = backend.FileType -// These are the different data types a backend can store. +// These are the different data types a backend can store. Only filetypes contained +// in the `WriteableFileType` subset can be modified via the Repository interface. +// All other filetypes are considered internal datastructures of the Repository. const ( PackFile FileType = backend.PackFile KeyFile FileType = backend.KeyFile @@ -74,6 +76,26 @@ const ( ConfigFile FileType = backend.ConfigFile ) +type WriteableFileType backend.FileType + +// These are the different data types that can be modified via SaveUnpacked or RemoveUnpacked. 
+const ( + WriteableSnapshotFile WriteableFileType = WriteableFileType(SnapshotFile) +) + +func (w *WriteableFileType) ToFileType() FileType { + switch *w { + case WriteableSnapshotFile: + return SnapshotFile + default: + panic("invalid WriteableFileType") + } +} + +type FileTypes interface { + FileType | WriteableFileType +} + // LoaderUnpacked allows loading a blob not stored in a pack file type LoaderUnpacked interface { // Connections returns the maximum number of concurrent backend operations @@ -82,22 +104,22 @@ type LoaderUnpacked interface { } // SaverUnpacked allows saving a blob not stored in a pack file -type SaverUnpacked interface { +type SaverUnpacked[FT FileTypes] interface { // Connections returns the maximum number of concurrent backend operations Connections() uint - SaveUnpacked(ctx context.Context, t FileType, buf []byte) (ID, error) + SaveUnpacked(ctx context.Context, t FT, buf []byte) (ID, error) } // RemoverUnpacked allows removing an unpacked blob -type RemoverUnpacked interface { +type RemoverUnpacked[FT FileTypes] interface { // Connections returns the maximum number of concurrent backend operations Connections() uint - RemoveUnpacked(ctx context.Context, t FileType, id ID) error + RemoveUnpacked(ctx context.Context, t FT, id ID) error } -type SaverRemoverUnpacked interface { - SaverUnpacked - RemoverUnpacked +type SaverRemoverUnpacked[FT FileTypes] interface { + SaverUnpacked[FT] + RemoverUnpacked[FT] } type PackBlobs struct { @@ -126,10 +148,10 @@ type ListerLoaderUnpacked interface { LoaderUnpacked } -type Unpacked interface { +type Unpacked[FT FileTypes] interface { ListerLoaderUnpacked - SaverUnpacked - RemoverUnpacked + SaverUnpacked[FT] + RemoverUnpacked[FT] } type ListBlobser interface { diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go index 39ed806276a..f9cdf4daf21 100644 --- a/internal/restic/snapshot.go +++ b/internal/restic/snapshot.go @@ -90,8 +90,8 @@ func LoadSnapshot(ctx context.Context, loader LoaderUnpacked, id ID) (*Snapshot, } // SaveSnapshot saves the snapshot sn and returns its ID. 
-func SaveSnapshot(ctx context.Context, repo SaverUnpacked, sn *Snapshot) (ID, error) { - return SaveJSONUnpacked(ctx, repo, SnapshotFile, sn) +func SaveSnapshot(ctx context.Context, repo SaverUnpacked[WriteableFileType], sn *Snapshot) (ID, error) { + return SaveJSONUnpacked(ctx, repo, WriteableSnapshotFile, sn) } // ForAllSnapshots reads all snapshots in parallel and calls the diff --git a/internal/restic/snapshot_test.go b/internal/restic/snapshot_test.go index 9099c8b5f30..68016287a58 100644 --- a/internal/restic/snapshot_test.go +++ b/internal/restic/snapshot_test.go @@ -32,7 +32,7 @@ func TestLoadJSONUnpacked(t *testing.T) { } func testLoadJSONUnpacked(t *testing.T, version uint) { - repo, _ := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) // archive a snapshot sn := restic.Snapshot{} diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go index 07ca254f135..5c9c0739c1e 100644 --- a/internal/restic/tree_test.go +++ b/internal/restic/tree_test.go @@ -184,7 +184,7 @@ func testLoadTree(t *testing.T, version uint) { } // archive a few files - repo, _ := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) rtest.OK(t, repo.Flush(context.Background())) @@ -202,7 +202,7 @@ func benchmarkLoadTree(t *testing.B, version uint) { } // archive a few files - repo, _ := repository.TestRepositoryWithVersion(t, version) + repo, _, _ := repository.TestRepositoryWithVersion(t, version) sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) rtest.OK(t, repo.Flush(context.Background())) From b7ff8ea9cd8d254f9efe6320036616215c18f276 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 1 Dec 2024 12:28:47 +0100 Subject: [PATCH 863/893] repository: expose cache via method --- cmd/restic/cmd_backup.go | 2 +- cmd/restic/cmd_prune.go | 2 +- cmd/restic/exclude.go | 4 ++-- internal/repository/check.go | 4 ++-- internal/repository/raw.go | 4 ++-- internal/repository/repository.go | 22 +++++++++++++--------- 6 files changed, 21 insertions(+), 17 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 42908557ed9..93b4556c7eb 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -304,7 +304,7 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error { // from being saved in a snapshot based on path only func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []archiver.RejectByNameFunc, err error) { // exclude restic cache - if repo.Cache != nil { + if repo.Cache() != nil { f, err := rejectResticCache(repo) if err != nil { return nil, err diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 2137147996c..fce109bddd6 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -171,7 +171,7 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term } func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet, term *termstatus.Terminal) error { - if repo.Cache == nil { + if repo.Cache() == nil { Print("warning: running prune without a cache, this may be very slow!\n") } diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go index 99d1128a9e1..1c05f4abba8 100644 --- a/cmd/restic/exclude.go +++ b/cmd/restic/exclude.go @@ -11,12 +11,12 @@ import ( // 
rejectResticCache returns a RejectByNameFunc that rejects the restic cache // directory (if set). func rejectResticCache(repo *repository.Repository) (archiver.RejectByNameFunc, error) { - if repo.Cache == nil { + if repo.Cache() == nil { return func(string) bool { return false }, nil } - cacheBase := repo.Cache.BaseDir() + cacheBase := repo.Cache().BaseDir() if cacheBase == "" { return nil, errors.New("cacheBase is empty string") diff --git a/internal/repository/check.go b/internal/repository/check.go index 4e57a7c1c25..2bf2ac8f36d 100644 --- a/internal/repository/check.go +++ b/internal/repository/check.go @@ -40,9 +40,9 @@ func (e *partialReadError) Error() string { func CheckPack(ctx context.Context, r *Repository, id restic.ID, blobs []restic.Blob, size int64, bufRd *bufio.Reader, dec *zstd.Decoder) error { err := checkPackInner(ctx, r, id, blobs, size, bufRd, dec) if err != nil { - if r.Cache != nil { + if r.cache != nil { // ignore error as there's not much we can do here - _ = r.Cache.Forget(backend.Handle{Type: restic.PackFile, Name: id.String()}) + _ = r.cache.Forget(backend.Handle{Type: restic.PackFile, Name: id.String()}) } // retry pack verification to detect transient errors diff --git a/internal/repository/raw.go b/internal/repository/raw.go index 31443b0107e..c5a4a72b78c 100644 --- a/internal/repository/raw.go +++ b/internal/repository/raw.go @@ -21,10 +21,10 @@ func (r *Repository) LoadRaw(ctx context.Context, t restic.FileType, id restic.I // retry loading damaged data only once. If a file fails to download correctly // the second time, then it is likely corrupted at the backend. if h.Type != backend.ConfigFile && id != restic.Hash(buf) { - if r.Cache != nil { + if r.cache != nil { // Cleanup cache to make sure it's not the cached copy that is broken. // Ignore error as there's not much we can do in that case. - _ = r.Cache.Forget(h) + _ = r.cache.Forget(h) } buf, err = loadRaw(ctx, r.be, h) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 119cb44ca9d..aee0db103bd 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -38,7 +38,7 @@ type Repository struct { key *crypto.Key keyID restic.ID idx *index.MasterIndex - Cache *cache.Cache + cache *cache.Cache opts Options @@ -154,10 +154,14 @@ func (r *Repository) UseCache(c *cache.Cache) { return } debug.Log("using cache") - r.Cache = c + r.cache = c r.be = c.Wrap(r.be) } +func (r *Repository) Cache() *cache.Cache { + return r.cache +} + // SetDryRun sets the repo backend into dry-run mode. func (r *Repository) SetDryRun() { r.be = dryrun.New(r.be) @@ -230,15 +234,15 @@ func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic. } // try cached pack files first - sortCachedPacksFirst(r.Cache, blobs) + sortCachedPacksFirst(r.cache, blobs) buf, err := r.loadBlob(ctx, blobs, buf) if err != nil { - if r.Cache != nil { + if r.cache != nil { for _, blob := range blobs { h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: blob.Type.IsMetadata()} // ignore errors as there's not much we can do here - _ = r.Cache.Forget(h) + _ = r.cache.Forget(h) } } @@ -722,14 +726,14 @@ func (r *Repository) createIndexFromPacks(ctx context.Context, packsize map[rest // prepareCache initializes the local cache. indexIDs is the list of IDs of // index files still present in the repo. 
func (r *Repository) prepareCache() error { - if r.Cache == nil { + if r.cache == nil { return nil } packs := r.idx.Packs(restic.NewIDSet()) // clear old packs - err := r.Cache.Clear(restic.PackFile, packs) + err := r.cache.Clear(restic.PackFile, packs) if err != nil { fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err) } @@ -855,9 +859,9 @@ func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([] entries, hdrSize, err := pack.List(r.Key(), backend.ReaderAt(ctx, r.be, h), size) if err != nil { - if r.Cache != nil { + if r.cache != nil { // ignore error as there is not much we can do here - _ = r.Cache.Forget(h) + _ = r.cache.Forget(h) } // retry on error From 115ecb3c92ca2ca81c1b09d9f0072c80defa092f Mon Sep 17 00:00:00 2001 From: Srigovind Nayak <5201843+konidev20@users.noreply.github.com> Date: Tue, 14 Jan 2025 23:27:47 +0530 Subject: [PATCH 864/893] tag: output the original ID and new snapshotID (#5144) * tag: output the original ID and new snapshotID tag: print changed snapshot information immediately * print changed snapshot immediately after it has been saved * add message type to the changedSnapshot * add a summary type which will share the JSON output of the numer of changed snapshots * updated verbosity of the changed snapshot in text mode to only work when verbosity > 2 * also use the terminal status printer for a standard handling for stdout messages --- changelog/unreleased/issue-5137 | 8 ++++ cmd/restic/cmd_tag.go | 61 +++++++++++++++++++++----- cmd/restic/cmd_tag_integration_test.go | 2 +- doc/075_scripting.rst | 24 ++++++++++ 4 files changed, 82 insertions(+), 13 deletions(-) create mode 100644 changelog/unreleased/issue-5137 diff --git a/changelog/unreleased/issue-5137 b/changelog/unreleased/issue-5137 new file mode 100644 index 00000000000..ba681202cd3 --- /dev/null +++ b/changelog/unreleased/issue-5137 @@ -0,0 +1,8 @@ +Enhancement: Restic tag command returns the modified snapshot information + +Restic `tag` command now returns the modified snapshot information in the +output. Added `--json` option to the command to get the output in JSON format +for scripting access. + +https://github.com/restic/restic/issues/5137 +https://github.com/restic/restic/pull/5144 \ No newline at end of file diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 539a0cc59ec..f71e2556c45 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -9,6 +9,8 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui" + "github.com/restic/restic/internal/ui/termstatus" ) var cmdTag = &cobra.Command{ @@ -34,7 +36,9 @@ Exit status is 12 if the password is incorrect. 
GroupID: cmdGroupDefault, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return runTag(cmd.Context(), tagOptions, globalOptions, args) + term, cancel := setupTermstatus() + defer cancel() + return runTag(cmd.Context(), tagOptions, globalOptions, term, args) }, } @@ -58,7 +62,18 @@ func init() { initMultiSnapshotFilter(tagFlags, &tagOptions.SnapshotFilter, true) } -func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Snapshot, setTags, addTags, removeTags []string) (bool, error) { +type changedSnapshot struct { + MessageType string `json:"message_type"` // changed + OldSnapshotID restic.ID `json:"old_snapshot_id"` + NewSnapshotID restic.ID `json:"new_snapshot_id"` +} + +type changedSnapshotsSummary struct { + MessageType string `json:"message_type"` // summary + ChangedSnapshots int `json:"changed_snapshots"` +} + +func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Snapshot, setTags, addTags, removeTags []string, printFunc func(changedSnapshot)) (bool, error) { var changed bool if len(setTags) != 0 { @@ -87,7 +102,7 @@ func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Sna return false, err } - debug.Log("new snapshot saved as %v", id) + debug.Log("old snapshot %v saved as a new snapshot %v", sn.ID(), id) // Remove the old snapshot. if err = repo.RemoveUnpacked(ctx, restic.WriteableSnapshotFile, *sn.ID()); err != nil { @@ -95,11 +110,13 @@ func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Sna } debug.Log("old snapshot %v removed", sn.ID()) + + printFunc(changedSnapshot{MessageType: "changed", OldSnapshotID: *sn.ID(), NewSnapshotID: id}) } return changed, nil } -func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []string) error { +func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { if len(opts.SetTags) == 0 && len(opts.AddTags) == 0 && len(opts.RemoveTags) == 0 { return errors.Fatal("nothing to do!") } @@ -114,24 +131,44 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st } defer unlock() - changeCnt := 0 + printFunc := func(c changedSnapshot) { + Verboseff("old snapshot ID: %v -> new snapshot ID: %v\n", c.OldSnapshotID, c.NewSnapshotID) + } + + summary := changedSnapshotsSummary{MessageType: "summary", ChangedSnapshots: 0} + printSummary := func(c changedSnapshotsSummary) { + if c.ChangedSnapshots == 0 { + Verbosef("no snapshots were modified\n") + } else { + Verbosef("modified %v snapshots\n", c.ChangedSnapshots) + } + } + + if gopts.JSON { + printFunc = func(c changedSnapshot) { + term.Print(ui.ToJSONString(c)) + } + printSummary = func(c changedSnapshotsSummary) { + term.Print(ui.ToJSONString(c)) + } + } + for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { - changed, err := changeTags(ctx, repo, sn, opts.SetTags.Flatten(), opts.AddTags.Flatten(), opts.RemoveTags.Flatten()) + changed, err := changeTags(ctx, repo, sn, opts.SetTags.Flatten(), opts.AddTags.Flatten(), opts.RemoveTags.Flatten(), printFunc) if err != nil { Warnf("unable to modify the tags for snapshot ID %q, ignoring: %v\n", sn.ID(), err) continue } if changed { - changeCnt++ + summary.ChangedSnapshots++ } } + if ctx.Err() != nil { return ctx.Err() } - if changeCnt == 0 { - Verbosef("no snapshots were modified\n") - } else { - Verbosef("modified tags on %v snapshots\n", changeCnt) - } + + printSummary(summary) + return nil } diff --git 
a/cmd/restic/cmd_tag_integration_test.go b/cmd/restic/cmd_tag_integration_test.go index 6979f9c1161..53360ca84a6 100644 --- a/cmd/restic/cmd_tag_integration_test.go +++ b/cmd/restic/cmd_tag_integration_test.go @@ -9,7 +9,7 @@ import ( ) func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) { - rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{})) + rtest.OK(t, runTag(context.TODO(), opts, gopts, nil, []string{})) } // nolint: staticcheck // false positive nil pointer dereference check diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 39a6dbc7fcb..57a8e2872f5 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -722,6 +722,30 @@ The stats command returns a single JSON object. | ``compression_space_saving`` | Overall space saving due to compression | +------------------------------+-----------------------------------------------------+ +tag +--- + +The ``tag`` command uses the JSON lines format with the following message types. + +Changed +^^^^^^^ + ++--------------------------+-------------------------------------------+ +| ``message_type`` | Always "changed" | ++--------------------------+-------------------------------------------+ +| ``old_snapshot_id`` | ID of the snapshot before the change | ++--------------------------+-------------------------------------------+ +| ``new_snapshot_id`` | ID of the snapshot after the change | ++--------------------------+-------------------------------------------+ + +Summary +^^^^^^^ + ++-----------------------------+-------------------------------------------+ +| ``message_type`` | Always "summary" | ++-----------------------------+-------------------------------------------+ +| ``changed_snapshot_count`` | Total number of changed snapshots | ++-----------------------------+-------------------------------------------+ version ------- From d5a94583ed8b9601ccbaa76fca74513ccdf49f51 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 18 Jan 2025 18:27:43 +0100 Subject: [PATCH 865/893] bump dockerfile to go 1.23 --- docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 02b53261ffa..4c031ebacb4 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22-alpine AS builder +FROM golang:1.23-alpine AS builder WORKDIR /go/src/github.com/restic/restic From 5e8654c71d7ce4c1f4b09544684ecabde3854c92 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 18 Jan 2025 22:54:47 +0100 Subject: [PATCH 866/893] restore: fix xattr filter test on windows --- internal/fs/node_xattr_all_test.go | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/internal/fs/node_xattr_all_test.go b/internal/fs/node_xattr_all_test.go index 79fde63e130..6a9a2e4bf91 100644 --- a/internal/fs/node_xattr_all_test.go +++ b/internal/fs/node_xattr_all_test.go @@ -42,14 +42,11 @@ func setAndVerifyXattr(t *testing.T, file string, attrs []restic.ExtendedAttribu func setAndVerifyXattrWithSelectFilter(t *testing.T, file string, testAttr []testXattrToRestore, xattrSelectFilter func(_ string) bool) { attrs := make([]restic.ExtendedAttribute, len(testAttr)) for i := range testAttr { - attrs[i] = testAttr[i].xattr - } - - if runtime.GOOS == "windows" { // windows seems to convert the xattr name to upper case - for i := range attrs { - attrs[i].Name = strings.ToUpper(attrs[i].Name) + if runtime.GOOS == "windows" { + testAttr[i].xattr.Name = strings.ToUpper(testAttr[i].xattr.Name) } + attrs[i] = testAttr[i].xattr 
} node := &restic.Node{ @@ -109,6 +106,18 @@ func TestOverwriteXattr(t *testing.T) { }) } +func uppercaseOnWindows(patterns []string) []string { + // windows seems to convert the xattr name to upper case + if runtime.GOOS == "windows" { + out := []string{} + for _, pattern := range patterns { + out = append(out, strings.ToUpper(pattern)) + } + return out + } + return patterns +} + func TestOverwriteXattrWithSelectFilter(t *testing.T) { dir := t.TempDir() file := filepath.Join(dir, "file2") @@ -118,7 +127,7 @@ func TestOverwriteXattrWithSelectFilter(t *testing.T) { // Set a filter as if the user passed in --include-xattr user.* xattrSelectFilter1 := func(xattrName string) bool { - shouldInclude, _ := filter.IncludeByPattern([]string{"user.*"}, noopWarnf)(xattrName) + shouldInclude, _ := filter.IncludeByPattern(uppercaseOnWindows([]string{"user.*"}), noopWarnf)(xattrName) return shouldInclude } @@ -148,7 +157,7 @@ func TestOverwriteXattrWithSelectFilter(t *testing.T) { // Set a filter as if the user passed in --include-xattr user.* xattrSelectFilter2 := func(xattrName string) bool { - shouldInclude, _ := filter.IncludeByPattern([]string{"user.o*", "user.comm*"}, noopWarnf)(xattrName) + shouldInclude, _ := filter.IncludeByPattern(uppercaseOnWindows([]string{"user.o*", "user.comm*"}), noopWarnf)(xattrName) return shouldInclude } From 9331461a1397a56f2958036b4d0f2497c1dcdd13 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 18 Jan 2025 22:15:35 +0100 Subject: [PATCH 867/893] prune: correctly account for duplicates in max-unused check The size comparison for `--max-unused` only accounted for unused but not for duplicate data. For repositories with a large amount of duplicates this can result in a situation where no data gets pruned even though the amount of unused data is much higher than specified. --- changelog/unreleased/pull-5212 | 10 ++++ internal/repository/prune.go | 3 +- internal/repository/prune_test.go | 79 +++++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/pull-5212 diff --git a/changelog/unreleased/pull-5212 b/changelog/unreleased/pull-5212 new file mode 100644 index 00000000000..5acd74c0074 --- /dev/null +++ b/changelog/unreleased/pull-5212 @@ -0,0 +1,10 @@ +Bugfix: Correctly account for duplicate data in `prune --max-unused` + +`prune --max-unused size` did not correctly account for duplicate data. +If a repository contained a large amount of duplicate data, this could +previously result in pruning too little data. + +This issue is now resolved. 
+ +https://github.com/restic/restic/pull/5212 +https://forum.restic.net/t/restic-not-obeying-max-unused-parameter-on-prune/8879 diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 3803b6f337a..1f58322392d 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -478,7 +478,8 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo *Repository, maxUnusedSizeAfter := opts.MaxUnusedBytes(stats.Size.Used) for _, p := range repackCandidates { - reachedUnusedSizeAfter := (stats.Size.Unused-stats.Size.Remove-stats.Size.Repackrm < maxUnusedSizeAfter) + remainingUnusedSize := stats.Size.Duplicate + stats.Size.Unused - stats.Size.Remove - stats.Size.Repackrm + reachedUnusedSizeAfter := remainingUnusedSize < maxUnusedSizeAfter reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index cc569aa4333..3234622f4bb 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -112,3 +112,82 @@ func TestPrune(t *testing.T) { }) } } + +// TestPruneMaxUnusedDuplicate checks that MaxUnused correctly accounts for duplicates. +// +// Create a repository containing blobs a to d that are stored in packs as follows: +// - a, d +// - b, d +// - c, d +// All blobs should be kept during prune, but the duplicates should be gone afterwards. +// The special construction ensures that each pack contains a used, non-duplicate blob. +// This ensures that special cases that delete completely duplicate packs files do not +// apply. +func TestPruneMaxUnusedDuplicate(t *testing.T) { + seed := time.Now().UnixNano() + random := rand.New(rand.NewSource(seed)) + t.Logf("rand initialized with seed %d", seed) + + repo, _, _ := repository.TestRepositoryWithVersion(t, 0) + // large blobs to prevent repacking due to too small packsize + const blobSize = 1024 * 1024 + + bufs := [][]byte{} + for i := 0; i < 4; i++ { + // use uniform length for simpler control via MaxUnusedBytes + buf := make([]byte, blobSize) + random.Read(buf) + bufs = append(bufs, buf) + } + keep := restic.NewBlobSet() + + for _, blobs := range [][][]byte{ + {bufs[0], bufs[3]}, + {bufs[1], bufs[3]}, + {bufs[2], bufs[3]}, + } { + var wg errgroup.Group + repo.StartPackUploader(context.TODO(), &wg) + + for _, blob := range blobs { + id, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, blob, restic.ID{}, true) + keep.Insert(restic.BlobHandle{Type: restic.DataBlob, ID: id}) + rtest.OK(t, err) + } + + rtest.OK(t, repo.Flush(context.Background())) + } + + opts := repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + // non-zero number of unused bytes, that is nevertheless smaller than a single blob + // setting this to zero would bypass the unused/duplicate size accounting that should + // be tested here + MaxUnusedBytes: func(used uint64) (unused uint64) { return blobSize / 2 }, + } + + plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository, usedBlobs restic.FindBlobSet) error { + for blob := range keep { + usedBlobs.Insert(blob) + } + return nil + }, &progress.NoopPrinter{}) + rtest.OK(t, err) + + rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{})) + + rsize := plan.Stats().Size + remainingUnusedSize := rsize.Duplicate + rsize.Unused - rsize.Remove - rsize.Repackrm + maxUnusedSize := opts.MaxUnusedBytes(rsize.Used) 
+ rtest.Assert(t, remainingUnusedSize <= maxUnusedSize, "too much unused data remains got %v, expected less than %v", remainingUnusedSize, maxUnusedSize) + + // divide by blobSize to ignore pack file overhead + rtest.Equals(t, rsize.Used/blobSize, uint64(4)) + rtest.Equals(t, rsize.Duplicate/blobSize, uint64(2)) + rtest.Equals(t, rsize.Unused, uint64(0)) + rtest.Equals(t, rsize.Remove, uint64(0)) + rtest.Equals(t, rsize.Repack/blobSize, uint64(4)) + rtest.Equals(t, rsize.Repackrm/blobSize, uint64(2)) + rtest.Equals(t, rsize.Unref, uint64(0)) + rtest.Equals(t, rsize.Uncompressed, uint64(0)) +} From 6301250d8314297f519d94d9a26ab8101c712ee6 Mon Sep 17 00:00:00 2001 From: Snshadow <89772726+Snshadow@users.noreply.github.com> Date: Mon, 27 Jan 2025 00:25:38 +0900 Subject: [PATCH 868/893] fix: Windows VSS Event ID 8194 (#5170) --- changelog/unreleased/pull-5170 | 21 +++++++++++++++++++++ internal/fs/vss_windows.go | 20 ++++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 changelog/unreleased/pull-5170 diff --git a/changelog/unreleased/pull-5170 b/changelog/unreleased/pull-5170 new file mode 100644 index 00000000000..312eb14a561 --- /dev/null +++ b/changelog/unreleased/pull-5170 @@ -0,0 +1,21 @@ +Bugfix: Prevent Windows VSS event log 8194 warnings for backup with fs snapshot + +When running `restic backup` with `--use-fs-snapshot` flag in Windows with admin rights, event logs like +``` +Volume Shadow Copy Service error: Unexpected error querying for the IVssWriterCallback interface. hr = 0x80070005, Access is denied. +. This is often caused by incorrect security settings in either the writer or requestor process. + +Operation: + Gathering Writer Data + +Context: + Writer Class Id: {e8132975-6f93-4464-a53e-1050253ae220} + Writer Name: System Writer + Writer Instance ID: {54b151ac-d27d-4628-9cb0-2bc40959f50f} +``` +are created several times(the backup itself succeeds). Prevent this from occurring. + + +https://github.com/restic/restic/issues/5169 +https://github.com/restic/restic/pull/5170 +https://forum.restic.net/t/windows-shadow-copy-snapshot-vss-unexpected-provider-error/3674/2 diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 840e971078d..1cbaf82f87a 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -810,6 +810,26 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) { } } + // initialize COM security for VSS, this can't be called more then once + + // Allowing all processes to perform incoming COM calls is not necessarily a security weakness. + // A requester acting as a COM server, like all other COM servers, always retains the option to authorize its clients on every COM method implemented in its process. + // + // Note that internal COM callbacks implemented by VSS are secured by default. + // Reference: https://learn.microsoft.com/en-us/windows/win32/vss/security-considerations-for-requestors#:~:text=Allowing%20all%20processes,secured%20by%20default. 
+ + if err = ole.CoInitializeSecurity( + -1, // Default COM authentication service + 6, // RPC_C_AUTHN_LEVEL_PKT_PRIVACY + 3, // RPC_C_IMP_LEVEL_IMPERSONATE + 0x20, // EOAC_STATIC_CLOAKING + ); err != nil { + // TODO warn for expected event logs for VSS IVssWriterCallback failure + return nil, newVssError( + "Failed to initialize security for VSS request", + HRESULT(err.(*ole.OleError).Code())) + } + var oleIUnknown *ole.IUnknown result, _, _ := vssInstance.Call(uintptr(unsafe.Pointer(&oleIUnknown))) hresult := HRESULT(result) From a63500663ac0b01e14b30bafc5b5c104c39f72ad Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 28 Jan 2025 19:51:12 +0100 Subject: [PATCH 869/893] CI: disable shadow builtin rule Removing the shadowing cases leads to weird workarounds but doesn't help much with code clarity. --- .golangci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.golangci.yml b/.golangci.yml index e632965bb2d..b415e1f56cd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -56,6 +56,7 @@ issues: # staticcheck: there's no easy way to replace these packages - "SA1019: \"golang.org/x/crypto/poly1305\" is deprecated" - "SA1019: \"golang.org/x/crypto/openpgp\" is deprecated" + - "redefines-builtin-id:" exclude-rules: # revive: ignore unused parameters in tests From e77681f2cdbcc844c6d7a0987027270d2d9e2267 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 28 Jan 2025 19:52:22 +0100 Subject: [PATCH 870/893] remove unnecessary min function --- internal/repository/packer_manager_test.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/internal/repository/packer_manager_test.go b/internal/repository/packer_manager_test.go index 0f3aea05f4b..bcc494075fd 100644 --- a/internal/repository/packer_manager_test.go +++ b/internal/repository/packer_manager_test.go @@ -23,13 +23,6 @@ func randomID(rd io.Reader) restic.ID { const maxBlobSize = 1 << 20 -func min(a, b int) int { - if a < b { - return a - } - return b -} - func fillPacks(t testing.TB, rnd *rand.Rand, pm *packerManager, buf []byte) (bytes int) { for i := 0; i < 102; i++ { l := rnd.Intn(maxBlobSize) From 8eaa4b6602902bef4e41bb0b11483628a02c4a78 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Tue, 28 Jan 2025 19:55:45 +0100 Subject: [PATCH 871/893] CI: bump golangci-lint to v1.63.4 --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5a41723bbd3..d28d2e46fab 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -259,7 +259,7 @@ jobs: uses: golangci/golangci-lint-action@v6 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.61.0 + version: v1.63.4 args: --verbose --timeout 5m # only run golangci-lint for pull requests, otherwise ALL hints get From c4be05dbc2741673bd22a2a850b223d5cdebb5c8 Mon Sep 17 00:00:00 2001 From: Winfried Plappert Date: Wed, 29 Jan 2025 20:44:16 +0000 Subject: [PATCH 872/893] Issue 4433: Ability to define sort order for output of find command (#5184) The old sorting behaviour was to sort snapshots from oldest to newest. The new sorting order is from newest to oldest. If one wants to revert to the old behaviour, use the option --reverse. 
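
For example (pattern borrowed from the command's existing examples; illustrative only):

    restic find --reverse "*.yml"

sorts the matches oldest to newest, i.e. the previous default order.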
--------- Co-authored-by: Michael Eischer --- changelog/unreleased/issue-4433 | 9 ++++++ cmd/restic/cmd_find.go | 11 +++++-- cmd/restic/cmd_find_integration_test.go | 42 ++++++++++++++++++++++++- 3 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 changelog/unreleased/issue-4433 diff --git a/changelog/unreleased/issue-4433 b/changelog/unreleased/issue-4433 new file mode 100644 index 00000000000..b6aefbe8eb9 --- /dev/null +++ b/changelog/unreleased/issue-4433 @@ -0,0 +1,9 @@ +Enhancement: Sort `find` output from newest to oldest and add `--reverse` option + +The old output behaviour was to sort snapshots from oldest to newest. +The new sorting order is from newest to oldest. If one wants to revert to the +old behaviour, use the option --reverse. + +https://github.com/restic/restic/issues/4433 +https://github.com/restic/restic/pull/5184 + diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 2e06fa00c60..2fcef57418b 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -22,7 +22,9 @@ var cmdFind = &cobra.Command{ Long: ` The "find" command searches for files or directories in snapshots stored in the repo. -It can also be used to search for restic blobs or trees for troubleshooting.`, +It can also be used to search for restic blobs or trees for troubleshooting. +The default sort option for the snapshots is youngest to oldest. To sort the +output from oldest to youngest specify --reverse.`, Example: `restic find config.json restic find --json "*.yml" "*.json" restic find --json --blob 420f620f b46ebe8a ddd38656 @@ -56,6 +58,7 @@ type FindOptions struct { CaseInsensitive bool ListLong bool HumanReadable bool + Reverse bool restic.SnapshotFilter } @@ -73,6 +76,7 @@ func init() { f.BoolVar(&findOptions.PackID, "pack", false, "pattern is a pack-ID") f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob or --tree)") f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern") + f.BoolVarP(&findOptions.Reverse, "reverse", "R", false, "reverse sort order oldest to newest") f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode") f.BoolVar(&findOptions.HumanReadable, "human-readable", false, "print sizes in human readable format") @@ -637,7 +641,10 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] } sort.Slice(filteredSnapshots, func(i, j int) bool { - return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time) + if opts.Reverse { + return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time) + } + return filteredSnapshots[i].Time.After(filteredSnapshots[j].Time) }) for _, sn := range filteredSnapshots { diff --git a/cmd/restic/cmd_find_integration_test.go b/cmd/restic/cmd_find_integration_test.go index 7e35cb141ac..95799749ae0 100644 --- a/cmd/restic/cmd_find_integration_test.go +++ b/cmd/restic/cmd_find_integration_test.go @@ -28,7 +28,6 @@ func TestFind(t *testing.T) { opts := BackupOptions{} testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) - testRunCheck(t, env.gopts) results := testRunFind(t, false, FindOptions{}, env.gopts, "unexistingfile") rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile) @@ -91,3 +90,44 @@ func TestFindJSON(t *testing.T) { rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", matches[0].Matches) rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", 
datafile) } + +func TestFindSorting(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := testSetupBackupData(t, env) + opts := BackupOptions{} + + // first backup + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + sn1 := testListSnapshots(t, env.gopts, 1)[0] + + // second backup + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + snapshots := testListSnapshots(t, env.gopts, 2) + // get id of new snapshot without depending on file order returned by filesystem + sn2 := snapshots[0] + if sn1.Equal(sn2) { + sn2 = snapshots[1] + } + + // first restic find - with default FindOptions{} + results := testRunFind(t, true, FindOptions{}, env.gopts, "testfile") + lines := strings.Split(string(results), "\n") + rtest.Assert(t, len(lines) == 2, "expected two files found in repo (%v), found %d", datafile, len(lines)) + matches := []testMatches{} + rtest.OK(t, json.Unmarshal(results, &matches)) + + // run second restic find with --reverse, sort oldest to newest + resultsReverse := testRunFind(t, true, FindOptions{Reverse: true}, env.gopts, "testfile") + lines = strings.Split(string(resultsReverse), "\n") + rtest.Assert(t, len(lines) == 2, "expected two files found in repo (%v), found %d", datafile, len(lines)) + matchesReverse := []testMatches{} + rtest.OK(t, json.Unmarshal(resultsReverse, &matchesReverse)) + + // compare result sets + rtest.Assert(t, sn1.String() == matchesReverse[0].SnapshotID, "snapshot[0] must match old snapshot") + rtest.Assert(t, sn2.String() == matchesReverse[1].SnapshotID, "snapshot[1] must match new snapshot") + rtest.Assert(t, matches[0].SnapshotID == matchesReverse[1].SnapshotID, "matches should be sorted 1") + rtest.Assert(t, matches[1].SnapshotID == matchesReverse[0].SnapshotID, "matches should be sorted 2") +} From c2e404a0ee3c78a70e63905f8e922fd683eb5686 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 31 Jan 2025 18:53:03 +0100 Subject: [PATCH 873/893] doc: add edit on github link --- doc/conf.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/conf.py b/doc/conf.py index f40a8c79e16..6c89d0f4101 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -91,6 +91,12 @@ # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] +html_context = { + 'display_github': True, + 'github_user': 'restic', + 'github_repo': 'restic', + 'github_version': 'master/doc/', +} # -- Options for HTMLHelp output ------------------------------------------ From afe4fcc0d94a7704bc093fdb3ac2c72854c37efe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Feb 2025 01:43:14 +0000 Subject: [PATCH 874/893] build(deps): bump github.com/spf13/pflag from 1.0.5 to 1.0.6 Bumps [github.com/spf13/pflag](https://github.com/spf13/pflag) from 1.0.5 to 1.0.6. - [Release notes](https://github.com/spf13/pflag/releases) - [Commits](https://github.com/spf13/pflag/compare/v1.0.5...v1.0.6) --- updated-dependencies: - dependency-name: github.com/spf13/pflag dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 2193e738c40..8b64f072182 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/pkg/xattr v0.4.10 github.com/restic/chunker v0.4.0 github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 + github.com/spf13/pflag v1.0.6 go.uber.org/automaxprocs v1.6.0 golang.org/x/crypto v0.32.0 golang.org/x/net v0.34.0 diff --git a/go.sum b/go.sum index ef829bd9021..30a7b3d6392 100644 --- a/go.sum +++ b/go.sum @@ -197,8 +197,9 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= From 2b9113721ca484a9c03d54fd5a878ef76d23797f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Feb 2025 01:43:20 +0000 Subject: [PATCH 875/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.8.0 to 1.8.1. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.8.0...sdk/azidentity/v1.8.1) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 24 ++++++++++++------------ 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 2193e738c40..bd4dc444918 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,8 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.43.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 github.com/Backblaze/blazer v0.7.1 github.com/Microsoft/go-winio v0.6.2 @@ -44,7 +44,7 @@ require ( cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect diff --git a/go.sum b/go.sum index ef829bd9021..74df12c6913 100644 --- a/go.sum +++ b/go.sum @@ -13,12 +13,12 @@ cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTS cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 h1:1mvYtZfWQAnwNah/C+Z+Jb9rQH95LPE2vlmMuWAHJk8= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1/go.mod h1:75I/mXtme1JyWFtz8GocPHVFyH421IBoZErnO16dd0k= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1 h1:Bk5uOhSAenHyR5P61D/NzeQCv+4fEVV8mOkJ82NqpWw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1/go.mod h1:QZ4pw3or1WPmRBxf0cHd1tknzrT54WPBOQoGutCPvSU= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= @@ -27,8 +27,8 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4a github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo= 
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Backblaze/blazer v0.7.1 h1:J43PbFj6hXLg1jvCNr+rQoAsxzKK0IP7ftl1ReCwpcQ= github.com/Backblaze/blazer v0.7.1/go.mod h1:MhntL1nMpIuoqrPP6TnZu/xTydMgOAe/Xm6KongbjKs= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -181,8 +181,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= -github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= +github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= +github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/restic/chunker v0.4.0 h1:YUPYCUn70MYP7VO4yllypp2SjmsRhRJaad3xKu1QFRw= github.com/restic/chunker v0.4.0/go.mod h1:z0cH2BejpW636LXw0R/BGyv+Ey8+m9QGiOanDHItzyw= github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s= @@ -208,8 +208,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= From c1781e0abbf4c3972f2b99c84bda2902f3ac393a Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Sat, 1 Feb 2025 08:14:01 +0100 Subject: [PATCH 876/893] ui/termstatus: Remove unused bytes.Buffer --- internal/ui/termstatus/status.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/internal/ui/termstatus/status.go b/internal/ui/termstatus/status.go index e6533095871..d3674ac09be 100644 --- a/internal/ui/termstatus/status.go +++ 
b/internal/ui/termstatus/status.go @@ -2,7 +2,6 @@ package termstatus import ( "bufio" - "bytes" "context" "fmt" "io" @@ -22,7 +21,6 @@ type Terminal struct { wr *bufio.Writer fd uintptr errWriter io.Writer - buf *bytes.Buffer msg chan message status chan status canUpdateStatus bool @@ -60,7 +58,6 @@ func New(wr io.Writer, errWriter io.Writer, disableStatus bool) *Terminal { t := &Terminal{ wr: bufio.NewWriter(wr), errWriter: errWriter, - buf: bytes.NewBuffer(nil), msg: make(chan message), status: make(chan status), closed: make(chan struct{}), From c4d988faf87837e0772ff1212232f2268cccfb63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Feb 2025 15:56:10 +0000 Subject: [PATCH 877/893] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/storage/azblob Bumps [github.com/Azure/azure-sdk-for-go/sdk/storage/azblob](https://github.com/Azure/azure-sdk-for-go) from 1.5.0 to 1.6.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.5.0...sdk/azcore/v1.6.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/storage/azblob dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bd4dc444918..57259ed90de 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ require ( cloud.google.com/go/storage v1.43.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 github.com/Backblaze/blazer v0.7.1 github.com/Microsoft/go-winio v0.6.2 github.com/anacrolix/fuse v0.3.1 diff --git a/go.sum b/go.sum index 74df12c6913..4e7fd8a4517 100644 --- a/go.sum +++ b/go.sum @@ -23,8 +23,8 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xP github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ= From 536ebefff44a3278fda42550137ac0f15df8d2fe Mon Sep 17 00:00:00 2001 From: Gilbert Gilb's Date: Sat, 1 Feb 2025 19:26:27 +0100 Subject: [PATCH 878/893] 
feat(backends/s3): add warmup support before repacks and restores (#5173)

* feat(backends/s3): add warmup support before repacks and restores

This commit introduces basic support for transitioning pack files stored
in cold storage to hot storage on S3 and S3-compatible providers.

To prevent unexpected behavior for existing users, the feature is gated
behind new flags:

- `s3.enable-restore`: opt-in flag (defaults to false)
- `s3.restore-days`: number of days for the restored objects to remain
  in hot storage (defaults to `7`)
- `s3.restore-timeout`: maximum time to wait for a single restoration
  (defaults to `1 day`)
- `s3.restore-tier`: retrieval tier at which the restore will be
  processed (defaults to `Standard`)

As restoration times can be lengthy, this implementation preemptively
restores selected packs to prevent incessant restore delays during
downloads. This is slightly sub-optimal as we could process packs
out-of-order (as soon as they're transitioned), but this would really
add too much complexity for a marginal gain in speed.

To maintain simplicity and prevent resource exhaustion with lots of
packs, no new concurrency mechanisms or goroutines were added. This just
hooks gracefully into the existing routines.

**Limitations:**

- Tests against the backend were not written due to the lack of cold
  storage class support in MinIO. Testing was done manually on
  Scaleway's S3-compatible object storage. If necessary, we could
  explore testing with LocalStack or mocks, though this requires further
  discussion.
- Currently, this feature only warms up before restores and repacks
  (prune/copy), as those are the two main use-cases I came across.
  Support for other commands may be added in future iterations, as long
  as affected packs can be calculated in advance.
- The feature is gated behind a new alpha `s3-restore` feature flag to
  make it explicit that the feature is still wet behind the ears.
- There is no explicit user notification for ongoing pack restorations.
  While I think it is not necessary because of the opt-in flag, showing
  some notice may improve usability (but would probably require major
  refactoring in the progress bar which I didn't want to start). Another
  possibility would be to add a flag to send restore requests and fail
  early. See https://github.com/restic/restic/issues/3202

* ui: warn user when files are warming up from cold storage

* refactor: remove the PacksWarmer struct

It's easier to handle multiple handles in the backend directly, and it
may open the door to reducing the number of requests made to the backend
in the future.
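
A usage sketch (mirroring the doc/faq.rst example added in this patch; the
option values shown are illustrative only):

    RESTIC_FEATURES=s3-restore restic restore \
        -o s3.enable-restore=1 -o s3.restore-days=7 \
        -o s3.restore-timeout=1d -o s3.restore-tier=Standard latest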
--- changelog/unreleased/issue-3202 | 10 ++ cmd/restic/cmd_copy.go | 10 +- cmd/restic/cmd_restore.go | 6 + doc/faq.rst | 30 ++++ internal/backend/azure/azure.go | 6 + internal/backend/b2/b2.go | 6 + internal/backend/backend.go | 15 ++ internal/backend/cache/backend.go | 10 ++ internal/backend/dryrun/dry_backend.go | 6 + internal/backend/gs/gs.go | 6 + internal/backend/local/local.go | 6 + internal/backend/mem/mem_backend.go | 6 + internal/backend/mock/backend.go | 18 ++ internal/backend/rclone/backend.go | 6 + internal/backend/rest/rest.go | 6 + internal/backend/retry/backend_retry.go | 8 + internal/backend/s3/config.go | 14 +- internal/backend/s3/config_test.go | 212 ++++++++++++------------ internal/backend/s3/s3.go | 167 ++++++++++++++++++- internal/backend/sftp/sftp.go | 6 + internal/backend/swift/swift.go | 6 + internal/feature/registry.go | 2 + internal/repository/prune.go | 2 +- internal/repository/repack.go | 42 ++++- internal/repository/repack_test.go | 8 +- internal/repository/warmup.go | 39 +++++ internal/repository/warmup_test.go | 73 ++++++++ internal/restic/repository.go | 10 ++ internal/restorer/filerestorer.go | 22 +++ internal/restorer/filerestorer_test.go | 41 ++++- internal/restorer/restorer.go | 4 +- 31 files changed, 680 insertions(+), 123 deletions(-) create mode 100644 changelog/unreleased/issue-3202 create mode 100644 internal/repository/warmup.go create mode 100644 internal/repository/warmup_test.go diff --git a/changelog/unreleased/issue-3202 b/changelog/unreleased/issue-3202 new file mode 100644 index 00000000000..e6fca05e163 --- /dev/null +++ b/changelog/unreleased/issue-3202 @@ -0,0 +1,10 @@ +Enhancement: Add warmup support on S3 backend before repacks and restores + +Introduce S3 backend options for transitioning pack files from cold to hot +storage on S3 and S3-compatible providers. Note: only works before repacks +(prune/copy) and restore for now, and gated behind a new "s3-restore" feature +flag. + +https://github.com/restic/restic/pull/5173 +https://github.com/restic/restic/issues/3202 +https://github.com/restic/restic/issues/2504 diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index cd92193ac38..301e0e18025 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -237,7 +237,15 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep } bar := newProgressMax(!quiet, uint64(len(packList)), "packs copied") - _, err = repository.Repack(ctx, srcRepo, dstRepo, packList, copyBlobs, bar) + _, err = repository.Repack( + ctx, + srcRepo, + dstRepo, + packList, + copyBlobs, + bar, + func(msg string, args ...interface{}) { fmt.Printf(msg+"\n", args...) 
}, + ) bar.Done() if err != nil { return errors.Fatal(err.Error()) diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 7a3b029daab..c930abc310b 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -179,6 +179,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, res.Warn = func(message string) { msg.E("Warning: %s\n", message) } + res.Info = func(message string) { + if gopts.JSON { + return + } + msg.P("Info: %s\n", message) + } selectExcludeFilter := func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) { matched := false diff --git a/doc/faq.rst b/doc/faq.rst index 74dd77d7143..3b62f641d40 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -242,3 +242,33 @@ collect a list of all files, causing the following error: List(data) returned error, retrying after 1s: [...]: request timeout In this case you can increase the timeout using the ``--stuck-request-timeout`` option. + +Are "cold storages" supported? +------------------------------ + +Generally, restic does not natively support "cold storage" solutions. However, +experimental support for restoring from **S3 Glacier** and **S3 Glacier Deep +Archive** storage classes is available: + +.. code-block:: console + + $ restic backup -o s3.storage-class=GLACIER somedir/ + $ RESTIC_FEATURES=s3-restore restic restore -o s3.enable-restore=1 -o s3.restore-days=7 -o s3.restore-timeout=1d latest + +**Notes:** + +- This feature is still in early alpha stage. Expect arbitrary breaking changes + in the future (although we'll do our best-effort to avoid them). +- Expect restores to hang from 1 up to 42 hours depending on your storage + class, provider and luck. Restores from cold storages are known to be + time-consuming. You may need to adjust the `s3.restore-timeout` if a restore + operation takes more than 24 hours. +- Restic will prevent sending metadata files (such as config files, lock files + or tree blobs) to Glacier or Deep Archive. Standard class is used instead to + ensure normal and fast operations for most tasks. 
+- Currently, only the following commands are known to work: + + - `backup` + - `copy` + - `prune` + - `restore` diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 27390ee13fe..b2ef7ec3031 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -475,3 +475,9 @@ func (be *Backend) Delete(ctx context.Context) error { // Close does nothing func (be *Backend) Close() error { return nil } + +// Warmup not implemented +func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { + return []backend.Handle{}, nil +} +func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil } diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go index 3ef2bcbe3d1..6f66b3673c9 100644 --- a/internal/backend/b2/b2.go +++ b/internal/backend/b2/b2.go @@ -335,3 +335,9 @@ func (be *b2Backend) Delete(ctx context.Context) error { // Close does nothing func (be *b2Backend) Close() error { return nil } + +// Warmup not implemented +func (be *b2Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { + return []backend.Handle{}, nil +} +func (be *b2Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil } diff --git a/internal/backend/backend.go b/internal/backend/backend.go index f606e1123e8..2529dfab56d 100644 --- a/internal/backend/backend.go +++ b/internal/backend/backend.go @@ -75,6 +75,21 @@ type Backend interface { // Delete removes all data in the backend. Delete(ctx context.Context) error + + // Warmup ensures that the specified handles are ready for upcoming reads. + // This is particularly useful for transitioning files from cold to hot + // storage. + // + // The method is non-blocking. WarmupWait can be used to wait for + // completion. + // + // Returns: + // - Handles currently warming up. + // - An error if warmup fails. + Warmup(ctx context.Context, h []Handle) ([]Handle, error) + + // WarmupWait waits until all given handles are warm. + WarmupWait(ctx context.Context, h []Handle) error } type Unwrapper interface { diff --git a/internal/backend/cache/backend.go b/internal/backend/cache/backend.go index 3754266ba96..f323b16636e 100644 --- a/internal/backend/cache/backend.go +++ b/internal/backend/cache/backend.go @@ -258,3 +258,13 @@ func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backen return nil } + +// Warmup delegates to wrapped backend. +func (b *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) { + return b.Backend.Warmup(ctx, h) +} + +// WarmupWait delegates to wrapped backend. +func (b *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error { + return b.Backend.WarmupWait(ctx, h) +} diff --git a/internal/backend/dryrun/dry_backend.go b/internal/backend/dryrun/dry_backend.go index 8af0ce9adc4..fbce4191691 100644 --- a/internal/backend/dryrun/dry_backend.go +++ b/internal/backend/dryrun/dry_backend.go @@ -82,3 +82,9 @@ func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offse func (be *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) { return be.b.Stat(ctx, h) } + +// Warmup should not occur during dry-runs. 
+func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { + return []backend.Handle{}, nil +} +func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil } diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go index ad50f194b52..ab20ca103ae 100644 --- a/internal/backend/gs/gs.go +++ b/internal/backend/gs/gs.go @@ -363,3 +363,9 @@ func (be *Backend) Delete(ctx context.Context) error { // Close does nothing. func (be *Backend) Close() error { return nil } + +// Warmup not implemented +func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { + return []backend.Handle{}, nil +} +func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil } diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go index ee87ae5d6e6..e2065742f70 100644 --- a/internal/backend/local/local.go +++ b/internal/backend/local/local.go @@ -371,3 +371,9 @@ func (b *Local) Close() error { // same function. return nil } + +// Warmup not implemented +func (b *Local) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { + return []backend.Handle{}, nil +} +func (b *Local) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil } diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go index 981c0a18250..e5ee297a111 100644 --- a/internal/backend/mem/mem_backend.go +++ b/internal/backend/mem/mem_backend.go @@ -249,3 +249,9 @@ func (be *MemoryBackend) Delete(ctx context.Context) error { func (be *MemoryBackend) Close() error { return nil } + +// Warmup not implemented +func (be *MemoryBackend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { + return []backend.Handle{}, nil +} +func (be *MemoryBackend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil } diff --git a/internal/backend/mock/backend.go b/internal/backend/mock/backend.go index a0319844333..2083f7e88cd 100644 --- a/internal/backend/mock/backend.go +++ b/internal/backend/mock/backend.go @@ -20,6 +20,8 @@ type Backend struct { ListFn func(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error RemoveFn func(ctx context.Context, h backend.Handle) error DeleteFn func(ctx context.Context) error + WarmupFn func(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) + WarmupWaitFn func(ctx context.Context, h []backend.Handle) error ConnectionsFn func() uint HasherFn func() hash.Hash HasAtomicReplaceFn func() bool @@ -150,5 +152,21 @@ func (m *Backend) Delete(ctx context.Context) error { return m.DeleteFn(ctx) } +func (m *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) { + if m.WarmupFn == nil { + return []backend.Handle{}, errors.New("not implemented") + } + + return m.WarmupFn(ctx, h) +} + +func (m *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error { + if m.WarmupWaitFn == nil { + return errors.New("not implemented") + } + + return m.WarmupWaitFn(ctx, h) +} + // Make sure that Backend implements the backend interface. 
var _ backend.Backend = &Backend{} diff --git a/internal/backend/rclone/backend.go b/internal/backend/rclone/backend.go index 8294aa8c419..fb5ed34eb41 100644 --- a/internal/backend/rclone/backend.go +++ b/internal/backend/rclone/backend.go @@ -340,3 +340,9 @@ func (be *Backend) Close() error { debug.Log("wait for rclone returned: %v", be.waitResult) return be.waitResult } + +// Warmup not implemented +func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { + return []backend.Handle{}, nil +} +func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil } diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go index 7bdedff397f..2c5f59b4eb0 100644 --- a/internal/backend/rest/rest.go +++ b/internal/backend/rest/rest.go @@ -439,3 +439,9 @@ func (b *Backend) Close() error { func (b *Backend) Delete(ctx context.Context) error { return util.DefaultDelete(ctx, b) } + +// Warmup not implemented +func (b *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { + return []backend.Handle{}, nil +} +func (b *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil } diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index de8a520ec9e..aa48bde77a3 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -289,3 +289,11 @@ func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend func (be *Backend) Unwrap() backend.Backend { return be.Backend } + +// Warmup delegates to wrapped backend +func (be *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) { + return be.Backend.Warmup(ctx, h) +} +func (be *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error { + return be.Backend.WarmupWait(ctx, h) +} diff --git a/internal/backend/s3/config.go b/internal/backend/s3/config.go index be2a78ce5cc..77f27408ec0 100644 --- a/internal/backend/s3/config.go +++ b/internal/backend/s3/config.go @@ -5,6 +5,7 @@ import ( "os" "path" "strings" + "time" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/errors" @@ -23,6 +24,11 @@ type Config struct { Layout string `option:"layout" help:"use this backend layout (default: auto-detect) (deprecated)"` StorageClass string `option:"storage-class" help:"set S3 storage class (STANDARD, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or REDUCED_REDUNDANCY)"` + EnableRestore bool `option:"enable-restore" help:"restore objects from GLACIER or DEEP_ARCHIVE storage classes (default: false, requires \"s3-restore\" feature flag)"` + RestoreDays int `option:"restore-days" help:"lifetime in days of restored object (default: 7)"` + RestoreTimeout time.Duration `option:"restore-timeout" help:"maximum time to wait for objects transition (default: 1d)"` + RestoreTier string `option:"restore-tier" help:"Retrieval tier at which the restore will be processed. (Standard, Bulk or Expedited) (default: Standard)"` + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` MaxRetries uint `option:"retries" help:"set the number of retries attempted"` Region string `option:"region" help:"set region"` @@ -34,8 +40,12 @@ type Config struct { // NewConfig returns a new Config with the default values filled in. 
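The four new options above ride on the existing extended-options mechanism, and their help texts spell out the defaults. Purely for illustration, a restore from a GLACIER-backed repository could be driven roughly as follows; the repository URL and target path are placeholders, and enabling the alpha flag via the RESTIC_FEATURES environment variable is an assumption about how flags in the feature registry are switched on:

    # placeholders throughout; requires the alpha `s3-restore` feature flag
    export RESTIC_FEATURES=s3-restore
    restic -r s3:s3.amazonaws.com/my-bucket \
        -o s3.storage-class=GLACIER \
        -o s3.enable-restore=true \
        -o s3.restore-days=7 \
        -o s3.restore-tier=Standard \
        restore latest --target /tmp/restored
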
func NewConfig() Config { return Config{ - Connections: 5, - ListObjectsV1: false, + Connections: 5, + ListObjectsV1: false, + EnableRestore: false, + RestoreDays: 7, + RestoreTimeout: 24 * time.Hour, + RestoreTier: "Standard", } } diff --git a/internal/backend/s3/config_test.go b/internal/backend/s3/config_test.go index 085dbeedb17..74f959a6542 100644 --- a/internal/backend/s3/config_test.go +++ b/internal/backend/s3/config_test.go @@ -3,117 +3,117 @@ package s3 import ( "strings" "testing" + "time" "github.com/restic/restic/internal/backend/test" ) +func newTestConfig(cfg Config) Config { + if cfg.Connections == 0 { + cfg.Connections = 5 + } + if cfg.RestoreDays == 0 { + cfg.RestoreDays = 7 + } + if cfg.RestoreTimeout == 0 { + cfg.RestoreTimeout = 24 * time.Hour + } + if cfg.RestoreTier == "" { + cfg.RestoreTier = "Standard" + } + return cfg +} + var configTests = []test.ConfigTestData[Config]{ - {S: "s3://eu-central-1/bucketname", Cfg: Config{ - Endpoint: "eu-central-1", - Bucket: "bucketname", - Prefix: "", - Connections: 5, - }}, - {S: "s3://eu-central-1/bucketname/", Cfg: Config{ - Endpoint: "eu-central-1", - Bucket: "bucketname", - Prefix: "", - Connections: 5, - }}, - {S: "s3://eu-central-1/bucketname/prefix/directory", Cfg: Config{ - Endpoint: "eu-central-1", - Bucket: "bucketname", - Prefix: "prefix/directory", - Connections: 5, - }}, - {S: "s3://eu-central-1/bucketname/prefix/directory/", Cfg: Config{ - Endpoint: "eu-central-1", - Bucket: "bucketname", - Prefix: "prefix/directory", - Connections: 5, - }}, - {S: "s3:eu-central-1/foobar", Cfg: Config{ - Endpoint: "eu-central-1", - Bucket: "foobar", - Prefix: "", - Connections: 5, - }}, - {S: "s3:eu-central-1/foobar/", Cfg: Config{ - Endpoint: "eu-central-1", - Bucket: "foobar", - Prefix: "", - Connections: 5, - }}, - {S: "s3:eu-central-1/foobar/prefix/directory", Cfg: Config{ - Endpoint: "eu-central-1", - Bucket: "foobar", - Prefix: "prefix/directory", - Connections: 5, - }}, - {S: "s3:eu-central-1/foobar/prefix/directory/", Cfg: Config{ - Endpoint: "eu-central-1", - Bucket: "foobar", - Prefix: "prefix/directory", - Connections: 5, - }}, - {S: "s3:hostname.foo/foobar", Cfg: Config{ - Endpoint: "hostname.foo", - Bucket: "foobar", - Prefix: "", - Connections: 5, - }}, - {S: "s3:hostname.foo/foobar/prefix/directory", Cfg: Config{ - Endpoint: "hostname.foo", - Bucket: "foobar", - Prefix: "prefix/directory", - Connections: 5, - }}, - {S: "s3:https://hostname/foobar", Cfg: Config{ - Endpoint: "hostname", - Bucket: "foobar", - Prefix: "", - Connections: 5, - }}, - {S: "s3:https://hostname:9999/foobar", Cfg: Config{ - Endpoint: "hostname:9999", - Bucket: "foobar", - Prefix: "", - Connections: 5, - }}, - {S: "s3:https://hostname:9999/foobar/", Cfg: Config{ - Endpoint: "hostname:9999", - Bucket: "foobar", - Prefix: "", - Connections: 5, - }}, - {S: "s3:http://hostname:9999/foobar", Cfg: Config{ - Endpoint: "hostname:9999", - Bucket: "foobar", - Prefix: "", - UseHTTP: true, - Connections: 5, - }}, - {S: "s3:http://hostname:9999/foobar/", Cfg: Config{ - Endpoint: "hostname:9999", - Bucket: "foobar", - Prefix: "", - UseHTTP: true, - Connections: 5, - }}, - {S: "s3:http://hostname:9999/bucket/prefix/directory", Cfg: Config{ - Endpoint: "hostname:9999", - Bucket: "bucket", - Prefix: "prefix/directory", - UseHTTP: true, - Connections: 5, - }}, - {S: "s3:http://hostname:9999/bucket/prefix/directory/", Cfg: Config{ - Endpoint: "hostname:9999", - Bucket: "bucket", - Prefix: "prefix/directory", - UseHTTP: true, - Connections: 5, - }}, + {S: 
"s3://eu-central-1/bucketname", Cfg: newTestConfig(Config{ + Endpoint: "eu-central-1", + Bucket: "bucketname", + Prefix: "", + })}, + {S: "s3://eu-central-1/bucketname/", Cfg: newTestConfig(Config{ + Endpoint: "eu-central-1", + Bucket: "bucketname", + Prefix: "", + })}, + {S: "s3://eu-central-1/bucketname/prefix/directory", Cfg: newTestConfig(Config{ + Endpoint: "eu-central-1", + Bucket: "bucketname", + Prefix: "prefix/directory", + })}, + {S: "s3://eu-central-1/bucketname/prefix/directory/", Cfg: newTestConfig(Config{ + Endpoint: "eu-central-1", + Bucket: "bucketname", + Prefix: "prefix/directory", + })}, + {S: "s3:eu-central-1/foobar", Cfg: newTestConfig(Config{ + Endpoint: "eu-central-1", + Bucket: "foobar", + Prefix: "", + })}, + {S: "s3:eu-central-1/foobar/", Cfg: newTestConfig(Config{ + Endpoint: "eu-central-1", + Bucket: "foobar", + Prefix: "", + })}, + {S: "s3:eu-central-1/foobar/prefix/directory", Cfg: newTestConfig(Config{ + Endpoint: "eu-central-1", + Bucket: "foobar", + Prefix: "prefix/directory", + })}, + {S: "s3:eu-central-1/foobar/prefix/directory/", Cfg: newTestConfig(Config{ + Endpoint: "eu-central-1", + Bucket: "foobar", + Prefix: "prefix/directory", + })}, + {S: "s3:hostname.foo/foobar", Cfg: newTestConfig(Config{ + Endpoint: "hostname.foo", + Bucket: "foobar", + Prefix: "", + })}, + {S: "s3:hostname.foo/foobar/prefix/directory", Cfg: newTestConfig(Config{ + Endpoint: "hostname.foo", + Bucket: "foobar", + Prefix: "prefix/directory", + })}, + {S: "s3:https://hostname/foobar", Cfg: newTestConfig(Config{ + Endpoint: "hostname", + Bucket: "foobar", + Prefix: "", + })}, + {S: "s3:https://hostname:9999/foobar", Cfg: newTestConfig(Config{ + Endpoint: "hostname:9999", + Bucket: "foobar", + Prefix: "", + })}, + {S: "s3:https://hostname:9999/foobar/", Cfg: newTestConfig(Config{ + Endpoint: "hostname:9999", + Bucket: "foobar", + Prefix: "", + })}, + {S: "s3:http://hostname:9999/foobar", Cfg: newTestConfig(Config{ + Endpoint: "hostname:9999", + Bucket: "foobar", + Prefix: "", + UseHTTP: true, + })}, + {S: "s3:http://hostname:9999/foobar/", Cfg: newTestConfig(Config{ + Endpoint: "hostname:9999", + Bucket: "foobar", + Prefix: "", + UseHTTP: true, + })}, + {S: "s3:http://hostname:9999/bucket/prefix/directory", Cfg: newTestConfig(Config{ + Endpoint: "hostname:9999", + Bucket: "bucket", + Prefix: "prefix/directory", + UseHTTP: true, + })}, + {S: "s3:http://hostname:9999/bucket/prefix/directory/", Cfg: newTestConfig(Config{ + Endpoint: "hostname:9999", + Bucket: "bucket", + Prefix: "prefix/directory", + UseHTTP: true, + })}, } func TestParseConfig(t *testing.T) { diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index e3d4cc499dc..e0d8ea62337 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -8,8 +8,11 @@ import ( "net/http" "os" "path" + "slices" "strings" + "time" + "github.com/cenkalti/backoff/v4" "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/layout" "github.com/restic/restic/internal/backend/location" @@ -32,6 +35,17 @@ type Backend struct { // make sure that *Backend implements backend.Backend var _ backend.Backend = &Backend{} +var archiveClasses = []string{"GLACIER", "DEEP_ARCHIVE"} + +type warmupStatus int + +const ( + warmupStatusCold warmupStatus = iota + warmupStatusWarmingUp + warmupStatusWarm + warmupStatusLukewarm +) + func NewFactory() location.Factory { return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open) } @@ -39,6 +53,10 @@ func NewFactory() 
location.Factory { func open(cfg Config, rt http.RoundTripper) (*Backend, error) { debug.Log("open, config %#v", cfg) + if cfg.EnableRestore && !feature.Flag.Enabled(feature.S3Restore) { + return nil, fmt.Errorf("feature flag `s3-restore` is required to use `-o s3.enable-restore=true`") + } + if cfg.KeyID == "" && cfg.Secret.String() != "" { return nil, errors.Fatalf("unable to open S3 backend: Key ID ($AWS_ACCESS_KEY_ID) is empty") } else if cfg.KeyID != "" && cfg.Secret.String() == "" { @@ -266,9 +284,9 @@ func (be *Backend) Path() string { // For archive storage classes, only data files are stored using that class; metadata // must remain instantly accessible. func (be *Backend) useStorageClass(h backend.Handle) bool { - notArchiveClass := be.cfg.StorageClass != "GLACIER" && be.cfg.StorageClass != "DEEP_ARCHIVE" isDataFile := h.Type == backend.PackFile && !h.IsMetadata - return isDataFile || notArchiveClass + isArchiveClass := slices.Contains(archiveClasses, be.cfg.StorageClass) + return !isArchiveClass || isDataFile } // Save stores data in the backend at the handle. @@ -440,3 +458,148 @@ func (be *Backend) Delete(ctx context.Context) error { // Close does nothing func (be *Backend) Close() error { return nil } + +// Warmup transitions handles from cold to hot storage if needed. +func (be *Backend) Warmup(ctx context.Context, handles []backend.Handle) ([]backend.Handle, error) { + handlesWarmingUp := []backend.Handle{} + + if be.cfg.EnableRestore { + for _, h := range handles { + filename := be.Filename(h) + isWarmingUp, err := be.requestRestore(ctx, filename) + if err != nil { + return handlesWarmingUp, err + } + if isWarmingUp { + debug.Log("s3 file is being restored: %s", filename) + handlesWarmingUp = append(handlesWarmingUp, h) + } + } + } + + return handlesWarmingUp, nil +} + +// requestRestore sends a glacier restore request on a given file. +func (be *Backend) requestRestore(ctx context.Context, filename string) (bool, error) { + objectInfo, err := be.client.StatObject(ctx, be.cfg.Bucket, filename, minio.StatObjectOptions{}) + if err != nil { + return false, err + } + + ws := be.getWarmupStatus(objectInfo) + switch ws { + case warmupStatusWarm: + return false, nil + case warmupStatusWarmingUp: + return true, nil + } + + opts := minio.RestoreRequest{} + opts.SetDays(be.cfg.RestoreDays) + opts.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierType(be.cfg.RestoreTier)}) + + if err := be.client.RestoreObject(ctx, be.cfg.Bucket, filename, "", opts); err != nil { + var e minio.ErrorResponse + if errors.As(err, &e) { + switch e.Code { + case "InvalidObjectState": + return false, nil + case "RestoreAlreadyInProgress": + return true, nil + } + } + return false, err + } + + isWarmingUp := ws != warmupStatusLukewarm + return isWarmingUp, nil +} + +// getWarmupStatus returns the warmup status of the provided object. +func (be *Backend) getWarmupStatus(objectInfo minio.ObjectInfo) warmupStatus { + // We can't use objectInfo.StorageClass to get the storage class of the + // object because this field is only set during ListObjects operations. + // The response header is the documented way to get the storage class + // for GetObject/StatObject operations. 
+ storageClass := objectInfo.Metadata.Get("X-Amz-Storage-Class") + isArchiveClass := slices.Contains(archiveClasses, storageClass) + if !isArchiveClass { + return warmupStatusWarm + } + + restore := objectInfo.Restore + if restore != nil { + if restore.OngoingRestore { + return warmupStatusWarmingUp + } + + minExpiryTime := time.Now().Add(time.Duration(be.cfg.RestoreDays) * 24 * time.Hour) + expiryTime := restore.ExpiryTime + if !expiryTime.IsZero() { + if minExpiryTime.Before(expiryTime) { + return warmupStatusWarm + } + return warmupStatusLukewarm + } + } + + return warmupStatusCold +} + +// WarmupWait waits until all handles are in hot storage. +func (be *Backend) WarmupWait(ctx context.Context, handles []backend.Handle) error { + timeoutCtx, timeoutCtxCancel := context.WithTimeout(ctx, be.cfg.RestoreTimeout) + defer timeoutCtxCancel() + + if be.cfg.EnableRestore { + for _, h := range handles { + filename := be.Filename(h) + err := be.waitForRestore(timeoutCtx, filename) + if err != nil { + return err + } + debug.Log("s3 file is restored: %s", filename) + } + } + + return nil +} + +// waitForRestore waits for a given file to be restored. +func (be *Backend) waitForRestore(ctx context.Context, filename string) error { + for { + var objectInfo minio.ObjectInfo + + // Restore requests can last many hours, therefore network may fail + // temporarily. We don't need to die in such even. + b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 10) + b = backoff.WithContext(b, ctx) + err := backoff.Retry( + func() (err error) { + objectInfo, err = be.client.StatObject(ctx, be.cfg.Bucket, filename, minio.StatObjectOptions{}) + return + }, + b, + ) + if err != nil { + return err + } + + ws := be.getWarmupStatus(objectInfo) + switch ws { + case warmupStatusLukewarm: + fallthrough + case warmupStatusWarm: + return nil + case warmupStatusCold: + return errors.New("waiting on S3 handle that is not warming up") + } + + select { + case <-time.After(1 * time.Minute): + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index 14819a2df6f..df7c3b14aac 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -588,3 +588,9 @@ func (r *SFTP) deleteRecursive(ctx context.Context, name string) error { func (r *SFTP) Delete(ctx context.Context) error { return r.deleteRecursive(ctx, r.p) } + +// Warmup not implemented +func (r *SFTP) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { + return []backend.Handle{}, nil +} +func (r *SFTP) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil } diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go index dfa2055cdf1..090d0051288 100644 --- a/internal/backend/swift/swift.go +++ b/internal/backend/swift/swift.go @@ -269,3 +269,9 @@ func (be *beSwift) Delete(ctx context.Context) error { // Close does nothing func (be *beSwift) Close() error { return nil } + +// Warmup not implemented +func (be *beSwift) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) { + return []backend.Handle{}, nil +} +func (be *beSwift) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil } diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 0773ea13650..99979727182 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -9,6 +9,7 @@ const ( DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" ExplicitS3AnonymousAuth FlagName = 
"explicit-s3-anonymous-auth" SafeForgetKeepTags FlagName = "safe-forget-keep-tags" + S3Restore FlagName = "s3-restore" ) func init() { @@ -17,5 +18,6 @@ func init() { DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, ExplicitS3AnonymousAuth: {Type: Stable, Description: "forbid anonymous S3 authentication unless `-o s3.unsafe-anonymous-auth=true` is set"}, SafeForgetKeepTags: {Type: Stable, Description: "prevent deleting all snapshots if the tag passed to `forget --keep-tags tagname` does not exist"}, + S3Restore: {Type: Alpha, Description: "restore S3 objects from cold storage classes when `-o s3.enable-restore=true` is set"}, }) } diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 1f58322392d..ba13ba1a3f3 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -557,7 +557,7 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) er printer.P("repacking packs\n") bar := printer.NewCounter("packs repacked") bar.SetMax(uint64(len(plan.repackPacks))) - _, err := Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) + _, err := Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar, printer.P) bar.Done() if err != nil { return errors.Fatal(err.Error()) diff --git a/internal/repository/repack.go b/internal/repository/repack.go index 8c9ca28bb58..9291914782f 100644 --- a/internal/repository/repack.go +++ b/internal/repository/repack.go @@ -6,6 +6,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/progress" @@ -18,6 +19,8 @@ type repackBlobSet interface { Len() int } +type LogFunc func(msg string, args ...interface{}) + // Repack takes a list of packs together with a list of blobs contained in // these packs. Each pack is loaded and the blobs listed in keepBlobs is saved // into a new pack. Returned is the list of obsolete packs which can then @@ -25,9 +28,21 @@ type repackBlobSet interface { // // The map keepBlobs is modified by Repack, it is used to keep track of which // blobs have been processed. 
-func Repack(ctx context.Context, repo restic.Repository, dstRepo restic.Repository, packs restic.IDSet, keepBlobs repackBlobSet, p *progress.Counter) (obsoletePacks restic.IDSet, err error) { +func Repack( + ctx context.Context, + repo restic.Repository, + dstRepo restic.Repository, + packs restic.IDSet, + keepBlobs repackBlobSet, + p *progress.Counter, + logf LogFunc, +) (obsoletePacks restic.IDSet, err error) { debug.Log("repacking %d packs while keeping %d blobs", len(packs), keepBlobs.Len()) + if logf == nil { + logf = func(_ string, _ ...interface{}) {} + } + if repo == dstRepo && dstRepo.Connections() < 2 { return nil, errors.New("repack step requires a backend connection limit of at least two") } @@ -37,7 +52,7 @@ func Repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito dstRepo.StartPackUploader(wgCtx, wg) wg.Go(func() error { var err error - obsoletePacks, err = repack(wgCtx, repo, dstRepo, packs, keepBlobs, p) + obsoletePacks, err = repack(wgCtx, repo, dstRepo, packs, keepBlobs, p, logf) return err }) @@ -47,9 +62,30 @@ func Repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito return obsoletePacks, nil } -func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Repository, packs restic.IDSet, keepBlobs repackBlobSet, p *progress.Counter) (obsoletePacks restic.IDSet, err error) { +func repack( + ctx context.Context, + repo restic.Repository, + dstRepo restic.Repository, + packs restic.IDSet, + keepBlobs repackBlobSet, + p *progress.Counter, + logf LogFunc, +) (obsoletePacks restic.IDSet, err error) { wg, wgCtx := errgroup.WithContext(ctx) + if feature.Flag.Enabled(feature.S3Restore) { + job, err := repo.StartWarmup(ctx, packs) + if err != nil { + return nil, err + } + if job.HandleCount() != 0 { + logf("warming up %d packs from cold storage, this may take a while...", job.HandleCount()) + if err := job.Wait(ctx); err != nil { + return nil, err + } + } + } + var keepMutex sync.Mutex downloadQueue := make(chan restic.PackBlobs) wg.Go(func() error { diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index 0691cdbbb04..9248e42c256 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -160,7 +160,7 @@ func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSe } func repack(t *testing.T, repo restic.Repository, be backend.Backend, packs restic.IDSet, blobs restic.BlobSet) { - repackedBlobs, err := repository.Repack(context.TODO(), repo, repo, packs, blobs, nil) + repackedBlobs, err := repository.Repack(context.TODO(), repo, repo, packs, blobs, nil, nil) if err != nil { t.Fatal(err) } @@ -279,7 +279,7 @@ func testRepackCopy(t *testing.T, version uint) { _, keepBlobs := selectBlobs(t, random, repo, 0.2) copyPacks := findPacksForBlobs(t, repo, keepBlobs) - _, err := repository.Repack(context.TODO(), repoWrapped, dstRepoWrapped, copyPacks, keepBlobs, nil) + _, err := repository.Repack(context.TODO(), repoWrapped, dstRepoWrapped, copyPacks, keepBlobs, nil, nil) if err != nil { t.Fatal(err) } @@ -318,7 +318,7 @@ func testRepackWrongBlob(t *testing.T, version uint) { _, keepBlobs := selectBlobs(t, random, repo, 0) rewritePacks := findPacksForBlobs(t, repo, keepBlobs) - _, err := repository.Repack(context.TODO(), repo, repo, rewritePacks, keepBlobs, nil) + _, err := repository.Repack(context.TODO(), repo, repo, rewritePacks, keepBlobs, nil, nil) if err == nil { t.Fatal("expected repack to fail but got no error") } @@ -366,7 +366,7 @@ func 
testRepackBlobFallback(t *testing.T, version uint) { rtest.OK(t, repo.Flush(context.Background())) // repack must fallback to valid copy - _, err = repository.Repack(context.TODO(), repo, repo, rewritePacks, keepBlobs, nil) + _, err = repository.Repack(context.TODO(), repo, repo, rewritePacks, keepBlobs, nil, nil) rtest.OK(t, err) keepBlobs = restic.NewBlobSet(restic.BlobHandle{Type: restic.DataBlob, ID: id}) diff --git a/internal/repository/warmup.go b/internal/repository/warmup.go new file mode 100644 index 00000000000..7d96185a78e --- /dev/null +++ b/internal/repository/warmup.go @@ -0,0 +1,39 @@ +package repository + +import ( + "context" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/restic" +) + +type WarmupJob struct { + repo *Repository + handlesWarmingUp []backend.Handle +} + +// HandleCount returns the number of handles that are currently warming up. +func (job *WarmupJob) HandleCount() int { + return len(job.handlesWarmingUp) +} + +// Wait waits for all handles to be warm. +func (job *WarmupJob) Wait(ctx context.Context) error { + return job.repo.be.WarmupWait(ctx, job.handlesWarmingUp) +} + +// StartWarmup creates a new warmup job, requesting the backend to warmup the specified packs. +func (repo *Repository) StartWarmup(ctx context.Context, packs restic.IDSet) (restic.WarmupJob, error) { + handles := make([]backend.Handle, 0, len(packs)) + for pack := range packs { + handles = append( + handles, + backend.Handle{Type: restic.PackFile, Name: pack.String()}, + ) + } + handlesWarmingUp, err := repo.be.Warmup(ctx, handles) + return &WarmupJob{ + repo: repo, + handlesWarmingUp: handlesWarmingUp, + }, err +} diff --git a/internal/repository/warmup_test.go b/internal/repository/warmup_test.go new file mode 100644 index 00000000000..a555a22ae3b --- /dev/null +++ b/internal/repository/warmup_test.go @@ -0,0 +1,73 @@ +package repository + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/mock" + "github.com/restic/restic/internal/restic" +) + +func TestWarmupRepository(t *testing.T) { + warmupCalls := [][]backend.Handle{} + warmupWaitCalls := [][]backend.Handle{} + simulateWarmingUp := false + + be := mock.NewBackend() + be.WarmupFn = func(ctx context.Context, handles []backend.Handle) ([]backend.Handle, error) { + warmupCalls = append(warmupCalls, handles) + if simulateWarmingUp { + return handles, nil + } + return []backend.Handle{}, nil + } + be.WarmupWaitFn = func(ctx context.Context, handles []backend.Handle) error { + warmupWaitCalls = append(warmupWaitCalls, handles) + return nil + } + + repo, _ := New(be, Options{}) + + id1, _ := restic.ParseID("1111111111111111111111111111111111111111111111111111111111111111") + id2, _ := restic.ParseID("2222222222222222222222222222222222222222222222222222222222222222") + id3, _ := restic.ParseID("3333333333333333333333333333333333333333333333333333333333333333") + job, err := repo.StartWarmup(context.TODO(), restic.NewIDSet(id1, id2)) + if err != nil { + t.Fatalf("error when starting warmup: %v", err) + } + if len(warmupCalls) != 1 { + t.Fatalf("expected %d calls to warmup, got %d", 1, len(warmupCalls)) + } + if len(warmupCalls[0]) != 2 { + t.Fatalf("expected warmup on %d handles, got %d", 2, len(warmupCalls[0])) + } + if job.HandleCount() != 0 { + t.Fatalf("expected all files to be warm, got %d cold", job.HandleCount()) + } + + simulateWarmingUp = true + job, err = repo.StartWarmup(context.TODO(), restic.NewIDSet(id3)) + if err 
!= nil { + t.Fatalf("error when starting warmup: %v", err) + } + if len(warmupCalls) != 2 { + t.Fatalf("expected %d calls to warmup, got %d", 2, len(warmupCalls)) + } + if len(warmupCalls[1]) != 1 { + t.Fatalf("expected warmup on %d handles, got %d", 1, len(warmupCalls[1])) + } + if job.HandleCount() != 1 { + t.Fatalf("expected %d file to be warming up, got %d", 1, job.HandleCount()) + } + + if err := job.Wait(context.TODO()); err != nil { + t.Fatalf("error when waiting warmup: %v", err) + } + if len(warmupWaitCalls) != 1 { + t.Fatalf("expected %d calls to warmupWait, got %d", 1, len(warmupCalls)) + } + if len(warmupWaitCalls[0]) != 1 { + t.Fatalf("expected warmupWait to be called with %d handles, got %d", 1, len(warmupWaitCalls[0])) + } +} diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 07ef9cbc085..977950f59b8 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -60,6 +60,9 @@ type Repository interface { SaveUnpacked(ctx context.Context, t WriteableFileType, buf []byte) (ID, error) // RemoveUnpacked removes a file from the repository. This will eventually be restricted to deleting only snapshots. RemoveUnpacked(ctx context.Context, t WriteableFileType, id ID) error + + // StartWarmup creates a new warmup job, requesting the backend to warmup the specified packs. + StartWarmup(ctx context.Context, packs IDSet) (WarmupJob, error) } type FileType = backend.FileType @@ -157,3 +160,10 @@ type Unpacked[FT FileTypes] interface { type ListBlobser interface { ListBlobs(ctx context.Context, fn func(PackedBlob)) error } + +type WarmupJob interface { + // HandleCount returns the number of handles that are currently warming up. + HandleCount() int + // Wait waits for all handles to be warm. + Wait(ctx context.Context) error +} diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go index 31234b96098..e39115b70c5 100644 --- a/internal/restorer/filerestorer.go +++ b/internal/restorer/filerestorer.go @@ -2,6 +2,7 @@ package restorer import ( "context" + "fmt" "path/filepath" "sync" @@ -9,6 +10,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui/restore" @@ -41,12 +43,15 @@ type packInfo struct { } type blobsLoaderFn func(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error +type startWarmupFn func(context.Context, restic.IDSet) (restic.WarmupJob, error) // fileRestorer restores set of files type fileRestorer struct { idx func(restic.BlobType, restic.ID) []restic.PackedBlob blobsLoader blobsLoaderFn + startWarmup startWarmupFn + workerCount int filesWriter *filesWriter zeroChunk restic.ID @@ -58,6 +63,7 @@ type fileRestorer struct { dst string files []*fileInfo Error func(string, error) error + Info func(string) } func newFileRestorer(dst string, @@ -66,6 +72,7 @@ func newFileRestorer(dst string, connections uint, sparse bool, allowRecursiveDelete bool, + startWarmup startWarmupFn, progress *restore.Progress) *fileRestorer { // as packs are streamed the concurrency is limited by IO @@ -74,6 +81,7 @@ func newFileRestorer(dst string, return &fileRestorer{ idx: idx, blobsLoader: blobsLoader, + startWarmup: startWarmup, filesWriter: newFilesWriter(workerCount, allowRecursiveDelete), zeroChunk: repository.ZeroChunk(), 
sparse: sparse, @@ -82,6 +90,7 @@ func newFileRestorer(dst string, workerCount: workerCount, dst: dst, Error: restorerAbortOnAllErrors, + Info: func(_ string) {}, } } @@ -192,6 +201,19 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error { // drop no longer necessary file list r.files = nil + if feature.Flag.Enabled(feature.S3Restore) { + warmupJob, err := r.startWarmup(ctx, restic.NewIDSet(packOrder...)) + if err != nil { + return err + } + if warmupJob.HandleCount() != 0 { + r.Info(fmt.Sprintf("warming up %d packs from cold storage, this may take a while...", warmupJob.HandleCount())) + if err := warmupJob.Wait(ctx); err != nil { + return err + } + } + } + wg, ctx := errgroup.WithContext(ctx) downloadCh := make(chan *packInfo) diff --git a/internal/restorer/filerestorer_test.go b/internal/restorer/filerestorer_test.go index f594760e4af..62d93d64daf 100644 --- a/internal/restorer/filerestorer_test.go +++ b/internal/restorer/filerestorer_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -23,6 +24,11 @@ type TestFile struct { blobs []TestBlob } +type TestWarmupJob struct { + handlesCount int + waitCalled bool +} + type TestRepo struct { packsIDToData map[restic.ID][]byte @@ -31,6 +37,8 @@ type TestRepo struct { files []*fileInfo filesPathToContent map[string]string + warmupJobs []*TestWarmupJob + // loader blobsLoaderFn } @@ -44,6 +52,21 @@ func (i *TestRepo) fileContent(file *fileInfo) string { return i.filesPathToContent[file.location] } +func (i *TestRepo) StartWarmup(ctx context.Context, packs restic.IDSet) (restic.WarmupJob, error) { + job := TestWarmupJob{handlesCount: len(packs)} + i.warmupJobs = append(i.warmupJobs, &job) + return &job, nil +} + +func (job *TestWarmupJob) HandleCount() int { + return job.handlesCount +} + +func (job *TestWarmupJob) Wait(_ context.Context) error { + job.waitCalled = true + return nil +} + func newTestRepo(content []TestFile) *TestRepo { type Pack struct { name string @@ -111,6 +134,7 @@ func newTestRepo(content []TestFile) *TestRepo { blobs: blobs, files: files, filesPathToContent: filesPathToContent, + warmupJobs: []*TestWarmupJob{}, } repo.loader = func(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error { blobs = append([]restic.Blob{}, blobs...) 
@@ -141,10 +165,12 @@ func newTestRepo(content []TestFile) *TestRepo { } func restoreAndVerify(t *testing.T, tempdir string, content []TestFile, files map[string]bool, sparse bool) { + defer feature.TestSetFlag(t, feature.Flag, feature.S3Restore, true)() + t.Helper() repo := newTestRepo(content) - r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, sparse, false, nil) + r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, sparse, false, repo.StartWarmup, nil) if files == nil { r.files = repo.files @@ -177,6 +203,15 @@ func verifyRestore(t *testing.T, r *fileRestorer, repo *TestRepo) { t.Errorf("file %v has wrong content: want %q, got %q", file.location, content, data) } } + + if len(repo.warmupJobs) == 0 { + t.Errorf("warmup did not occur") + } + for i, warmupJob := range repo.warmupJobs { + if !warmupJob.waitCalled { + t.Errorf("warmup job %d was not waited", i) + } + } } func TestFileRestorerBasic(t *testing.T) { @@ -285,7 +320,7 @@ func TestErrorRestoreFiles(t *testing.T) { return loadError } - r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, false, nil) + r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, false, repo.StartWarmup, nil) r.files = repo.files err := r.restoreFiles(context.TODO()) @@ -326,7 +361,7 @@ func TestFatalDownloadError(t *testing.T) { }) } - r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, false, nil) + r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, false, repo.StartWarmup, nil) r.files = repo.files var errors []string diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index cce175ebc1a..977ed42a673 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -28,6 +28,7 @@ type Restorer struct { Error func(location string, err error) error Warn func(message string) + Info func(message string) // SelectFilter determines whether the item is selectedForRestore or whether a childMayBeSelected. // selectedForRestore must not depend on isDir as `removeUnexpectedFiles` always passes false to isDir. 
SelectFilter func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) @@ -357,8 +358,9 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error) idx := NewHardlinkIndex[string]() filerestorer := newFileRestorer(dst, res.repo.LoadBlobsFromPack, res.repo.LookupBlob, - res.repo.Connections(), res.opts.Sparse, res.opts.Delete, res.opts.Progress) + res.repo.Connections(), res.opts.Sparse, res.opts.Delete, res.repo.StartWarmup, res.opts.Progress) filerestorer.Error = res.Error + filerestorer.Info = res.Info debug.Log("first pass for %q", dst) From d71ddfb89bd7bc834b4e8982207a4a2b2825357b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 2 Feb 2025 15:05:47 +0100 Subject: [PATCH 879/893] bump minimum go version to 1.22 --- .github/workflows/tests.yml | 5 ----- build.go | 2 +- changelog/unreleased/pull-4938 | 8 +++++--- doc/020_installation.rst | 2 +- go.mod | 4 ++-- 5 files changed, 9 insertions(+), 12 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d28d2e46fab..647cb5de76c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -49,11 +49,6 @@ jobs: os: ubuntu-latest test_fuse: true - - job_name: Linux - go: 1.21.x - os: ubuntu-latest - test_fuse: true - name: ${{ matrix.job_name }} Go ${{ matrix.go }} runs-on: ${{ matrix.os }} diff --git a/build.go b/build.go index 5a4baf1c645..34bdebaf83d 100644 --- a/build.go +++ b/build.go @@ -58,7 +58,7 @@ var config = Config{ Main: "./cmd/restic", // package name for the main package DefaultBuildTags: []string{"selfupdate"}, // specify build tags which are always used Tests: []string{"./..."}, // tests to run - MinVersion: GoVersion{Major: 1, Minor: 21, Patch: 0}, // minimum Go version supported + MinVersion: GoVersion{Major: 1, Minor: 22, Patch: 0}, // minimum Go version supported } // Config configures the build. diff --git a/changelog/unreleased/pull-4938 b/changelog/unreleased/pull-4938 index 0fa876ca094..d95bca16dd0 100644 --- a/changelog/unreleased/pull-4938 +++ b/changelog/unreleased/pull-4938 @@ -1,7 +1,9 @@ -Change: Update dependencies and require Go 1.21 or newer +Change: Update dependencies and require Go 1.22 or newer We have updated all dependencies. Since some libraries require newer Go standard -library features, support for Go 1.19 and 1.20 has been dropped, which means that -restic now requires at least Go 1.21 to build. +library features, support for Go 1.19, 1.20 and 1.21 has been dropped, which means +that restic now requires at least Go 1.22 to build. + +This also disables support for TLS versions older than TLS 1.2. https://github.com/restic/restic/pull/4938 diff --git a/doc/020_installation.rst b/doc/020_installation.rst index 8566c109eb6..a5388822121 100644 --- a/doc/020_installation.rst +++ b/doc/020_installation.rst @@ -284,7 +284,7 @@ From Source *********** restic is written in the Go programming language and you need at least -Go version 1.21. Building restic may also work with older versions of Go, +Go version 1.22. Building restic may also work with older versions of Go, but that's not supported. See the `Getting started `__ guide of the Go project for instructions how to install Go. 
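With the minimum toolchain moving to Go 1.22, a quick sanity check of a build environment looks roughly like this; the version output line is illustrative only, and `go run build.go` is simply the build entry point configured above (built with the default `selfupdate` tag):

    $ go version
    go version go1.22.0 linux/amd64
    $ go run build.go
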
diff --git a/go.mod b/go.mod index e0b33d39de4..fa1f1707f07 100644 --- a/go.mod +++ b/go.mod @@ -1,5 +1,7 @@ module github.com/restic/restic +go 1.22 + require ( cloud.google.com/go/storage v1.43.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 @@ -81,5 +83,3 @@ require ( google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) - -go 1.21 From 58f58a995d8419b911b3baf31441b09c74934963 Mon Sep 17 00:00:00 2001 From: Srigovind Nayak <5201843+konidev20@users.noreply.github.com> Date: Sun, 15 Dec 2024 13:26:09 +0530 Subject: [PATCH 880/893] parallel: increment progress bar before report function which may absorb the error * sometimes, the report function may absorb the error and return nil, in those cases the bar.Add(1) method would execute even if the file deletion had failed --- internal/restic/parallel.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/restic/parallel.go b/internal/restic/parallel.go index 1c56f684861..eacb54baede 100644 --- a/internal/restic/parallel.go +++ b/internal/restic/parallel.go @@ -77,13 +77,16 @@ func ParallelRemove[FT FileTypes](ctx context.Context, repo RemoverUnpacked[FT], wg.Go(func() error { for id := range fileChan { err := repo.RemoveUnpacked(ctx, fileType, id) + if err == nil { + // increment counter only if no error + bar.Add(1) + } if report != nil { err = report(id, err) } if err != nil { return err } - bar.Add(1) } return nil }) From 8d970e36cf238ddbfccc558466e01f2fff29f64b Mon Sep 17 00:00:00 2001 From: Srigovind Nayak <5201843+konidev20@users.noreply.github.com> Date: Sun, 2 Feb 2025 20:18:56 +0530 Subject: [PATCH 881/893] tests: add unit test to check the progress counter for forget/prune --- internal/restic/parallel_test.go | 106 +++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 internal/restic/parallel_test.go diff --git a/internal/restic/parallel_test.go b/internal/restic/parallel_test.go new file mode 100644 index 00000000000..e203ff7d773 --- /dev/null +++ b/internal/restic/parallel_test.go @@ -0,0 +1,106 @@ +package restic + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/ui/progress" +) + +type mockRemoverUnpacked struct { + removeUnpacked func(ctx context.Context, t FileType, id ID) error +} + +func (m *mockRemoverUnpacked) Connections() uint { + return 2 +} + +func (m *mockRemoverUnpacked) RemoveUnpacked(ctx context.Context, t FileType, id ID) error { + return m.removeUnpacked(ctx, t, id) +} + +func NewTestID(i byte) ID { + return Hash([]byte{i}) +} + +func TestParallelRemove(t *testing.T) { + ctx := context.Background() + + fileType := SnapshotFile // this can be any FileType + + tests := []struct { + name string + removeUnpacked func(ctx context.Context, t FileType, id ID) error + fileList IDSet + wantRemoved IDSet + wantReportIDSet IDSet + wantBarCount int + }{ + { + name: "remove files", + removeUnpacked: func(ctx context.Context, t FileType, id ID) error { + return nil + }, + fileList: NewIDSet(NewTestID(1), NewTestID(2), NewTestID(3)), + wantRemoved: NewIDSet(NewTestID(1), NewTestID(2), NewTestID(3)), + wantReportIDSet: NewIDSet(NewTestID(1), NewTestID(2), NewTestID(3)), + wantBarCount: 3, + }, + { + name: "remove files with error", + removeUnpacked: func(ctx context.Context, t FileType, id ID) error { + return errors.New("error") + }, + fileList: NewIDSet(NewTestID(1), NewTestID(2), NewTestID(3)), + wantRemoved: NewIDSet(), + wantReportIDSet: 
NewIDSet(), + wantBarCount: 0, + }, + { + name: "fail 2 files", + removeUnpacked: func(ctx context.Context, t FileType, id ID) error { + if id == NewTestID(2) { + return errors.New("error") + } + if id == NewTestID(3) { + return errors.New("error") + } + return nil + }, + fileList: NewIDSet(NewTestID(1), NewTestID(2), NewTestID(3), NewTestID(4)), + wantRemoved: NewIDSet(NewTestID(1), NewTestID(4)), + wantReportIDSet: NewIDSet(NewTestID(1), NewTestID(4)), + wantBarCount: 2, + }, + } + + mu := sync.Mutex{} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + repo := &mockRemoverUnpacked{removeUnpacked: test.removeUnpacked} + reportIDSet := NewIDSet() + bar := progress.NewCounter(time.Millisecond, 0, func(value uint64, total uint64, runtime time.Duration, final bool) {}) + report := func(id ID, err error) error { + if err == nil { + mu.Lock() + reportIDSet.Insert(id) + mu.Unlock() + return nil + } + return nil + } + _ = ParallelRemove(ctx, repo, test.fileList, fileType, report, bar) + barCount, _ := bar.Get() + if barCount != uint64(test.wantBarCount) { + t.Errorf("ParallelRemove() barCount = %d, want %d", barCount, test.wantBarCount) + } + if !reportIDSet.Equals(test.wantReportIDSet) { + t.Errorf("ParallelRemove() reportIDSet = %v, want %v", reportIDSet, test.wantReportIDSet) + } + }) + } +} From 90e2c419e4112c3a5577418eccab32a9417957bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Feb 2025 20:16:08 +0000 Subject: [PATCH 882/893] build(deps): bump google.golang.org/api from 0.204.0 to 0.219.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.204.0 to 0.219.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.204.0...v0.219.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 30 +++++++++++++------------- go.sum | 66 ++++++++++++++++++++++++++++++---------------------------- 2 files changed, 49 insertions(+), 47 deletions(-) diff --git a/go.mod b/go.mod index fa1f1707f07..af2fdddb4ba 100644 --- a/go.mod +++ b/go.mod @@ -30,20 +30,20 @@ require ( go.uber.org/automaxprocs v1.6.0 golang.org/x/crypto v0.32.0 golang.org/x/net v0.34.0 - golang.org/x/oauth2 v0.23.0 + golang.org/x/oauth2 v0.25.0 golang.org/x/sync v0.10.0 golang.org/x/sys v0.29.0 golang.org/x/term v0.28.0 golang.org/x/text v0.21.0 - golang.org/x/time v0.7.0 - google.golang.org/api v0.204.0 + golang.org/x/time v0.9.0 + google.golang.org/api v0.219.0 ) require ( cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.10.0 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect - cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/auth v0.14.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect cloud.google.com/go/iam v1.2.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect @@ -58,10 +58,10 @@ require ( github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect - github.com/google/s2a-go v0.1.8 // indirect + github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect - github.com/googleapis/gax-go/v2 v2.13.0 // indirect + github.com/googleapis/gax-go/v2 v2.14.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/kr/fs v0.1.0 // indirect @@ -73,13 +73,13 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect - go.opentelemetry.io/otel v1.29.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect - go.opentelemetry.io/otel/trace v1.29.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47 // indirect + google.golang.org/grpc v1.70.0 // indirect + google.golang.org/protobuf v1.36.4 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 25d5f5874b4..80b1a44ca43 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= -cloud.google.com/go/auth v0.10.0 
h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= -cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= -cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/auth v0.14.0 h1:A5C4dKV/Spdvxcl0ggWwWEzzP7AZMJSEIgrkngwhGYM= +cloud.google.com/go/auth v0.14.0/go.mod h1:CYsoRL1PdiDuqeQpZE0bP2pnPrGqFcOkI0nldEQis+A= +cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= +cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= @@ -120,15 +120,15 @@ github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1 github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= -github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= -github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= -github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -221,14 +221,16 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -265,8 +267,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -313,8 +315,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.7.0 
h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -329,8 +331,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= -google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= +google.golang.org/api v0.219.0 h1:nnKIvxKs/06jWawp2liznTBnMRQBEPpGo7I+oEypTX0= +google.golang.org/api v0.219.0/go.mod h1:K6OmjGm+NtLrIkHxv1U3a0qIf/0JOvAHd5O/6AoyKYE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -338,17 +340,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47 h1:91mG8dNTpkC0uChJUQ9zCiRqx3GEEFOWaRZ0mI6Oj2I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod 
h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -358,8 +360,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 060a44202fb0da073414e6c6672a5abfc28ac9ed Mon Sep 17 00:00:00 2001 From: Winfried Plappert Date: Mon, 3 Feb 2025 21:07:04 +0000 Subject: [PATCH 883/893] ls: sort output by size, atime, ctime, mtime, time(=mtime), extension (#5182) Enhancement: create ability to sort output of restic ls -l by name, size, atime, ctime, mtime, time(=mtime), X(=extension), extension --------- Co-authored-by: Michael Eischer --- changelog/unreleased/issue-4179 | 11 ++ cmd/restic/cmd_ls.go | 138 ++++++++++++++++++++++++- cmd/restic/cmd_ls_integration_test.go | 61 ++++++++++- cmd/restic/testdata/backup-data.tar.gz | Bin 11704 -> 12126 bytes doc/045_working_with_repos.rst | 47 ++++++++- 5 files changed, 250 insertions(+), 7 deletions(-) create mode 100644 changelog/unreleased/issue-4179 diff --git a/changelog/unreleased/issue-4179 b/changelog/unreleased/issue-4179 new file mode 100644 index 00000000000..37955b06070 --- /dev/null +++ b/changelog/unreleased/issue-4179 @@ -0,0 +1,11 @@ +Enhancement: add sort options for `ls` command + +In the past, the output of ls -l was sorted by name. Now it can be sorted by +one of the specifiers (name|size|time=mtime|atime|ctime|extension). +Use --sort to achieve this. + +Reverse sorting has also been implemented. Use --reverse to indicate reverse +sorting. 
+ +https://github.com/restic/restic/issues/4179 +https://github.com/restic/restic/pull/5182 diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 06ae6cc2054..6e0d230b18a 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -1,11 +1,14 @@ package main import ( + "cmp" "context" "encoding/json" "fmt" "io" "os" + "path/filepath" + "slices" "strings" "time" @@ -36,6 +39,10 @@ will allow traversing into matching directories' subfolders. Any directory paths specified must be absolute (starting with a path separator); paths use the forward slash '/' as separator. +File listings can be sorted by specifying --sort followed by one of the +sort specifiers '(name|size|time=mtime|atime|ctime|extension)'. +The sorting can be reversed by specifying --reverse. + EXIT STATUS =========== @@ -59,6 +66,8 @@ type LsOptions struct { Recursive bool HumanReadable bool Ncdu bool + Sort string + Reverse bool } var lsOptions LsOptions @@ -72,6 +81,8 @@ func init() { flags.BoolVar(&lsOptions.Recursive, "recursive", false, "include files in subfolders of the listed directories") flags.BoolVar(&lsOptions.HumanReadable, "human-readable", false, "print sizes in human readable format") flags.BoolVar(&lsOptions.Ncdu, "ncdu", false, "output NCDU export format (pipe into 'ncdu -f -')") + flags.StringVarP(&lsOptions.Sort, "sort", "s", "name", "sort output by (name|size|time=mtime|atime|ctime|extension)") + flags.BoolVar(&lsOptions.Reverse, "reverse", false, "reverse sorted output") } type lsPrinter interface { @@ -277,6 +288,12 @@ func (p *textLsPrinter) Close() error { return nil } +// for ls -l output sorting +type toSortOutput struct { + nodepath string + node *restic.Node +} + func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []string) error { if len(args) == 0 { return errors.Fatal("no snapshot ID specified, specify snapshot ID or use special ID 'latest'") @@ -284,6 +301,18 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri if opts.Ncdu && gopts.JSON { return errors.Fatal("only either '--json' or '--ncdu' can be specified") } + if opts.Sort != "name" && opts.Ncdu { + return errors.Fatal("--sort and --ncdu are mutually exclusive") + } + if opts.Reverse && opts.Ncdu { + return errors.Fatal("--reverse and --ncdu are mutually exclusive") + } + + sortMode := SortModeName + err := sortMode.Set(opts.Sort) + if err != nil { + return err + } // extract any specific directories to walk var dirs []string @@ -347,6 +376,8 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri } var printer lsPrinter + collector := []toSortOutput{} + outputSort := sortMode != SortModeName || opts.Reverse if gopts.JSON { printer = &jsonLsPrinter{ @@ -356,6 +387,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri printer = &ncduLsPrinter{ out: globalOptions.stdout, } + outputSort = false } else { printer = &textLsPrinter{ dirs: dirs, @@ -393,8 +425,12 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri printedDir := false if withinDir(nodepath) { // if we're within a target path, print the node - if err := printer.Node(nodepath, node, false); err != nil { - return err + if outputSort { + collector = append(collector, toSortOutput{nodepath, node}) + } else { + if err := printer.Node(nodepath, node, false); err != nil { + return err + } } printedDir = true @@ -409,7 +445,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri // there yet), signal the 
walker to descend into any subdirs if approachingMatchingTree(nodepath) { // print node leading up to the target paths - if !printedDir { + if !printedDir && !outputSort { return printer.Node(nodepath, node, true) } return nil @@ -444,5 +480,101 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return err } + if outputSort { + printSortedOutput(printer, opts, sortMode, collector) + } + return printer.Close() } + +func printSortedOutput(printer lsPrinter, opts LsOptions, sortMode SortMode, collector []toSortOutput) { + switch sortMode { + case SortModeName: + case SortModeSize: + slices.SortStableFunc(collector, func(a, b toSortOutput) int { + return cmp.Or( + cmp.Compare(a.node.Size, b.node.Size), + cmp.Compare(a.nodepath, b.nodepath), + ) + }) + case SortModeMtime: + slices.SortStableFunc(collector, func(a, b toSortOutput) int { + return cmp.Or( + a.node.ModTime.Compare(b.node.ModTime), + cmp.Compare(a.nodepath, b.nodepath), + ) + }) + case SortModeAtime: + slices.SortStableFunc(collector, func(a, b toSortOutput) int { + return cmp.Or( + a.node.AccessTime.Compare(b.node.AccessTime), + cmp.Compare(a.nodepath, b.nodepath), + ) + }) + case SortModeCtime: + slices.SortStableFunc(collector, func(a, b toSortOutput) int { + return cmp.Or( + a.node.ChangeTime.Compare(b.node.ChangeTime), + cmp.Compare(a.nodepath, b.nodepath), + ) + }) + case SortModeExt: + // map name to extension + mapExt := make(map[string]string, len(collector)) + for _, item := range collector { + ext := filepath.Ext(item.nodepath) + mapExt[item.nodepath] = ext + } + + slices.SortStableFunc(collector, func(a, b toSortOutput) int { + return cmp.Or( + cmp.Compare(mapExt[a.nodepath], mapExt[b.nodepath]), + cmp.Compare(a.nodepath, b.nodepath), + ) + }) + } + + if opts.Reverse { + slices.Reverse(collector) + } + for _, elem := range collector { + _ = printer.Node(elem.nodepath, elem.node, false) + } +} + +// SortMode defines the allowed sorting modes +type SortMode string + +// Allowed sort modes +const ( + SortModeName SortMode = "name" + SortModeSize SortMode = "size" + SortModeAtime SortMode = "atime" + SortModeCtime SortMode = "ctime" + SortModeMtime SortMode = "mtime" + SortModeExt SortMode = "extension" + SortModeInvalid SortMode = "--invalid--" +) + +// Set implements the method needed for pflag command flag parsing. 
+func (c *SortMode) Set(s string) error { + switch s { + case "name": + *c = SortModeName + case "size": + *c = SortModeSize + case "atime": + *c = SortModeAtime + case "ctime": + *c = SortModeCtime + case "mtime", "time": + *c = SortModeMtime + case "extension": + *c = SortModeExt + default: + *c = SortModeInvalid + return fmt.Errorf("invalid sort mode %q, must be one of (name|size|atime|ctime|mtime=time|extension)", s) + } + + return nil +} diff --git a/cmd/restic/cmd_ls_integration_test.go b/cmd/restic/cmd_ls_integration_test.go index f5655bdff9b..29e153419d0 100644 --- a/cmd/restic/cmd_ls_integration_test.go +++ b/cmd/restic/cmd_ls_integration_test.go @@ -19,7 +19,7 @@ func testRunLsWithOpts(t testing.TB, gopts GlobalOptions, opts LsOptions, args [ } func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string { - out := testRunLsWithOpts(t, gopts, LsOptions{}, []string{snapshotID}) + out := testRunLsWithOpts(t, gopts, LsOptions{Sort: "name"}, []string{snapshotID}) return strings.Split(string(out), "\n") } @@ -45,7 +45,64 @@ func TestRunLsNcdu(t *testing.T) { {"latest", "/0"}, {"latest", "/0", "/0/9"}, } { - ncdu := testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, paths) + ncdu := testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true, Sort: "name"}, paths) assertIsValidJSON(t, ncdu) } } + +func TestRunLsSort(t *testing.T) { + compareName := []string{ + "/for_cmd_ls", + "/for_cmd_ls/file1.txt", + "/for_cmd_ls/file2.txt", + "/for_cmd_ls/python.py", + "", // last empty line + } + + compareSize := []string{ + "/for_cmd_ls", + "/for_cmd_ls/file2.txt", + "/for_cmd_ls/file1.txt", + "/for_cmd_ls/python.py", + "", + } + + compareExt := []string{ + "/for_cmd_ls", + "/for_cmd_ls/python.py", + "/for_cmd_ls/file1.txt", + "/for_cmd_ls/file2.txt", + "", + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testSetupBackupData(t, env) + opts := BackupOptions{} + testRunBackup(t, env.testdata+"/0", []string{"for_cmd_ls"}, opts, env.gopts) + + // sort by size + out := testRunLsWithOpts(t, env.gopts, LsOptions{Sort: "size"}, []string{"latest"}) + fileList := strings.Split(string(out), "\n") + rtest.Assert(t, len(fileList) == 5, "invalid ls --sort size, expected 5 array elements, got %v", len(fileList)) + for i, item := range compareSize { + rtest.Assert(t, item == fileList[i], "invalid ls --sort size, expected element '%s', got '%s'", item, fileList[i]) + } + + // sort by file extension + out = testRunLsWithOpts(t, env.gopts, LsOptions{Sort: "extension"}, []string{"latest"}) + fileList = strings.Split(string(out), "\n") + rtest.Assert(t, len(fileList) == 5, "invalid ls --sort extension, expected 5 array elements, got %v", len(fileList)) + for i, item := range compareExt { + rtest.Assert(t, item == fileList[i], "invalid ls --sort extension, expected element '%s', got '%s'", item, fileList[i]) + } + + // explicit name sort + out = testRunLsWithOpts(t, env.gopts, LsOptions{Sort: "name"}, []string{"latest"}) + fileList = strings.Split(string(out), "\n") + rtest.Assert(t, len(fileList) == 5, "invalid ls --sort name, expected 5 array elements, got %v", len(fileList)) + for i, item := range compareName { + rtest.Assert(t, item == fileList[i], "invalid ls --sort name, expected element '%s', got '%s'", item, fileList[i]) + } +} diff --git a/cmd/restic/testdata/backup-data.tar.gz b/cmd/restic/testdata/backup-data.tar.gz index 6ba5881ae4f2a1824bd9a7978ba772a6f86d5384..5d7cd215cab0eb3da390acd74f11dc7000a682f8 100644 GIT binary patch literal 12126 
zcmcgycUV-{wiiSg1s#Y55OClUrHQC0B?5yI;R*_(2nwMXR8Ub+q(~cy8WIacMNpBZ zK|rOb^sYuR7C@x8K@sUq+8{G?-Z`@t`QE+n-S2+yz2x!VS?s;`+H3t*-NQIfnc1(PXsVx?FMQU@IvDGGPU=lNFYfPoJZ56bFwAxNmTd;aG(Rp^-_S}_qW@jT z!2$EYe1neqO@aC2&aAXMR*5y^Q8_s~9@z8-1SV}iH1KZ6;FMOHReY<8mF$|-q$Jt+ zySfL;%IcP}<`h+SIp58S(s(u@%dk^haxRXgy|!a2B2MohC%nUK0Yf+6&1v2cF_jxF ze&noC+S$4tkEcA%`5jY31znMA7OF@z_*4#Z<+Yc2b$X_kj*e>C%p(klr1bhNiD`Nr zQ@Orf($*U9D_Qyb?L`NTPF9yGJh$YOKh>HTDE#hKr1h2LEH01jU>c#Idw1xC^=*ge z5i%S0cJ4B2%}v^0zr|H$>zWk7}Kyewa>MDFt1-dcsg96b!`ymj8uo&d`8>D zI(tox`3$3uTnR==^#%6mB)iqmmS9A&Ua@WxvXuo#$|4Q#oapNE&lqL<48`=Y4yOh` zpVEpvUqRk=|Lca|H64l9LBk8&ZQ9;*rEdiD#T|#HC#Rk3r|OAqzn8`lrG}iS+QJ(F z(bk%-$ypcV)*MW>zGDB}(*3%DZ-#ZSij~Lpw*#72R4i|-^?hQgsl4I>w|T>XlD!{) zODDbwv8g9MUSP>?2pCJ*a(l&AS>s5jo3dS{$)&T0OZZ$n4-M&aX`LU~ZBA<0ySP&s zvQyKe&bQ5+g@*d3au+u{G#Bt2oYgA)`JEHKm8G(&Mnuj=!%6+NV!~o*jN5FnpU1vn z;QMmeGcqDJ@R?>tsn@-QU(!FDcBLr)IGA1c!#&2sZcB&$7GlJ3DCkIEafMvz`y@Kk z=fgxs?86sdMQa&MzBsKi5cs;WBPZ}M@1BZ;!^e`*Wide$J8r)Wc6N78JTmQ>&Z(2r z=wCPfm>1XLLGUV9E@Qg)MKg+hLKoK+PE@$ADmr*+`QT8YDf{GL)Av^wTLpHWGAs41 zY1DNbsq2imS~BvKJrK+bFK-L(=m{2;&ushKB>&{oHM&~dD^W_hT3KZk(`|n5_h^*p zPjD2Lw75Q_$9Yv>HuHG&w04?5!pVrss;%D13UYk4P}gXxtR{wDl0M$lddDh1Ud9rEgRVZ*IL`D2Zr zJ1qBnpF7~R+x>M#w1@4|g7Wja9>W9i?9~Uyb}g*C{<-C|v;O{}9r4~)*ZRcmV#nTG ztzdWbYt$7TAC|K)tnn*0iCC8Xu|VM+gLo`l@0m5ex6dH^Wkqb5yw8XF^0=aR>+5|M zFpqV5n+37*wR2m&yNW{|^rVWj-rF(0j06R84-T3zgV_4fvHEuGg>512MHWW9J|&C6 zl4o}d4b~p3)}lLp`nffoJ#t4uEZdOP|HFp#{NKvXTJ9O(bW}1^IvyYKWA?O1I|WSS z-<~uq<$3dLd%T;D*KZ53YlQ z;U^j4fqB6nGHY}FM?Xus8cmN|R8E<*lb`oDxaCjNZ{$2#;@6f_QKXv|(5ioL75_Ee zsv-DUM_uv3rl87I!q*P0 z%rs|KHn&yN>ExG&^LLI;f4uGRozbKwJ9xxG<*4rfo!$Q^s5HvWsiE(zLq>kl_)o{Q z(sO!yG*}-_$%V)8^TS6xdAv^MzA;C;{)ha~4UUEHoyrYDjLXLs)&+Af#xPnrm(400 zZxg?D#QX2hn64}B`RU}UrMyDz>3%<=DTd7s_-LQO^{xI8vy9VUUh2b_%9ZDTK2|`abjC@(s4koL;(^vO!>J3o@kM)o?wHi?NzYCWK3+B7 zp{<&A)_YoEoKv4%;rBVBME7{1?viVhouL-do)d1{u(Ln)crLNsGV zxzGpj*&r({dCFKvxX^f(u;dF<7kLrIS)m>x&rRS{7(Q2Qf=@ddxvY`aFA6-i0FP@D z@TrC@gMGhEC?K|bcm`P{*!DBW+J@c2N4m5g-Da**TBRh}Z6xCK<1A99i)9+H!+$Xd zk+D&5OuG*pQzQg@b(Llb(e^11F9Bir^MIU7k-t^0Y7~Rr-mu%{DC~ZX7o{63iiGy^ zO_FLjQO0QvtnOm#`FG#5Qh!WLPWkbdL!?p;DGo1$J={gWDjDwEz7EJY7r=?yiXezK zlIH^X5sj=ld%Z(s0cuDIG^B@ug`tM}fXoqvBNv1LZ`72R2#`zWfPkK0&GrqrYZd%E{0!`} zmOND!sJOkA<=(e{n*Pjpn*N4n!mh1S8eJ2axN1iu?KK_v&=CgY7X^c{vx5D`O!!<$ z!&PVpB_ty{Hlyz<)}(8Wq*}&wP1oVZ-Q|{YV6jeVb4I3Lh+z8={N0@z&a1*@k0!(0@f|_K{ZS` zv3(hm#eqDH0Aj+4%MdB2@fONR{sGJtuUHi$nKH;+WG>K<<1{^u5-2uNJ0Q^Qg!~=t zfSqpSId9C`|7?^{@q@DrqzOlWt^pzSmIxC+8v7854X0hj$NQO!G)S2)HFhW6%^2Hd z?>t}M#+iggBdwt{be6~ysZ18>ooAF?2vK24npqitzKEh^_$i(o8JgsUrgh1Xq$fP# zNroaXk$yILnfL#bjYD~?ous|)zoi_$<`J=7y_xhdemyU8!@8eWy#4y^w(4UlaEn3I zX)CIFHj+spDPJu{tI`25ljujJ)u_7|AmEv3Y$YIb=fY|_u9l+6kCsp|D78T#QZ`X_ zfJl_xxLn;@e?v?3H@PRANjV-$HOzyZ4zSaOR0nlVkQ9)c(fwpV$Py7ij~1@p0IMZ1 z7EFNbG8jY^(9aM|rWN11eBzq~b~p%U@2KaSJmZ^aUmgiy+V`j#CPh02l#?e@xZaH- zFhB$*LX!?Y;)UrV1V!nmn$Eqlsjc;|-AW=71czD2EOl$VEluq8$Gm;41WKHj|8b zrv*Xrn+Zl!p-~IpWZ_!5Vw>@+CDnF5W4p@Nds|XoFPS3tfA;Br8w{>JrPLFt(fUYb z*fRAnk@!97VTr5Hsu|o{%x7eLZxZdd_%!8YyAhNc=uOMvzV|Ur?LwoeAsN$DExOM) zphHmvlOlZD1BkYR!j0u9s=LTt(oP$x82z$vh?%v^d?=IHuGT<0{c) zejx1@h8Uy)w`Gu5%Ur=g{#Za*A>6lsWCobrlX0_zNWg;7xG9I)M^<-PMCtCq{7UQk zO4iCR)5IHE7(3&s376{{y(apBm&hOjBNh(G69)wNp{eOe#=2?}fnfyawje;oP*JD? 
zJ_f@uAh&mwlH$Inai+8+yA^1!xl<8WpXZ7kN=oO2Q3b|NJCp3Yem$fRkaTEQ8Wm{N zfjbwy!VYCw3gquFQ}zLOum2Omb7oPdIlRtj`F3reex{B3a!R1n1Y4~1wqbkwVb(wf zk3A=JBdB3Fru29WVmE-ykpN@Wp*J<*YE)VuqRSBEzmH0radvNHVgO8ZNP`5d5w7Kh z?Vv9(u6aPL?r0_Rr=;dvar;}NL7Iun1CCi3C(NaBzXDT9BNjHHdNBT;2P}Ba1-5sv zTHJxFu)q=cB&qV_K;|w5p6_ro);;ND(bUGIWXf^MA-3P8fP2$m{9Z_5T1Z&XW*&J# zGBsi49*F*61uXj!o5jE~lMeDsAh1K=_6?X+oJqf=vaJx1^&~+aF_hi~c*<8x2s~<; zyh94<-Y%qjZ`rWR#Lp(}?yhW8yS}PRuw4evev5}-vT+6EtX=}lHvqm|q=ESb6u}=p zx1xi5i`T&*(26`rme<}$Q6*Tt`%J>hB@>pOTJJ-BpT_(%1vb$Xrj|G{GIwdU?;HD z_mll|i*`~SP9upO`8Q61>k5eQvy)7t7E>dg$`y7-yzYYh>F>wW1*ySUTY1 z!^&_6dGsg(6)+cft$|&ZSn_WHvYxE4w>*XA6`e48Ia6e=CzaC8kNnYi{Xj}3vRSov`Jy*_Coff07Dfmz>_M)j($-@T zq#Nau1OZN90H}LX!>q?0faQtv$I4{gR-<`DD z-bJ?yo~1laE=Mx)`$8ip#>q&Fq(5vT}K87b1l4yq2T z))4A8kePK<=%~=!+*1|UjC!F47NEuJK?B3{;coI+C3+%kXh06aWXwhd`JutEY?a3j z_coFR`6XNOT0U8+&dd`iZz~XMRQu)Z%6*SK?F;cTIBemlg9RqgY;kA-JU0+lyD&hU zeuNVb&dbCWwFE=iY#@7K5wL)QQ`Jh!`v)#s&8^w0K$bw}F%-CMW!NLu&$Z)5-bxpT zM}V0VkXsZwYdLCYE~)|pqy>WFAds^#5Fy}KqJ*6&Ap+hOuXm2RUazTOAO#Z_GLdOz zQ!|&|WWN_yZ_hi8nX*fNyooXX9o`5Fy$WPgiR2ZaybAQ2Mo`{0-0Tgjh7rD8VZcon zn%Dp?aFYsME=#D1i|Qgl{TwjCI7g0N-u!fB?R4$_^19v~j(1;qh~!}Wkw)La0{%zP zN4o_a&RGjG+kBV(QUfI@p$bn zR1&6*ziL2~U?$Mh()h1Zu9Iz#dgo;l-M>Ai8*++^d1B7L^0Jv^Aj!iKQ#4|ADVS&# zCe8h5xy4Av3Lpcnuv!6NV+Wht&B37GVL?F6L#XQF5Rzwy+EYPEbUjtilB1UsJhJtf zdwpSrU%7cj?q$2rZ*+NZh_iN*VfM73isZjWZ)%O(o5H#k=}*~^i6lZ%Zf&M(>*!+na- zm+H}%&=K3v5o6$Ve+*YF)MztBz9l8o^e=|NSI$zysdXs=p~&*Fl?>a$KL?Z?<6K$! zb<3ibMT*1mwEv&+FDWQwnrl9k-P8wWT+VSQhp0NEccXa1y|>G$s?vm&9Ir%`Kt~ZD zLx2x2F@Vh=hD}FMNgfV5Sg0vOBufLDCNXM+fSL`^#$v#8*bK=v;uOBK7Ryn?PBJc; zlYPinqU&l;DKUD^issSs$jQggv>?pa3;>QX3wE!>>=c11XCa8d!C(`GQL`Gz?g+-U z@VOgH^H&&>P6E&Sq@e50QPwLm71Xwse3N{t`tKQdIluMh@ksC5pG$frS~1o-V0_bn z-5xl3J%VW3jATq2dr`ozK%*sCB@Mvp)!2DAV@|}ex-X^RVNbyjKgc}cKyL#p#m0Vs zxlTD@wxh_2gSoMfGYSSsFKwh1VV<1H{W!00L(4hf@mL$SQ8{1oqOln!-`D8z`4|_l zjWR;hokTK&5?)_0U`(tw3=|%Uq~u!a{-^Fz=`3-KJZoh24bYA7uBz#$k;cDi{}Wy!NmGZ=b6 zg@8ZCRTsx=e{ppiEfEwjivRoB^S_-_(O}xWT19cSH#sdAn2U0=Ib^#1yO#P@XZ6KN zw`@M0+Hr7Js5o9+3G}`S-RBwHFb}7XGZl*(uEJQJftzQHhK|sKo6%kDQ1ycG%g}7W zL)nO0=nX1x?|48|MHVqEH+(8rvplvmIJuXDb>0#PF~-hdurWOH2UNWzNU{XweFEg& zC@%vIbqmQ75TDMYlg$)=g4G#DH|nv0xBaMdYbN;JOYJ?06KZuXaX zIoUIZgP1ZH+N>q@27b6cO+PzRm3&W)&ew)1^oB<(9D0-9csr)J<4;Lt)&C-u{qe3T zC<9!a$Xe3Uj2h;r3X+=}k!X`i4kN}U>XR=AOkTFP-lCSOU;L}$@F49zChM!_oo7D2uqjX`~D~tQLeQ7ZW($14X}xGNOCyV2C;Z(llVicEHURxS71* z31vZ9@`sWHRY_Ua{U}~Ul)@JwO)?|wQ(mnF8u^5X<^tQl1o!v-1kZ}&?G2mAi9S5L z?)m+jz?V(=n}(ul$I1S_`=E#|rV4YAWH-Lo-@^dD6UjJ-yN9_GU(hA66fg!gWMRpM zVHZp8MWE5`=QazPq?wEsNxk4vuZky36=K(L(it$-dK-IR^u-ZVUPte*P6tuQv0vY<^HBI04(@y3+OP=H; zt2X1yQzp1%eQOz3@~l_$;@xQIkm_;^+d%zo5*=&p1p>Hgi%ak_>{XF2MNKT z@+w>$*N@4d7(~j{g{U!shnCU{a#K4zZm@%}Hv) zzHDb`SMRZ92&Q0|+_DgznC$B?)FdMua8P1|D9J`m;csN>P*VjU{chA$A8P6mkU1?L zr>QKm-9zC2jV#q1>Lq_TtaR{GM#sHJ^I3z&gT|8GQX&(0`ky|`B0-8{PCZOhVVhtC z5cz7RZ%YzPZ*y2ehu3J42s04oIM=Ph2x$S>YVW{HW7IN2 zj3uGw)&a)s@e`k)qLAnD*7KC6YB-U`Z^-wf?P|G{rpgxhW=;w>h%pC!$>_FEdH64i zh*Xox#|MgN0Ko9eX~FxsyWm$~WQ*jkwU}y=xSw=O1Ebjr(gnYf@uZRV9Y&_}u^3gFeA_E#C6O{xXxZpJP7(+C z@_*^6WG&QA{*9vpc4Ur-W?Al})KO?nCdYZF!dqhAgIr w{Q1NPw~+8IDjg*+(l@o%22CdzQ~x?vzf63G|=5ZYdzt z@y6#|S>rIXr>ru+((H!~bFa+~J$||BvcgZPF~8cUo|4*E@PmTJK%IZ`f{H(9OU+rS zwL-+dK-x4h)o#sW>FiH&vOS|((q1PNya$iIDR{3K?CJff_ej#{@V*i!`H{m;gWnB# zJiGpKr_zkZt&j4U?9#HnuCTMQRxy@*Srnga;C12^7Ki5-RVDm@5OtGd)aT3tP9TEcetWBXHb?YR@$r@#Kg#S-S-$R z54!(;n%k!N6}@McYx45;HceC7t=&~?e4x&KBuwez==H6y?A_FL-Yn9*-2ZKql1q2V z@zu4yB^5t2`gtXJO@{WV=j(HViW;A!yVmcEk!_CfAKm9_xj$q^Q;pYIABTGH%9&2u zGppa{`0rfR88#?UKazeqTxr#zUu|tue70pQDY@cWUQ?_$61TJK(D2g2THS_MQ*Byu 
z-|dMLzZ`TtJT>{s@-~BnIX^U-M=H1v7rylUp#9yMO zZHtq&PQlHf;m%c>9iQIyg%tHam(sS@wH~~a=i7NxK5*i7aBr8E@6R2!bw`GxcvdZU zt-Gpvf|rg;*ROU-UwS?wxuji2ZIxqLxoC!d-V4`eg`SB~H#2?v`h>zIgBJH|*3GCl z7`YZR$0SLcnK77T;Bh&^-+8NUV$7%8@~?gU9?wkrxcpG(o1`e0T9xdj8g3<48IdW4 z`g&)Mrj5QF9q{vhXQ`*8-F@T9n(QlH`x$%b%!K9(jaIJrhhmRKS#}T3sMYy!J3;;9 zXv^)8-RaNo37wzk)2Dk=?2D=~>fqUft~%ADcs&xAhY8UwfR$m9f5E zKkA=6yq~#~6gbqmwCO_s2(O~PapcsfZPv&y*PirU`BWR>*Oz@TY1QiLx7T}$jl<=} zs){1rvR_JS+k9&4oRj|McFn^QMg0RW9)ua*A3M!cJ2WU|`h2u0!lA2TG*L=%p5`m7 zJAuh}2h~s1W)`Ur);NZKDhTix-LS-Zc~6$hK;);`T_#Z;>)WKRKYGo4xb|||F~flO zX>BHMeIr8;w6`z~)4E&&I;(vy5AVuY=hx@g`zELEXiIUV{F$n^IbWX7^i5S#@a@{Q zZ>MY3r`~6$Ry4d*ajnW_*3{JAt+t8F^|LFiytC7{|J9-QuQT_IN%{BH9w~ED>g?ad zh@Odfn{`X+URq6H$+C>%J2O?5jlLa!)NI`roe}8UUS6Q@TPEsoIlWER>QZfWsYBUg z|3J6)UN`-T9{KeyUj3@?uLKOI4Q`e_^p{L>yfJvIWetuc9(sDAV3FFPs*pRj6Mv2@ zxs=-Wo4>RFyERVYN1TtpcgXzijiONa(V%O=#&rfvyHmz|?>!k0vkGles(MudIrThh7^=Vjc{I9I!t8I9t)M zzgo2EQdd>ycveaGpAC=wZTsE^zvt=AVx9&6fp@r-`&uQ_b;rKxmfXV$zFP10$8 z)s>juk+~}HwNt~jJ0-f#^$tlniGvO`q1AiS4g5+x29ES9-FYDG(>}@*GqP({cAqA! zED-E0JRvYe%t%>2*mME=m;DRfW$AcXh=8#2FQNGd#f$_5tHgFsAu7YQ0)n%inlx;g z^}T@bZOP2pg0sFi(h}UchK?PHdQ6s%U$eX%wWV0ujNpw-OleYlsOkvs@z%2w#s*IU zQ*B=km==BeKN4R?vq@ik?>e%)4dd}$^rmeCu2$i7n9 ze4(H^-e*t3Ps2D%NQ$o%Nl6Bk7g25Em{}9bgKCaoM~KmAz)r8`h-ZV0Oku~!s++^Z zVfOWmZCI-K(np)q#6^=FWV0lVS$Fi4rYTeCU0OfVT_$wbJVAPWC{cAUA&Jg7HUl$! zsRebViMbL^vm$lv77H)Sie;_goS5@P>A<(;LEXJdZC@+2=ucD~NjtZ+vHg0$OwuCh zIBDTU7iB8ayS2&9cH%_k3SG^JYG?+IPY|;$DW@S#%o&K09rT1+5%y&b^2+8P4Ojf` zs6H*tV0*vPvk_kZh4^0#4G{k2OM+j@%%p~j6FNP3{H=`$o}OR8^E#qmIWKDO)<@%{WN!) z;&i&?5iZf8OU@t$ekBcbrjrKijVUqakp}Z;5c7L5>l5>4F#inZX~e9;F(2e7)VJia zS3!$^?^9}BUc9eBH%!)a_)F8OttZ&9S0^!cxX&h-%#lF%`Oc*Kl0hX%QtKc!5>zvY zY7Rn83skr(8&o{OyUH5zjQI`3(7~5MHwuiTZ`+2oU;D-D)eo#TK}f=VV3|)BZ$SWT zAX|(n;26pD9Ubcnk%=6j#(Yxa3PRhEEZ2=U*!*a;el3^b%gSwLAN0S;29peDpn>3{ z8cPQsL?^T#JCnv^@o!|I`%1cLLYxFR!3Rs}I0;JklhT<}NV|QYT1Zr`h!tBLYvN7> zMJkx53f9ZJMpoMx#~y5vVCfbrwZQ1yL?L9z5f$y3>Iyniz!|uN;cSy>k1)d z6-hBcFij__F-cM}2s-^v!n=Fb7YGEq3y+PNvUop#iXp?ofY|!zIz#3*YbeK?la8xk z9cQs}N9gvmnU0nrS#E5yREaqQZe2)p3apb3>u8W@XJnEBs9+s$Xi`j?R8A!Yvyp~K z6%#HFS`x)JwHnTi-E3ypvu!&Q^DibwQE2&3UVeYZ7MsO>Sj0xb{)MG5&)4++5xOT@ ziT)V~EU<_1W{_@b$gxJ~hMd@eXg^81wcrK6gUTar4I4&_S5)xBh}Ep0jiRAP^<_=T z_Jf-rMd|&bH%qvBYVcC5PtGE25C?+el~t2ls5bD?wVT zsNXC_4`lV zy|VXI)!uZA^KbUniv@2ZsqS)Qr_BHpSrV!-c(Y#9{zA9T`mEjJaMNY z-C2w~r?P#>Ykn+A^Q&4pYqJraSH!X>o`s^mPds10tx~upC%hW9b3MuWzgaC9)lox% zbclrx;kk!F}dHh{45y-gBm#PF0r-XtJ(rLGDMv%FF4& zYRI!FgwZq{Pb2eOM;NUjVI$=CXyE5mBA!deDitH1SPNm*@sKA?laYO!b)1Oqt3>ra z4>es^MOVdHL)wDx5h~N^VjJ`%PV_iml#e+izFUN->R?wZqPhTq8Mq4;sl#3VxJyZJ z){W_0G*x~Y;T3UBh{W*L>P^-MG;i(ItlGr3wsveq<+^A~5~neXLgp4-Fi)7?wVlMI zAkKEcA96%xfjD!5AzjEei3liu|8ZN=LPl_#5~oY5?cZGphmMcj0$&$&RSXYx%ob%8 z$rUA$T__5oB%=jh8ODQ+NyeBM>1&AIjQtmvtQL4i4tzL^9vO)C@;II&CLBAP3!nIV z1HT#7aBv;Xn1@XZ?C&rfP4?`}TQk5y@~kuYSE0jaj-ziDvQ6V$C*HJ8dv3NE&V4qM zb=bVp;AMhYQ+)1qWOi^klTE6-^XNsLaQIQuFq3sSH!kofSnbecC?a@_iOLsH76vH$ z7i@`kDhzP(Dd`y3GiHk@bK01$<6!aH>c`hsyT>MZmUZ}#*5xLP1V@lU8fcOakU~a2 zbY~yH*O?>_Vankkm76T))A0oakS7Ajm6)F+fQ~=}>x|MDvpBFm>clnHECl6_u^tPF zsD3un@@J28Z3my-Yc1=)sd*4{&KM?z1lZ$q1$)Oyqw7p*(r}0)Th=`8aB#gu?PPeA zm-j_IqwoD$DBe`JJ7=|1nJlm$n+IGTn&M?ny3zv5sd&n>xgIt@kFDk{t~CG zfHh*@j_^XS^Fc^zJY??%ZYo?E@Ty#++il0O@>ywe${Mo5ifDb_lMr91vISM43&$vz zTcEfsiP1+h(hQVBGctiGTL#DfpeVIr>$k2jmua4qX=cpZLLOT^=EbFHUfP=AXL+l2 zO_)VFKX$`N0V9U(+r`#@jvvtr2l7~$Ub}4)hd2WXZFcuQtYan7^}TzR0UL*2-!zEP zTjv9MuF<31Q4r2xfIx4og9={&LkSb}B>?qVFr($$4CZ*Uvd&CFMu=baQzwz=4;G0& zUUIutZtr#dR#Ju6t!4Z^qo3|cSxuJJLVlf~dny5k#-Lgb+(X4T!|_*OMg%m084+NG 
zU`$}bxcvWHBi2faLc5->O}GAK{CcD`KY>+=bslyw()4I?!W*L{Bu4~ky`BCy5iL_W zP#6`i0w85cS6awZYP2DB-Ln^PBenPa3FB*AevO>>Xy|fT6#3VIlXaVf)WN)tm@|QJ z=YeoM9M7dY&jaB+NTJgx9#TX#gsy)A#cv59SBhSf#8GR^=8|U(gX6vQ z(B0^;43T(SaSV?Z!gUD1ouCRs;>|&Z{g}UkzurtV_(J1&JZqiB{N=~xfyZ}qoRh!1 zmm{LT$Db8kC8m#<%|kSzfa;@wI*`4))#&0Y0K8tv6QyGtfaobY_C-bg@5`qMZ?3Ku z72Q2ATz!Qg~O$Hs7ptn%Z}j!$Pb;_v@hd)2^qkkJW%vWEG9ZiBFZ4dMjN z?x)RptcHOqgCXiFp~t z96Xvk{=H~i*~V_HN>9~o-r|-paeY)q_@QKJ9_yXIe+)^Fvl}LM4wnoL!`o3uR|5*; z@OCtHQ3yU;9P{hw3c+Jp z+C$v9&9NoFYk69sZ0kR!2D=^4A4DVy5l0`wcnnX)f+i3VsRSReBOI5VYA6aeL~SuS ztig;Aag6=YDOS0=|6q&!xsCi3V|(c${s=@(V%E0g)-}DB)FcAkKSy_nJMB~lxhC<6 z2iGL7XJXz<$Yz}s7h*p8HL!o!a)H5g2tcTW9NkAx4*(Euf&K6$V>Je!JOtKME?5r= zTW0?+p#m}mu^;l5mno{iivKd;YQ%}-R}bloh!H+jmXB7z}66LZpGK2Y}&pa_|9 z20r{3Vgq1Y0A>p?1B$S%aK~bLGnhBb`ab_Pr_6wEv>C48meMcN|BPPhGR^8?_)od264n1>?Z;nx z@DY-AW;)rT5>5OgvU`dFVTm`u?F^_;AoLLJGGwuK>>uB`&PqqTz4qp zC8J-a*S@pDM(JHA#IATv$cN8o=Ww;6g1# z#O>oR7phrjnK3ck!1L()0qtmeqXMn`b@}TA1Uo{FFv5!vNJQhKAluP#JL?oxvc(Qy zbSqtKfsdX}#ALUWv`R$r@Pbz8$J;<7*4B*N&_Xh38A|iCP4}ArMS1V5j$+o zZSU6RO>XbFz4Y^pjuQQ*FMLYRI$ySG_Bv^90DG&D%bzu09ywe|4>W?fYC)kFMguP{|fVMIiAH;qyQ^o~VwOqg4!tPCZ0z z0S_&K*!5=v+CL4S;Db8;Sv4wosjvYb{ zg;@#~kOOEmVAV=2AVpD3jEH$Y`h#P{>@G%jlDH+x87ba++YIh=LMT_$4j*2lYw6}0 z0_|ixF`5 zNE;o@)6KY3keJ8#NFigyeZ12$yL>VKLmWGq@qg`PLAyXm$_caDnnme9yS#H24n&=J zj0nc($Us#8FL8`;2!~cLNqh@-toR@`mx#>?DvhuSxroyrj1>9)n*91(y1R@W?ZrJ9 zo__z1ymF>~2ctX~B*uN6r60k-?Q>*;Qut?_?(6oXx2i!0TSVM09HRqiB<2)!AUmKN zD@Q=^^7P;h;~|0D;udKP$KPa1`CGGYQ+ox|$6Hgs)h*u|(JCg6AjzT^TfpFLsJ7^A zoq?C3q?-odMILV1Nz9cvjwBqg(rS_-UW{aJ$Mo)sEt? zyES~+>duabpQ$I~K7Q!nmzUy@gJpklo3pvtRiPRApkg?joj$hICu@RD0~VT*-q{G& z&-Q)cHMjU!9RBwQa?WFia?F5>P<|`-MP(|5eI7>EC~UKxc(kTv1i3mP6rPEw-a#J47!M z_St@A=-*Zb1+SG$OC3IxN-kXA7td)GAmzLG{6?2PCAmfYN*CoKBjx2Unl>hn@EV(* zxaMr^7+yE-@NksJ9GS2k_iyX)o|jdY9?7eB4C(N$4s;AW^VNW1?xnpS>&L^^Zi|Vj z9BNFKxp=biw~6%z2fEX1LR9fT4%QOmoG^PG~7$%oF)foFV)3%&x{wcK5vmid; tAv-{M3`bsOk^^(hRkfwF`&H9bHZl?2&2FccFY=sFYLyZQ6eyJx_%9L!OgsPp diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index f31e75c8427..f8ff390f00f 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -121,7 +121,7 @@ as separator. .. code-block:: console $ restic ls latest /home - + snapshot 073a90db of [/home/user/work.txt] filtered by [/home] at 2024-01-21 16:51:18.474558607 +0100 CET): /home /home/user @@ -153,6 +153,49 @@ outputting information about a snapshot in the NCDU format using the ``--ncdu`` You can use it as follows: ``restic ls latest --ncdu | ncdu -f -`` +You can use the options ``--sort`` and ``--reverse`` to tailor ``ls`` output to your needs. +``--sort`` can be one of ``name | size | time=mtime | atime | ctime | extension``. The default +sorting option is ``name``. The sorting order can be reversed by specifying ``--reverse``. + +.. code-block:: console + + $ restic ls --long latest --sort size --reverse + + snapshot 711b0bb6 of [/tmp/restic] at 2025-02-03 08:16:05.310764668 +0000 UTC filtered by []: + -rw-rw-r-- 1000 1000 16772 2025-02-03 08:09:11 /tmp/restic/cmd_find.go + -rw-rw-r-- 1000 1000 3077 2025-02-03 08:15:46 /tmp/restic/conf.py + -rw-rw-r-- 1000 1000 2834 2025-02-03 08:09:35 /tmp/restic/find.go + -rw-rw-r-- 1000 1000 1473 2025-02-03 08:15:30 /tmp/restic/010_introduction.rst + drwxrwxr-x 1000 1000 0 2025-02-03 08:15:46 /tmp/restic + dtrwxrwxrwx 0 0 0 2025-02-03 08:14:22 /tmp + +.. 
code-block:: console + + $ restic ls --long latest --sort time + + snapshot 711b0bb6 of [/tmp/restic] at 2025-02-03 08:16:05.310764668 +0000 UTC filtered by []: + -rw-rw-r-- 1000 1000 16772 2025-02-03 08:09:11 /tmp/restic/cmd_find.go + -rw-rw-r-- 1000 1000 2834 2025-02-03 08:09:35 /tmp/restic/find.go + dtrwxrwxrwx 0 0 0 2025-02-03 08:14:22 /tmp + -rw-rw-r-- 1000 1000 1473 2025-02-03 08:15:30 /tmp/restic/010_introduction.rst + drwxrwxr-x 1000 1000 0 2025-02-03 08:15:46 /tmp/restic + -rw-rw-r-- 1000 1000 3077 2025-02-03 08:15:46 /tmp/restic/conf.py + +Sorting works with option ``--json`` as well. Sorting and option ``--ncdu`` are mutually exclusive. +It works also without specifying the option ``--long``. + +.. code-block:: console + + $ restic ls latest --sort extension + + snapshot 711b0bb6 of [/tmp/restic] at 2025-02-03 08:16:05.310764668 +0000 UTC filtered by []: + /tmp + /tmp/restic + /tmp/restic/cmd_find.go + /tmp/restic/find.go + /tmp/restic/conf.py + /tmp/restic/010_introduction.rst + Copying snapshots between repositories ====================================== @@ -317,7 +360,7 @@ Modifying metadata of snapshots =============================== Sometimes it may be desirable to change the metadata of an existing snapshot. -Currently, rewriting the hostname and the time of the backup is supported. +Currently, rewriting the hostname and the time of the backup is supported. This is possible using the ``rewrite`` command with the option ``--new-host`` followed by the desired new hostname or the option ``--new-time`` followed by the desired new timestamp. .. code-block:: console From 36d8916354f7e0bf10aeb4639246cbb8e392a89b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 3 Feb 2025 21:52:57 +0100 Subject: [PATCH 884/893] ls: use numeric based enum for SortMode --- cmd/restic/cmd_ls.go | 61 +++++++++++++++++---------- cmd/restic/cmd_ls_integration_test.go | 12 +++--- 2 files changed, 46 insertions(+), 27 deletions(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 6e0d230b18a..d12dd3a314d 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -66,7 +66,7 @@ type LsOptions struct { Recursive bool HumanReadable bool Ncdu bool - Sort string + Sort SortMode Reverse bool } @@ -81,7 +81,7 @@ func init() { flags.BoolVar(&lsOptions.Recursive, "recursive", false, "include files in subfolders of the listed directories") flags.BoolVar(&lsOptions.HumanReadable, "human-readable", false, "print sizes in human readable format") flags.BoolVar(&lsOptions.Ncdu, "ncdu", false, "output NCDU export format (pipe into 'ncdu -f -')") - flags.StringVarP(&lsOptions.Sort, "sort", "s", "name", "sort output by (name|size|time=mtime|atime|ctime|extension)") + flags.VarP(&lsOptions.Sort, "sort", "s", "sort output by (name|size|time=mtime|atime|ctime|extension)") flags.BoolVar(&lsOptions.Reverse, "reverse", false, "reverse sorted output") } @@ -301,19 +301,13 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri if opts.Ncdu && gopts.JSON { return errors.Fatal("only either '--json' or '--ncdu' can be specified") } - if opts.Sort != "name" && opts.Ncdu { + if opts.Sort != SortModeName && opts.Ncdu { return errors.Fatal("--sort and --ncdu are mutually exclusive") } if opts.Reverse && opts.Ncdu { return errors.Fatal("--reverse and --ncdu are mutually exclusive") } - sortMode := SortModeName - err := sortMode.Set(opts.Sort) - if err != nil { - return err - } - // extract any specific directories to walk var dirs []string if len(args) > 1 { @@ -377,7 +371,7 @@ func 
runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri var printer lsPrinter collector := []toSortOutput{} - outputSort := sortMode != SortModeName || opts.Reverse + outputSort := opts.Sort != SortModeName || opts.Reverse if gopts.JSON { printer = &jsonLsPrinter{ @@ -481,13 +475,13 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri } if outputSort { - printSortedOutput(printer, opts, sortMode, collector) + printSortedOutput(printer, opts.Sort, opts.Reverse, collector) } return printer.Close() } -func printSortedOutput(printer lsPrinter, opts LsOptions, sortMode SortMode, collector []toSortOutput) { +func printSortedOutput(printer lsPrinter, sortMode SortMode, reverse bool, collector []toSortOutput) { switch sortMode { case SortModeName: case SortModeSize: @@ -534,7 +528,7 @@ func printSortedOutput(printer lsPrinter, opts LsOptions, sortMode SortMode, col }) } - if opts.Reverse { + if reverse { slices.Reverse(collector) } for _, elem := range collector { @@ -543,17 +537,17 @@ func printSortedOutput(printer lsPrinter, opts LsOptions, sortMode SortMode, col } // SortMode defines the allowed sorting modes -type SortMode string +type SortMode uint // Allowed sort modes const ( - SortModeName SortMode = "name" - SortModeSize SortMode = "size" - SortModeAtime SortMode = "atime" - SortModeCtime SortMode = "ctime" - SortModeMtime SortMode = "mtime" - SortModeExt SortMode = "extension" - SortModeInvalid SortMode = "--invalid--" + SortModeName SortMode = iota + SortModeSize + SortModeAtime + SortModeCtime + SortModeMtime + SortModeExt + SortModeInvalid ) // Set implements the method needed for pflag command flag parsing. @@ -573,8 +567,31 @@ func (c *SortMode) Set(s string) error { *c = SortModeExt default: *c = SortModeInvalid - return fmt.Errorf("invalid sort mode %q, must be one of (name|size|atime|ctime|mtime=time|extension)", s) + return fmt.Errorf("invalid sort mode %q, must be one of (name|size|time=mtime|atime|ctime|extension)", s) } return nil } + +func (c *SortMode) String() string { + switch *c { + case SortModeName: + return "name" + case SortModeSize: + return "size" + case SortModeAtime: + return "atime" + case SortModeCtime: + return "ctime" + case SortModeMtime: + return "mtime" + case SortModeExt: + return "extension" + default: + return "invalid" + } +} + +func (c *SortMode) Type() string { + return "mode" +} diff --git a/cmd/restic/cmd_ls_integration_test.go b/cmd/restic/cmd_ls_integration_test.go index 29e153419d0..4675814b379 100644 --- a/cmd/restic/cmd_ls_integration_test.go +++ b/cmd/restic/cmd_ls_integration_test.go @@ -19,7 +19,7 @@ func testRunLsWithOpts(t testing.TB, gopts GlobalOptions, opts LsOptions, args [ } func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string { - out := testRunLsWithOpts(t, gopts, LsOptions{Sort: "name"}, []string{snapshotID}) + out := testRunLsWithOpts(t, gopts, LsOptions{}, []string{snapshotID}) return strings.Split(string(out), "\n") } @@ -45,7 +45,7 @@ func TestRunLsNcdu(t *testing.T) { {"latest", "/0"}, {"latest", "/0", "/0/9"}, } { - ncdu := testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true, Sort: "name"}, paths) + ncdu := testRunLsWithOpts(t, env.gopts, LsOptions{Ncdu: true}, paths) assertIsValidJSON(t, ncdu) } } @@ -83,7 +83,7 @@ func TestRunLsSort(t *testing.T) { testRunBackup(t, env.testdata+"/0", []string{"for_cmd_ls"}, opts, env.gopts) // sort by size - out := testRunLsWithOpts(t, env.gopts, LsOptions{Sort: "size"}, []string{"latest"}) + out := 
testRunLsWithOpts(t, env.gopts, LsOptions{Sort: SortModeSize}, []string{"latest"}) fileList := strings.Split(string(out), "\n") rtest.Assert(t, len(fileList) == 5, "invalid ls --sort size, expected 5 array elements, got %v", len(fileList)) for i, item := range compareSize { @@ -91,7 +91,7 @@ func TestRunLsSort(t *testing.T) { } // sort by file extension - out = testRunLsWithOpts(t, env.gopts, LsOptions{Sort: "extension"}, []string{"latest"}) + out = testRunLsWithOpts(t, env.gopts, LsOptions{Sort: SortModeExt}, []string{"latest"}) fileList = strings.Split(string(out), "\n") rtest.Assert(t, len(fileList) == 5, "invalid ls --sort extension, expected 5 array elements, got %v", len(fileList)) for i, item := range compareExt { @@ -99,10 +99,12 @@ func TestRunLsSort(t *testing.T) { } // explicit name sort - out = testRunLsWithOpts(t, env.gopts, LsOptions{Sort: "name"}, []string{"latest"}) + out = testRunLsWithOpts(t, env.gopts, LsOptions{Sort: SortModeName}, []string{"latest"}) fileList = strings.Split(string(out), "\n") rtest.Assert(t, len(fileList) == 5, "invalid ls --sort name, expected 5 array elements, got %v", len(fileList)) for i, item := range compareName { rtest.Assert(t, item == fileList[i], "invalid ls --sort name, expected element '%s', got '%s'", item, fileList[i]) } + + rtest.Equals(t, SortMode(0), SortModeName, "unexpected default sort mode") } From 993eb112cd344d216ec0178ab37b9da182e8f59e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 3 Feb 2025 21:58:38 +0100 Subject: [PATCH 885/893] ls: deduplicate sorting test --- cmd/restic/cmd_ls_integration_test.go | 87 ++++++++++++--------------- 1 file changed, 40 insertions(+), 47 deletions(-) diff --git a/cmd/restic/cmd_ls_integration_test.go b/cmd/restic/cmd_ls_integration_test.go index 4675814b379..b9d565364d9 100644 --- a/cmd/restic/cmd_ls_integration_test.go +++ b/cmd/restic/cmd_ls_integration_test.go @@ -3,6 +3,7 @@ package main import ( "context" "encoding/json" + "fmt" "strings" "testing" @@ -51,29 +52,7 @@ func TestRunLsNcdu(t *testing.T) { } func TestRunLsSort(t *testing.T) { - compareName := []string{ - "/for_cmd_ls", - "/for_cmd_ls/file1.txt", - "/for_cmd_ls/file2.txt", - "/for_cmd_ls/python.py", - "", // last empty line - } - - compareSize := []string{ - "/for_cmd_ls", - "/for_cmd_ls/file2.txt", - "/for_cmd_ls/file1.txt", - "/for_cmd_ls/python.py", - "", - } - - compareExt := []string{ - "/for_cmd_ls", - "/for_cmd_ls/python.py", - "/for_cmd_ls/file1.txt", - "/for_cmd_ls/file2.txt", - "", - } + rtest.Equals(t, SortMode(0), SortModeName, "unexpected default sort mode") env, cleanup := withTestEnvironment(t) defer cleanup() @@ -82,29 +61,43 @@ func TestRunLsSort(t *testing.T) { opts := BackupOptions{} testRunBackup(t, env.testdata+"/0", []string{"for_cmd_ls"}, opts, env.gopts) - // sort by size - out := testRunLsWithOpts(t, env.gopts, LsOptions{Sort: SortModeSize}, []string{"latest"}) - fileList := strings.Split(string(out), "\n") - rtest.Assert(t, len(fileList) == 5, "invalid ls --sort size, expected 5 array elements, got %v", len(fileList)) - for i, item := range compareSize { - rtest.Assert(t, item == fileList[i], "invalid ls --sort size, expected element '%s', got '%s'", item, fileList[i]) - } - - // sort by file extension - out = testRunLsWithOpts(t, env.gopts, LsOptions{Sort: SortModeExt}, []string{"latest"}) - fileList = strings.Split(string(out), "\n") - rtest.Assert(t, len(fileList) == 5, "invalid ls --sort extension, expected 5 array elements, got %v", len(fileList)) - for i, item := range compareExt { - 
rtest.Assert(t, item == fileList[i], "invalid ls --sort extension, expected element '%s', got '%s'", item, fileList[i]) - } - - // explicit name sort - out = testRunLsWithOpts(t, env.gopts, LsOptions{Sort: SortModeName}, []string{"latest"}) - fileList = strings.Split(string(out), "\n") - rtest.Assert(t, len(fileList) == 5, "invalid ls --sort name, expected 5 array elements, got %v", len(fileList)) - for i, item := range compareName { - rtest.Assert(t, item == fileList[i], "invalid ls --sort name, expected element '%s', got '%s'", item, fileList[i]) + for _, test := range []struct { + mode SortMode + expected []string + }{ + { + SortModeSize, + []string{ + "/for_cmd_ls", + "/for_cmd_ls/file2.txt", + "/for_cmd_ls/file1.txt", + "/for_cmd_ls/python.py", + "", + }, + }, + { + SortModeExt, + []string{ + "/for_cmd_ls", + "/for_cmd_ls/python.py", + "/for_cmd_ls/file1.txt", + "/for_cmd_ls/file2.txt", + "", + }, + }, + { + SortModeName, + []string{ + "/for_cmd_ls", + "/for_cmd_ls/file1.txt", + "/for_cmd_ls/file2.txt", + "/for_cmd_ls/python.py", + "", // last empty line + }, + }, + } { + out := testRunLsWithOpts(t, env.gopts, LsOptions{Sort: test.mode}, []string{"latest"}) + fileList := strings.Split(string(out), "\n") + rtest.Equals(t, test.expected, fileList, fmt.Sprintf("mismatch for mode %v", test.mode)) } - - rtest.Equals(t, SortMode(0), SortModeName, "unexpected default sort mode") } From 1807627dda45ccf147dab9f79baab82708be46e6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 3 Feb 2025 22:05:40 +0100 Subject: [PATCH 886/893] ls: refactor sorting into sortedPrinter struct --- cmd/restic/cmd_ls.go | 71 +++++++++++++++++++++++++++----------------- 1 file changed, 44 insertions(+), 27 deletions(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index d12dd3a314d..f617e21d5dc 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -370,8 +370,6 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri } var printer lsPrinter - collector := []toSortOutput{} - outputSort := opts.Sort != SortModeName || opts.Reverse if gopts.JSON { printer = &jsonLsPrinter{ @@ -381,7 +379,6 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri printer = &ncduLsPrinter{ out: globalOptions.stdout, } - outputSort = false } else { printer = &textLsPrinter{ dirs: dirs, @@ -389,6 +386,13 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri HumanReadable: opts.HumanReadable, } } + if opts.Sort != SortModeName || opts.Reverse { + printer = &sortedPrinter{ + printer: printer, + sortMode: opts.Sort, + reverse: opts.Reverse, + } + } sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, @@ -419,12 +423,8 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri printedDir := false if withinDir(nodepath) { // if we're within a target path, print the node - if outputSort { - collector = append(collector, toSortOutput{nodepath, node}) - } else { - if err := printer.Node(nodepath, node, false); err != nil { - return err - } + if err := printer.Node(nodepath, node, false); err != nil { + return err } printedDir = true @@ -439,7 +439,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri // there yet), signal the walker to descend into any subdirs if approachingMatchingTree(nodepath) { // print node leading up to the target paths - if !printedDir && !outputSort { + if !printedDir { return printer.Node(nodepath, node, true) } return nil @@ -474,39 +474,55 
@@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return err } - if outputSort { - printSortedOutput(printer, opts.Sort, opts.Reverse, collector) - } - return printer.Close() } -func printSortedOutput(printer lsPrinter, sortMode SortMode, reverse bool, collector []toSortOutput) { - switch sortMode { +type sortedPrinter struct { + printer lsPrinter + collector []toSortOutput + sortMode SortMode + reverse bool +} + +func (p *sortedPrinter) Snapshot(sn *restic.Snapshot) error { + return p.printer.Snapshot(sn) +} +func (p *sortedPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) error { + if !isPrefixDirectory { + p.collector = append(p.collector, toSortOutput{path, node}) + } + return nil +} + +func (p *sortedPrinter) LeaveDir(_ string) error { + return nil +} +func (p *sortedPrinter) Close() error { + switch p.sortMode { case SortModeName: case SortModeSize: - slices.SortStableFunc(collector, func(a, b toSortOutput) int { + slices.SortStableFunc(p.collector, func(a, b toSortOutput) int { return cmp.Or( cmp.Compare(a.node.Size, b.node.Size), cmp.Compare(a.nodepath, b.nodepath), ) }) case SortModeMtime: - slices.SortStableFunc(collector, func(a, b toSortOutput) int { + slices.SortStableFunc(p.collector, func(a, b toSortOutput) int { return cmp.Or( a.node.ModTime.Compare(b.node.ModTime), cmp.Compare(a.nodepath, b.nodepath), ) }) case SortModeAtime: - slices.SortStableFunc(collector, func(a, b toSortOutput) int { + slices.SortStableFunc(p.collector, func(a, b toSortOutput) int { return cmp.Or( a.node.AccessTime.Compare(b.node.AccessTime), cmp.Compare(a.nodepath, b.nodepath), ) }) case SortModeCtime: - slices.SortStableFunc(collector, func(a, b toSortOutput) int { + slices.SortStableFunc(p.collector, func(a, b toSortOutput) int { return cmp.Or( a.node.ChangeTime.Compare(b.node.ChangeTime), cmp.Compare(a.nodepath, b.nodepath), @@ -514,13 +530,13 @@ func printSortedOutput(printer lsPrinter, sortMode SortMode, reverse bool, colle }) case SortModeExt: // map name to extension - mapExt := make(map[string]string, len(collector)) - for _, item := range collector { + mapExt := make(map[string]string, len(p.collector)) + for _, item := range p.collector { ext := filepath.Ext(item.nodepath) mapExt[item.nodepath] = ext } - slices.SortStableFunc(collector, func(a, b toSortOutput) int { + slices.SortStableFunc(p.collector, func(a, b toSortOutput) int { return cmp.Or( cmp.Compare(mapExt[a.nodepath], mapExt[b.nodepath]), cmp.Compare(a.nodepath, b.nodepath), @@ -528,12 +544,13 @@ func printSortedOutput(printer lsPrinter, sortMode SortMode, reverse bool, colle }) } - if reverse { - slices.Reverse(collector) + if p.reverse { + slices.Reverse(p.collector) } - for _, elem := range collector { - _ = printer.Node(elem.nodepath, elem.node, false) + for _, elem := range p.collector { + _ = p.printer.Node(elem.nodepath, elem.node, false) } + return nil } // SortMode defines the allowed sorting modes From c32613a624eb089b75aa5b6c60f329a4d27efe59 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 3 Feb 2025 22:09:55 +0100 Subject: [PATCH 887/893] ls: extract comparator --- cmd/restic/cmd_ls.go | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index f617e21d5dc..513540b17bf 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -498,36 +498,37 @@ func (p *sortedPrinter) LeaveDir(_ string) error { return nil } func (p *sortedPrinter) Close() error { + var comparator 
func(a, b toSortOutput) int switch p.sortMode { case SortModeName: case SortModeSize: - slices.SortStableFunc(p.collector, func(a, b toSortOutput) int { + comparator = func(a, b toSortOutput) int { return cmp.Or( cmp.Compare(a.node.Size, b.node.Size), cmp.Compare(a.nodepath, b.nodepath), ) - }) + } case SortModeMtime: - slices.SortStableFunc(p.collector, func(a, b toSortOutput) int { + comparator = func(a, b toSortOutput) int { return cmp.Or( a.node.ModTime.Compare(b.node.ModTime), cmp.Compare(a.nodepath, b.nodepath), ) - }) + } case SortModeAtime: - slices.SortStableFunc(p.collector, func(a, b toSortOutput) int { + comparator = func(a, b toSortOutput) int { return cmp.Or( a.node.AccessTime.Compare(b.node.AccessTime), cmp.Compare(a.nodepath, b.nodepath), ) - }) + } case SortModeCtime: - slices.SortStableFunc(p.collector, func(a, b toSortOutput) int { + comparator = func(a, b toSortOutput) int { return cmp.Or( a.node.ChangeTime.Compare(b.node.ChangeTime), cmp.Compare(a.nodepath, b.nodepath), ) - }) + } case SortModeExt: // map name to extension mapExt := make(map[string]string, len(p.collector)) @@ -536,14 +537,17 @@ func (p *sortedPrinter) Close() error { mapExt[item.nodepath] = ext } - slices.SortStableFunc(p.collector, func(a, b toSortOutput) int { + comparator = func(a, b toSortOutput) int { return cmp.Or( cmp.Compare(mapExt[a.nodepath], mapExt[b.nodepath]), cmp.Compare(a.nodepath, b.nodepath), ) - }) + } } + if comparator != nil { + slices.SortStableFunc(p.collector, comparator) + } if p.reverse { slices.Reverse(p.collector) } From 6cc06e0812719c3a09e394e2f575d3d21dc70522 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 3 Feb 2025 22:15:32 +0100 Subject: [PATCH 888/893] ls: add missing error handling --- cmd/restic/cmd_ls.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 513540b17bf..373a31a4012 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -552,7 +552,9 @@ func (p *sortedPrinter) Close() error { slices.Reverse(p.collector) } for _, elem := range p.collector { - _ = p.printer.Node(elem.nodepath, elem.node, false) + if err := p.printer.Node(elem.nodepath, elem.node, false); err != nil { + return err + } } return nil } From 4104a8e6a567696c2708f02f228afedfc14869e8 Mon Sep 17 00:00:00 2001 From: Winfried Plappert Date: Wed, 5 Feb 2025 19:40:20 +0000 Subject: [PATCH 889/893] Issue: 4942: cmd_rewrite: add snapshot summary data to an existing snapshot. (#5185) Co-authored-by: Michael Eischer --- changelog/unreleased/issue-4942 | 11 +++++ cmd/restic/cmd_repair_snapshots.go | 5 ++- cmd/restic/cmd_rewrite.go | 50 ++++++++++++++++------ cmd/restic/cmd_rewrite_integration_test.go | 33 ++++++++++++++ doc/045_working_with_repos.rst | 4 ++ 5 files changed, 87 insertions(+), 16 deletions(-) create mode 100644 changelog/unreleased/issue-4942 diff --git a/changelog/unreleased/issue-4942 b/changelog/unreleased/issue-4942 new file mode 100644 index 00000000000..ee3820b59d9 --- /dev/null +++ b/changelog/unreleased/issue-4942 @@ -0,0 +1,11 @@ +Enhancement: support creating snapshot summary statistics for old snapshots + +When `rewrite` is used with the `--snapshot-summary` option, a new snapshot is +created containing statistics summary data. Only two fields in the summary will +be non-zero: `TotalFilesProcessed` and `TotalBytesProcessed`. + +When rewrite is called with one of the `--exclude` options, `TotalFilesProcessed` +and `TotalBytesProcessed` will be updated in the snapshot summary. 
+ +https://github.com/restic/restic/issues/4942 +https://github.com/restic/restic/pull/5185 diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index ba952432afa..34c02b3ff88 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -143,8 +143,9 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) { Verbosef("\n%v\n", sn) changed, err := filterAndReplaceSnapshot(ctx, repo, sn, - func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) { - return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree) + func(ctx context.Context, sn *restic.Snapshot) (restic.ID, *restic.SnapshotSummary, error) { + id, err := rewriter.RewriteTree(ctx, repo, "/", *sn.Tree) + return id, nil, err }, opts.DryRun, opts.Forget, nil, "repaired") if err != nil { return errors.Fatalf("unable to rewrite snapshot ID %q: %v", sn.ID().Str(), err) diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 707f8af9baa..f847aa372b3 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -35,6 +35,13 @@ Please note that the --forget option only removes the snapshots and not the actu data stored in the repository. In order to delete the no longer referenced data, use the "prune" command. +When rewrite is used with the --snapshot-summary option, a new snapshot is +created containing statistics summary data. Only two fields in the summary will +be non-zero: TotalFilesProcessed and TotalBytesProcessed. + +When rewrite is called with one of the --exclude options, TotalFilesProcessed +and TotalBytesProcessed will be updated in the snapshot summary. + EXIT STATUS =========== @@ -83,8 +90,9 @@ func (sma snapshotMetadataArgs) convert() (*snapshotMetadata, error) { // RewriteOptions collects all options for the rewrite command. type RewriteOptions struct { - Forget bool - DryRun bool + Forget bool + DryRun bool + SnapshotSummary bool Metadata snapshotMetadataArgs restic.SnapshotFilter @@ -101,12 +109,15 @@ func init() { f.BoolVarP(&rewriteOptions.DryRun, "dry-run", "n", false, "do not do anything, just print what would be done") f.StringVar(&rewriteOptions.Metadata.Hostname, "new-host", "", "replace hostname") f.StringVar(&rewriteOptions.Metadata.Time, "new-time", "", "replace time of the backup") + f.BoolVarP(&rewriteOptions.SnapshotSummary, "snapshot-summary", "s", false, "create snapshot summary record if it does not exist") initMultiSnapshotFilter(f, &rewriteOptions.SnapshotFilter, true) rewriteOptions.ExcludePatternOptions.Add(f) } -type rewriteFilterFunc func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) +// rewriteFilterFunc returns the filtered tree ID or an error. If a snapshot summary is returned, the snapshot will +// be updated accordingly. 
+type rewriteFilterFunc func(ctx context.Context, sn *restic.Snapshot) (restic.ID, *restic.SnapshotSummary, error) func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *restic.Snapshot, opts RewriteOptions) (bool, error) { if sn.Tree == nil { @@ -126,7 +137,7 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti var filter rewriteFilterFunc - if len(rejectByNameFuncs) > 0 { + if len(rejectByNameFuncs) > 0 || opts.SnapshotSummary { selectByName := func(nodepath string) bool { for _, reject := range rejectByNameFuncs { if reject(nodepath) { @@ -146,22 +157,24 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti rewriter, querySize := walker.NewSnapshotSizeRewriter(rewriteNode) - filter = func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) { + filter = func(ctx context.Context, sn *restic.Snapshot) (restic.ID, *restic.SnapshotSummary, error) { id, err := rewriter.RewriteTree(ctx, repo, "/", *sn.Tree) if err != nil { - return restic.ID{}, err + return restic.ID{}, nil, err } ss := querySize() + summary := &restic.SnapshotSummary{} if sn.Summary != nil { - sn.Summary.TotalFilesProcessed = ss.FileCount - sn.Summary.TotalBytesProcessed = ss.FileSize + *summary = *sn.Summary } - return id, err + summary.TotalFilesProcessed = ss.FileCount + summary.TotalBytesProcessed = ss.FileSize + return id, summary, err } } else { - filter = func(_ context.Context, sn *restic.Snapshot) (restic.ID, error) { - return *sn.Tree, nil + filter = func(_ context.Context, sn *restic.Snapshot) (restic.ID, *restic.SnapshotSummary, error) { + return *sn.Tree, nil, nil } } @@ -176,9 +189,10 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r repo.StartPackUploader(wgCtx, wg) var filteredTree restic.ID + var summary *restic.SnapshotSummary wg.Go(func() error { var err error - filteredTree, err = filter(ctx, sn) + filteredTree, summary, err = filter(ctx, sn) if err != nil { return err } @@ -203,7 +217,12 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r return true, nil } - if filteredTree == *sn.Tree && newMetadata == nil { + matchingSummary := true + if summary != nil { + matchingSummary = sn.Summary != nil && *summary == *sn.Summary + } + + if filteredTree == *sn.Tree && newMetadata == nil && matchingSummary { debug.Log("Snapshot %v not modified", sn) return false, nil } @@ -230,6 +249,9 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r // Always set the original snapshot id as this essentially a new snapshot. 
sn.Original = sn.ID() sn.Tree = &filteredTree + if summary != nil { + sn.Summary = summary + } if !forget { sn.AddTags([]string{addTag}) @@ -263,7 +285,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r } func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, args []string) error { - if opts.ExcludePatternOptions.Empty() && opts.Metadata.empty() { + if !opts.SnapshotSummary && opts.ExcludePatternOptions.Empty() && opts.Metadata.empty() { return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided") } diff --git a/cmd/restic/cmd_rewrite_integration_test.go b/cmd/restic/cmd_rewrite_integration_test.go index 6471d49ba21..18835333347 100644 --- a/cmd/restic/cmd_rewrite_integration_test.go +++ b/cmd/restic/cmd_rewrite_integration_test.go @@ -139,3 +139,36 @@ func TestRewriteMetadata(t *testing.T) { testRewriteMetadata(t, metadata) } } + +func TestRewriteSnaphotSummary(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + createBasicRewriteRepo(t, env) + + rtest.OK(t, runRewrite(context.TODO(), RewriteOptions{SnapshotSummary: true}, env.gopts, []string{})) + // no new snapshot should be created as the snapshot already has a summary + snapshots := testListSnapshots(t, env.gopts, 1) + + // replace snapshot by one without a summary + _, repo, unlock, err := openWithExclusiveLock(context.TODO(), env.gopts, false) + rtest.OK(t, err) + sn, err := restic.LoadSnapshot(context.TODO(), repo, snapshots[0]) + rtest.OK(t, err) + oldSummary := sn.Summary + sn.Summary = nil + rtest.OK(t, repo.RemoveUnpacked(context.TODO(), restic.WriteableSnapshotFile, snapshots[0])) + snapshots[0], err = restic.SaveSnapshot(context.TODO(), repo, sn) + rtest.OK(t, err) + unlock() + + // rewrite snapshot and lookup ID of new snapshot + rtest.OK(t, runRewrite(context.TODO(), RewriteOptions{SnapshotSummary: true}, env.gopts, []string{})) + newSnapshots := testListSnapshots(t, env.gopts, 2) + newSnapshot := restic.NewIDSet(newSnapshots...).Sub(restic.NewIDSet(snapshots...)).List()[0] + + sn, err = restic.LoadSnapshot(context.TODO(), repo, newSnapshot) + rtest.OK(t, err) + rtest.Assert(t, sn.Summary != nil, "snapshot should have summary attached") + rtest.Equals(t, oldSummary.TotalBytesProcessed, sn.Summary.TotalBytesProcessed, "unexpected TotalBytesProcessed value") + rtest.Equals(t, oldSummary.TotalFilesProcessed, sn.Summary.TotalFilesProcessed, "unexpected TotalFilesProcessed value") +} diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index f8ff390f00f..d5f2240b88d 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -332,6 +332,10 @@ command, see :ref:`backup-excluding-files` for details. It is possible to rewrite only a subset of snapshots by filtering them the same way as for the ``copy`` command, see :ref:`copy-filtering-snapshots`. +The option ``--snapshot-summary`` can be used to attach summary data to existing +snapshots that do not have this information. When a snapshot summary is created +the only fields added are ``TotalFilesProcessed`` and ``TotalBytesProcessed``. + By default, the ``rewrite`` command will keep the original snapshots and create new ones for every snapshot which was modified during rewriting. 
The new snapshots are marked with the tag ``rewrite`` to differentiate them from the From 79d435efb1571f887989118dd3b3df41683a4fcb Mon Sep 17 00:00:00 2001 From: Dark Dragon Date: Sun, 2 Feb 2025 17:30:45 +0100 Subject: [PATCH 890/893] Use printer.NewCounter() instead of newTerminalProgressMax() where possible (max is unknown). --- cmd/restic/cmd_check.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 8788b0caf33..3ae79149ec9 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -316,7 +316,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args wg.Add(1) go func() { defer wg.Done() - bar := newTerminalProgressMax(!gopts.Quiet, 0, "snapshots", term) + bar := printer.NewCounter("snapshots") defer bar.Done() chkr.Structure(ctx, bar, errChan) }() @@ -353,9 +353,8 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } doReadData := func(packs map[restic.ID]int64) { - packCount := uint64(len(packs)) - - p := newTerminalProgressMax(!gopts.Quiet, packCount, "packs", term) + p := printer.NewCounter("packs") + p.SetMax(uint64(len(packs))) errChan := make(chan error) go chkr.ReadPacks(ctx, packs, p, errChan) From a58a8f2ce0f536bc7c9d4696b3b688fdcebd1cbd Mon Sep 17 00:00:00 2001 From: Dark Dragon Date: Mon, 30 Dec 2024 22:33:01 +0100 Subject: [PATCH 891/893] Add JSON output to check command --- changelog/unreleased/issue-1378 | 7 +++++ cmd/restic/cmd_check.go | 50 ++++++++++++++++++++++++++++++++- doc/075_scripting.rst | 12 ++++++++ 3 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-1378 diff --git a/changelog/unreleased/issue-1378 b/changelog/unreleased/issue-1378 new file mode 100644 index 00000000000..fd5d379cc9d --- /dev/null +++ b/changelog/unreleased/issue-1378 @@ -0,0 +1,7 @@ +Enhancement: Add JSON support to check + +Restic `check` now also supports the `--json` option and gives all +statistics in JSON format. 
+ +https://github.com/restic/restic/issues/1378 +https://github.com/restic/restic/pull/5194 diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 3ae79149ec9..765e9374499 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -2,6 +2,7 @@ package main import ( "context" + "fmt" "math/rand" "os" "strconv" @@ -215,7 +216,12 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args return errors.Fatal("the check command expects no arguments, only options - please see `restic help check` for usage and flags") } - printer := newTerminalProgressPrinter(gopts.verbosity, term) + var printer progress.Printer + if !gopts.JSON { + printer = newTerminalProgressPrinter(gopts.verbosity, term) + } else { + printer = newJSONErrorPrinter(term) + } cleanup := prepareCheckCache(opts, &gopts, printer) defer cleanup() @@ -431,6 +437,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args return errors.Fatal("repository contains errors") } printer.P("no errors were found\n") + if gopts.JSON { + status := checkSuccess{ + MessageType: "checked", + Message: "no errors were found", + } + term.Print(ui.ToJSONString(status)) + } return nil } @@ -478,3 +491,38 @@ func selectRandomPacksByFileSize(allPacks map[restic.ID]int64, subsetSize int64, packs := selectRandomPacksByPercentage(allPacks, subsetPercentage) return packs } + +type checkSuccess struct { + MessageType string `json:"message_type"` // "checked" + Message string `json:"message"` +} + +type checkError struct { + MessageType string `json:"message_type"` // "error" + Message string `json:"message"` +} + +type jsonErrorPrinter struct { + term ui.Terminal +} + +func newJSONErrorPrinter(term ui.Terminal) *jsonErrorPrinter { + return &jsonErrorPrinter{ + term: term, + } +} + +func (*jsonErrorPrinter) NewCounter(_ string) *progress.Counter { + return nil +} + +func (p *jsonErrorPrinter) E(msg string, args ...interface{}) { + status := checkError{ + MessageType: "error", + Message: fmt.Sprintf(msg, args...), + } + p.term.Print(ui.ToJSONString(status)) +} +func (*jsonErrorPrinter) P(_ string, _ ...interface{}) {} +func (*jsonErrorPrinter) V(_ string, _ ...interface{}) {} +func (*jsonErrorPrinter) VV(_ string, _ ...interface{}) {} diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 57a8e2872f5..3cc009c15ab 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -245,6 +245,18 @@ are stored in JSON form. Specifying ``--json`` or ``--quiet`` will suppress any non-JSON messages the command generates. 
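For reference, the two message shapes introduced by this change can be reproduced with plain ``encoding/json``. The snippet below is a minimal sketch rather than restic code; it mirrors the ``checkSuccess`` and ``checkError`` structs added above (plain ``json.Marshal`` stands in for restic's ``ui.ToJSONString`` helper), and the sample error text is purely illustrative.

.. code-block:: go

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Stand-ins for the checkSuccess and checkError structs added above.
    type checkSuccess struct {
        MessageType string `json:"message_type"` // "checked"
        Message     string `json:"message"`
    }

    type checkError struct {
        MessageType string `json:"message_type"` // "error"
        Message     string `json:"message"`
    }

    func main() {
        ok, _ := json.Marshal(checkSuccess{MessageType: "checked", Message: "no errors were found"})
        fail, _ := json.Marshal(checkError{MessageType: "error", Message: "example error text"})
        fmt.Println(string(ok))   // {"message_type":"checked","message":"no errors were found"}
        fmt.Println(string(fail)) // {"message_type":"error","message":"example error text"}
    }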
+check +----- + +The ``check`` command outputs JSON messages with the following format: + ++------------------+--------------------------------+ +| ``message_type`` | Either "checked" or "error" | ++------------------+--------------------------------+ +| ``message`` | Descriptive message | ++------------------+--------------------------------+ + + diff ---- From 7cc1aa0cd4b96f0b63a6e07e3d020c8899939833 Mon Sep 17 00:00:00 2001 From: Dark Dragon Date: Mon, 30 Dec 2024 23:44:46 +0100 Subject: [PATCH 892/893] Add check summary --- cmd/restic/cmd_check.go | 53 ++++++++++++++++++++++------------------- doc/075_scripting.rst | 32 ++++++++++++++++++++----- 2 files changed, 55 insertions(+), 30 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 765e9374499..0f710d30e6f 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -212,6 +212,7 @@ func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions, printer progress } func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args []string, term *termstatus.Terminal) error { + summary := checkSummary{MessageType: "summary"} if len(args) != 0 { return errors.Fatal("the check command expects no arguments, only options - please see `restic help check` for usage and flags") } @@ -249,26 +250,24 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } errorsFound := false - suggestIndexRebuild := false - mixedFound := false for _, hint := range hints { switch hint.(type) { case *checker.ErrDuplicatePacks: term.Print(hint.Error()) - suggestIndexRebuild = true + summary.HintRepairIndex = true case *checker.ErrMixedPack: term.Print(hint.Error()) - mixedFound = true + summary.HintPrune = true default: printer.E("error: %v\n", hint) errorsFound = true } } - if suggestIndexRebuild { + if summary.HintRepairIndex { term.Print("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n") } - if mixedFound { + if summary.HintPrune { term.Print("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n") } @@ -277,6 +276,8 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args printer.E("error: %v\n", err) } + summary.NumErrors += len(errs) + summary.HintRepairIndex = true printer.E("\nThe repository index is damaged and must be repaired. 
You must run `restic repair index' to correct this.\n\n") return errors.Fatal("repository contains errors") } @@ -299,6 +300,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args salvagePacks.Insert(packErr.ID) } errorsFound = true + summary.NumErrors++ printer.E("%v\n", err) } } else { @@ -307,9 +309,12 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } } - if orphanedPacks > 0 && !errorsFound { - // hide notice if repository is damaged - printer.P("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) + if orphanedPacks > 0 { + summary.HintPrune = true + if !errorsFound { + // hide notice if repository is damaged + printer.P("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) + } } if ctx.Err() != nil { return ctx.Err() @@ -332,9 +337,11 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args if e, ok := err.(*checker.TreeError); ok { printer.E("error for tree %v:\n", e.ID.Str()) for _, treeErr := range e.Errors { + summary.NumErrors++ printer.E(" %v\n", treeErr) } } else { + summary.NumErrors++ printer.E("error: %v\n", err) } } @@ -367,6 +374,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args for err := range errChan { errorsFound = true + summary.NumErrors++ printer.E("%v\n", err) if err, ok := err.(*repository.ErrPackData); ok { salvagePacks.Insert(err.PackID) @@ -418,11 +426,10 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args if len(salvagePacks) > 0 { printer.E("\nThe repository contains damaged pack files. These damaged files must be removed to repair the repository. This can be done using the following commands. Please read the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html first.\n\n") - var strIDs []string for id := range salvagePacks { - strIDs = append(strIDs, id.String()) + summary.BrokenPacks = append(summary.BrokenPacks, id.String()) } - printer.E("restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIDs, " ")) + printer.E("restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(summary.BrokenPacks, " ")) printer.E("Damaged pack files can be caused by backend problems, hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n") } @@ -430,6 +437,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args return ctx.Err() } + if gopts.JSON { + term.Print(ui.ToJSONString(summary)) + } if errorsFound { if len(salvagePacks) == 0 { printer.E("\nThe repository is damaged and must be repaired. 
Please follow the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html .\n\n") @@ -437,14 +447,6 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args return errors.Fatal("repository contains errors") } printer.P("no errors were found\n") - if gopts.JSON { - status := checkSuccess{ - MessageType: "checked", - Message: "no errors were found", - } - term.Print(ui.ToJSONString(status)) - } - return nil } @@ -492,9 +494,12 @@ func selectRandomPacksByFileSize(allPacks map[restic.ID]int64, subsetSize int64, return packs } -type checkSuccess struct { - MessageType string `json:"message_type"` // "checked" - Message string `json:"message"` +type checkSummary struct { + MessageType string `json:"message_type"` // "summary" + NumErrors int `json:"num_errors"` + BrokenPacks []string `json:"broken_packs"` // run "restic repair packs ID..." and "restic repair snapshots --forget" to remove damaged files + HintRepairIndex bool `json:"suggest_repair_index"` // run "restic repair index" + HintPrune bool `json:"suggest_prune"` // run "restic prune" } type checkError struct { @@ -521,7 +526,7 @@ func (p *jsonErrorPrinter) E(msg string, args ...interface{}) { MessageType: "error", Message: fmt.Sprintf(msg, args...), } - p.term.Print(ui.ToJSONString(status)) + p.term.Error(ui.ToJSONString(status)) } func (*jsonErrorPrinter) P(_ string, _ ...interface{}) {} func (*jsonErrorPrinter) V(_ string, _ ...interface{}) {} diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 3cc009c15ab..8e0846cc3f5 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -248,13 +248,33 @@ non-JSON messages the command generates. check ----- -The ``check`` command outputs JSON messages with the following format: +The ``check`` command uses the JSON lines format with the following message types. -+------------------+--------------------------------+ -| ``message_type`` | Either "checked" or "error" | -+------------------+--------------------------------+ -| ``message`` | Descriptive message | -+------------------+--------------------------------+ +Status +^^^^^^ + ++--------------------------+------------------------------------------------------------------------------------------------+ +| ``message_type`` | Always "summary" | ++--------------------------+------------------------------------------------------------------------------------------------+ +| ``num_errors`` | Number of errors | ++--------------------------+------------------------------------------------------------------------------------------------+ +| ``broken_packs`` | Run "restic repair packs ID..." and "restic repair snapshots --forget" to remove damaged files | ++--------------------------+------------------------------------------------------------------------------------------------+ +| ``suggest_repair_index`` | Run "restic repair index" | ++--------------------------+------------------------------------------------------------------------------------------------+ +| ``suggest_prune`` | Run "restic prune" | ++--------------------------+------------------------------------------------------------------------------------------------+ + +Error +^^^^^ + +These errors are printed on ``stderr``. + ++----------------------+---------------------------------------------------------------------+ +| ``message_type`` | Always "error" | ++----------------------+---------------------------------------------------------------------+ +| ``message`` | Error message. 
May change in arbitrary ways across restic versions. | ++----------------------+---------------------------------------------------------------------+ diff From 49a411f7acf479df6a03d4fd2f05bbe9ab803a87 Mon Sep 17 00:00:00 2001 From: Dark Dragon Date: Sun, 2 Feb 2025 17:43:54 +0100 Subject: [PATCH 893/893] Print JSON summary in all error cases --- cmd/restic/cmd_check.go | 40 +++++++++++++----------- cmd/restic/cmd_check_integration_test.go | 3 +- cmd/restic/cmd_migrate.go | 2 +- cmd/restic/cmd_prune_integration_test.go | 6 ++-- cmd/restic/integration_test.go | 3 +- 5 files changed, 31 insertions(+), 23 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 0f710d30e6f..99e933af2f3 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -46,7 +46,14 @@ Exit status is 12 if the password is incorrect. RunE: func(cmd *cobra.Command, args []string) error { term, cancel := setupTermstatus() defer cancel() - return runCheck(cmd.Context(), checkOptions, globalOptions, args, term) + summary, err := runCheck(cmd.Context(), checkOptions, globalOptions, args, term) + if globalOptions.JSON { + if err != nil && summary.NumErrors == 0 { + summary.NumErrors = 1 + } + term.Print(ui.ToJSONString(summary)) + } + return err }, PreRunE: func(_ *cobra.Command, _ []string) error { return checkFlags(checkOptions) @@ -211,10 +218,10 @@ func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions, printer progress return cleanup } -func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args []string, term *termstatus.Terminal) error { +func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args []string, term *termstatus.Terminal) (checkSummary, error) { summary := checkSummary{MessageType: "summary"} if len(args) != 0 { - return errors.Fatal("the check command expects no arguments, only options - please see `restic help check` for usage and flags") + return summary, errors.Fatal("the check command expects no arguments, only options - please see `restic help check` for usage and flags") } var printer progress.Printer @@ -232,21 +239,21 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, gopts.NoLock) if err != nil { - return err + return summary, err } defer unlock() chkr := checker.New(repo, opts.CheckUnused) err = chkr.LoadSnapshots(ctx) if err != nil { - return err + return summary, err } printer.P("load indexes\n") bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) hints, errs := chkr.LoadIndex(ctx, bar) if ctx.Err() != nil { - return ctx.Err() + return summary, ctx.Err() } errorsFound := false @@ -279,7 +286,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args summary.NumErrors += len(errs) summary.HintRepairIndex = true printer.E("\nThe repository index is damaged and must be repaired. You must run `restic repair index' to correct this.\n\n") - return errors.Fatal("repository contains errors") + return summary, errors.Fatal("repository contains errors") } orphanedPacks := 0 @@ -317,7 +324,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } } if ctx.Err() != nil { - return ctx.Err() + return summary, ctx.Err() } printer.P("check snapshots, trees and blobs\n") @@ -351,13 +358,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args // deadlocking in the case of errors. 
wg.Wait() if ctx.Err() != nil { - return ctx.Err() + return summary, ctx.Err() } if opts.CheckUnused { unused, err := chkr.UnusedBlobs(ctx) if err != nil { - return err + return summary, err } for _, id := range unused { printer.P("unused blob %v\n", id) @@ -409,7 +416,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args repoSize += size } if repoSize == 0 { - return errors.Fatal("Cannot read from a repository having size 0") + return summary, errors.Fatal("Cannot read from a repository having size 0") } subsetSize, _ := ui.ParseBytes(opts.ReadDataSubset) if subsetSize > repoSize { @@ -419,7 +426,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args printer.P("read %d bytes of data packs\n", subsetSize) } if packs == nil { - return errors.Fatal("internal error: failed to select packs to check") + return summary, errors.Fatal("internal error: failed to select packs to check") } doReadData(packs) } @@ -434,20 +441,17 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } if ctx.Err() != nil { - return ctx.Err() + return summary, ctx.Err() } - if gopts.JSON { - term.Print(ui.ToJSONString(summary)) - } if errorsFound { if len(salvagePacks) == 0 { printer.E("\nThe repository is damaged and must be repaired. Please follow the troubleshooting guide at https://restic.readthedocs.io/en/stable/077_troubleshooting.html .\n\n") } - return errors.Fatal("repository contains errors") + return summary, errors.Fatal("repository contains errors") } printer.P("no errors were found\n") - return nil + return summary, nil } // selectPacksByBucket selects subsets of packs by ranges of buckets. diff --git a/cmd/restic/cmd_check_integration_test.go b/cmd/restic/cmd_check_integration_test.go index f1e6517e093..f5a3dc39550 100644 --- a/cmd/restic/cmd_check_integration_test.go +++ b/cmd/restic/cmd_check_integration_test.go @@ -32,7 +32,8 @@ func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) { ReadData: true, CheckUnused: checkUnused, } - return runCheck(context.TODO(), opts, gopts, nil, term) + _, err := runCheck(context.TODO(), opts, gopts, nil, term) + return err }) return buf.String(), err } diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index 5c3e425edfc..f6c28e383d5 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -105,7 +105,7 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptio // the repository is already locked checkGopts.NoLock = true - err = runCheck(ctx, checkOptions, checkGopts, []string{}, term) + _, err = runCheck(ctx, checkOptions, checkGopts, []string{}, term) if err != nil { return err } diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index 536ec40d886..0561f82430f 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -112,7 +112,8 @@ func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) { createPrunableRepo(t, env) testRunPrune(t, env.gopts, pruneOpts) rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { - return runCheck(context.TODO(), checkOpts, env.gopts, nil, term) + _, err := runCheck(context.TODO(), checkOpts, env.gopts, nil, term) + return err })) } @@ -220,7 +221,8 @@ func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, o testRunCheck(t, env.gopts) } else { rtest.Assert(t, withTermStatus(env.gopts, 
func(ctx context.Context, term *termstatus.Terminal) error { - return runCheck(context.TODO(), optionsCheck, env.gopts, nil, term) + _, err := runCheck(context.TODO(), optionsCheck, env.gopts, nil, term) + return err }) != nil, "check should have reported an error") } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 777573f263a..3ef98a16866 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -88,7 +88,8 @@ func TestListOnce(t *testing.T) { createPrunableRepo(t, env) testRunPrune(t, env.gopts, pruneOpts) rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { - return runCheck(context.TODO(), checkOpts, env.gopts, nil, term) + _, err := runCheck(context.TODO(), checkOpts, env.gopts, nil, term) + return err })) rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term)
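Scripts consuming ``restic check --json`` can treat the output as JSON lines: per the documentation added above, ``error`` messages are printed on stderr while the final ``summary`` message goes to the regular output stream. The following consumer-side sketch is illustrative only and not part of restic; the field names follow the tables in ``doc/075_scripting.rst``, and the program name and the way output is piped into it are assumptions for the example.

.. code-block:: go

    package main

    import (
        "bufio"
        "encoding/json"
        "fmt"
        "os"
    )

    // checkMessage covers both message types documented for `restic check --json`:
    // "summary" and "error". Fields that are absent in a given line stay zero.
    type checkMessage struct {
        MessageType        string   `json:"message_type"`
        Message            string   `json:"message"`
        NumErrors          int      `json:"num_errors"`
        BrokenPacks        []string `json:"broken_packs"`
        SuggestRepairIndex bool     `json:"suggest_repair_index"`
        SuggestPrune       bool     `json:"suggest_prune"`
    }

    func main() {
        // Example invocation (hypothetical): restic check --json 2>&1 | go run parsecheck.go
        sc := bufio.NewScanner(os.Stdin)
        for sc.Scan() {
            var msg checkMessage
            if err := json.Unmarshal(sc.Bytes(), &msg); err != nil {
                continue // skip any non-JSON lines
            }
            switch msg.MessageType {
            case "error":
                fmt.Printf("check error: %s\n", msg.Message)
            case "summary":
                fmt.Printf("errors: %d, damaged packs: %d\n", msg.NumErrors, len(msg.BrokenPacks))
                if msg.SuggestRepairIndex {
                    fmt.Println("hint: run `restic repair index`")
                }
                if msg.SuggestPrune {
                    fmt.Println("hint: run `restic prune`")
                }
            }
        }
    }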