Skip to content

Commit

Permalink
Merge pull request #4 from Netcracker/feature/unique_names
Browse files Browse the repository at this point in the history
Unique prefix ability for backup names
  • Loading branch information
mrMigles authored Dec 20, 2024
2 parents 4826914 + 4b32b53 commit 784cd6a
Show file tree
Hide file tree
Showing 6 changed files with 44 additions and 15 deletions.
10 changes: 10 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@ The config file parameters are given below.
| s3_ssl_verify | | Whether or not to verify SSL certificates for S3 connections | S3_SSL_VERIFY |
| s3_certs_path | `""` | Path to folder with TLS certificates for S3 connections, only takes effect if `s3_ssl_verify` is `true`. Value `""` means that boto3 default certificates will be used | S3_CERTS_PATH |
| tls_enabled | `false` | Whether TLS is enabled | TLS_ENABLED |
| allow_prefix | | Allows specifying an additional prefix for granular backup names | ALLOW_PREFIX |
| certs_path | `/tls/` | Path to folder with TLS certificates | CERTS_PATH |
| logs_to_stdout | `false` | Prints logs from .console and ${restore-id}.log to stdout (pod logs) | LOGS_TO_STDOUT |
<!-- markdownlint-enable line-length -->
Expand Down Expand Up @@ -235,6 +236,15 @@ For DBs with collections and queries use the following command:
curl -XPOST -u username:password -v -H "Content-Type: application/json" -d '{"dbs":["db_name1",{"db_name2":{"collections":["first",{"second":{"test1":"1"}}]}}]}' localhost:8080/backup
```

It is possible to add a custom prefix to the backup folder (vault) name:
***NOTE***: to enable this, set the `ALLOW_PREFIX` environment variable to `true`.
```bash
curl -XPOST -u username:password -v -H "Content-Type: application/json" -d '{"dbs":["db_name1","db_name2"], "prefix":"custom"}' localhost:8080/backup
```

Returned vault name will be like `<prefix>_<namespace>_<timestamp>`.
The `<prefix>` part is optional, but the `<namespace>` part is always added when `ALLOW_PREFIX` is `true` (the namespace is taken from the `WATCH_NAMESPACE` environment variable).

#### Run Manual Backup That Will Not Be Deleted Ever

If you do not want your backup to be evicted, add `"allow_eviction":"False"` to your request. It works both for full
Expand Down
1 change: 1 addition & 0 deletions src/backup-daemon.conf
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,7 @@
custom_vars: []
publish_custom_vars: "false"

allow_prefix: ${?ALLOW_PREFIX}
log {
level: INFO
level: ${?LOG_LEVEL}
Expand Down
13 changes: 7 additions & 6 deletions src/backup-daemon.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,11 +61,11 @@ def __init__(self, proc_config, proc_type):
self.s3Client = storage.S3Client(proc_config['s3_url'], proc_config['s3_bucket'], proc_config['s3_key_id'],
proc_config['s3_key_secret'], ssl_verify)
self.storage = storage.Storage(proc_config['storage_root'], proc_config['external_storage_root'],
file_system=storage.S3FileSystem(self.s3Client))
file_system=storage.S3FileSystem(self.s3Client), allow_prefix=proc_config['allow_prefix'])
self.s3_enabled = True
else:
self.storage = storage.Storage(
proc_config['storage_root'], proc_config['external_storage_root'])
proc_config['storage_root'], proc_config['external_storage_root'], allow_prefix=proc_config['allow_prefix'])

self.external_storage = proc_config['external_storage_root']
self.db = db.DB(self.storage.root + "/backup.sqlite")
Expand Down Expand Up @@ -114,7 +114,7 @@ def terminate_process_by_pid(self, pid):
except Exception as e:
log.error(f"Can't terminate pid {pid}, {e}")

def enqueue_backup(self, reason, custom_variables, allow_eviction=True, dbs=None, sharded=False, backup_path=None):
def enqueue_backup(self, reason, custom_variables, allow_eviction=True, dbs=None, sharded=False, backup_path=None, backup_prefix=None):
if dbs:
is_granular = True
else:
Expand All @@ -123,7 +123,7 @@ def enqueue_backup(self, reason, custom_variables, allow_eviction=True, dbs=None
if backup_path is None:
vault = self.storage.open_vault(vault_name=backup_path, allow_eviction=allow_eviction,
is_granular=is_granular,
is_sharded=sharded, is_external=False)
is_sharded=sharded, is_external=False, vault_path=backup_path, backup_prefix=backup_prefix)
else:
vault = self.storage.open_vault(allow_eviction=allow_eviction,
is_granular=False,
Expand Down Expand Up @@ -584,7 +584,7 @@ def get_processor(self, _type):
else:
return self.backup_processor

def enqueue_backup(self, reason, custom_variables, allow_eviction=True, dbs=None, proc_type=FULL, sharded=False, backup_path=None):
def enqueue_backup(self, reason, custom_variables, allow_eviction=True, dbs=None, proc_type=FULL, sharded=False, backup_path=None, backup_prefix=None):
processor = self.get_processor(proc_type)
dir_type = None
if dbs and backup_path is None:
Expand Down Expand Up @@ -614,7 +614,7 @@ def enqueue_backup(self, reason, custom_variables, allow_eviction=True, dbs=None
"Existing backups not found. Previous full or incremental backups must exist"
" before doing incremental backup")

return processor.enqueue_backup(reason, custom_variables, allow_eviction, dbs, sharded, backup_path)
return processor.enqueue_backup(reason, custom_variables, allow_eviction, dbs, sharded, backup_path, backup_prefix)

def enqueue_eviction(self, proc_type=FULL):
processor = self.get_processor(proc_type)
Expand Down Expand Up @@ -669,6 +669,7 @@ def fetch_config(config_type='full'):
'granular_eviction_policy': conf.get_string(config_prefix + 'granular_eviction'),
'incremental_enabled': conf.get_string('incremental_enabled'),
'termination_cmd': conf.get_string('termination_command', default=''),
'allow_prefix': conf.get_string('allow_prefix', default='')
}
if len(dict["scheduled_dbs"]) > 0:
dict["scheduled_dbs"] = dict["scheduled_dbs"].split(",")
Expand Down
3 changes: 2 additions & 1 deletion src/httpapi.py
Original file line number Diff line number Diff line change
Expand Up @@ -246,10 +246,11 @@ def post(self):
proc_type = req_helper.get_proc_type()
sharded = req_helper.get_sharded()
backup_path = req_helper.get_backup_path()
backup_prefix = req_helper.get_backup_prefix()

try:
backup_id = backupExecutor.enqueue_backup("http call", custom_variables, allow_eviction,
dbs, proc_type, sharded, backup_path)
dbs, proc_type, sharded, backup_path, backup_prefix)
return Response(backup_id, mimetype="application/json")
except IllegalStateException as ise:
error_msg = "Error during backup process: %s" % ise.message
Expand Down
8 changes: 7 additions & 1 deletion src/request.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@

safe_args = re.compile('[0-9a-z-_]', re.I)
log = logging.getLogger("RequestHandler")
allowed_api_keys = ["allow_eviction", "args", "dbs", "vault", "ts", "changeDbNames", "sharded", "externalBackupPath"]
allowed_api_keys = ["allow_eviction", "args", "dbs", "vault", "ts", "changeDbNames", "sharded", "externalBackupPath", "prefix"]
allowed_extensions = ["zip", "tgz"]


Expand Down Expand Up @@ -82,6 +82,12 @@ def get_backup_path(self):
else:
return None

def get_backup_prefix(self):
    """Return the optional backup folder prefix from the request body.

    Returns the value of the ``prefix`` key when the client supplied one,
    otherwise None. (``prefix`` must also be whitelisted in
    ``allowed_api_keys`` to reach this point.)
    """
    body = self.__content
    return body['prefix'] if 'prefix' in body else None

def get_external(self):
content = self.__content

Expand Down
24 changes: 17 additions & 7 deletions src/storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -285,13 +285,15 @@ def get_type(self):
class Storage(object):
__log = logging.getLogger("Storage")

def __init__(self, root, external_root=None, file_system=FileSystem()):
def __init__(self, root, external_root=None, file_system=FileSystem(), allow_prefix=False):
self.__log.info("Init storage object with storage root: %s external root: %s" % (root, external_root))
self.root = root
self.namespace = os.getenv("WATCH_NAMESPACE", "")
self.file_system = file_system
self.granular_folder = self.root + "/" + GRANULAR
self.restore_logs_folder = self.root + "/restore_logs"
self.s3_enabled = isinstance(file_system, S3FileSystem)
self.allow_prefix = allow_prefix

if external_root is not None:
self.external_root = external_root
Expand All @@ -301,6 +303,15 @@ def __init__(self, root, external_root=None, file_system=FileSystem()):
if not file_system.exists(self.restore_logs_folder):
self.file_system.makedirs(self.restore_logs_folder)

def get_vault_name(self, prefix, is_granular):
    """Build the folder (vault) name for a new backup.

    Returns a bare timestamp unless all of the following hold: the backup
    is granular, a namespace is known (from the WATCH_NAMESPACE env var)
    and prefixing is enabled via ``allow_prefix``. In that case the name
    is ``[<prefix>_]<namespace>_<timestamp>``.

    ``allow_prefix`` may arrive either as a bool (the Storage constructor
    default) or as a raw config string from the ALLOW_PREFIX env var
    (e.g. "true"/"false"); it is normalized here because previously the
    non-empty string "false" was truthy and wrongly enabled prefixing.
    """
    prefix_enabled = str(self.allow_prefix).strip().lower() in ("true", "1", "yes")
    timestamp = datetime.now().strftime(VAULT_NAME_FORMAT)
    if not (is_granular and self.namespace and prefix_enabled):
        return timestamp
    parts = []
    if prefix:
        parts.append(prefix)
    parts.extend([self.namespace, timestamp])
    return "_".join(parts)

def get_nonevictable_vaults(self):
    """Return every stored vault whose eviction is disallowed."""
    return list(filter(lambda v: v.is_nonevictable(), self.list()))

Expand Down Expand Up @@ -350,7 +361,7 @@ def list(self, timestamps_only=False, convert_to_ts=False, type="all", storage_p
vaults = [
self.get_vault(dirname, storage_path is not None, storage_path, skip_fs_check=True)
for dirname in dirs
if VAULT_DIRNAME_MATCHER.match(dirname.replace(GRANULAR + '/', '')) is not None
if VAULT_DIRNAME_MATCHER.match(dirname.split("_")[-1:][0].replace(GRANULAR + '/', '')) is not None
]

if type == SHARDED:
Expand Down Expand Up @@ -398,7 +409,7 @@ def fs_space(self):
return fsutil.get_fs_space(self.root)

def open_vault(self, vault_name=None, allow_eviction=True, is_granular=False, is_sharded=False,
is_external=False, vault_path=None):
is_external=False, vault_path=None, backup_prefix=None):
vault = self.get_vault(vault_name, is_external, vault_path)
self.__log.info(vault)
if vault is not None:
Expand All @@ -416,13 +427,13 @@ def open_vault(self, vault_name=None, allow_eviction=True, is_granular=False, is
folder = self.external_root + "/" + vault_path
if not vault_name:
try:
result = Vault("%s/%s" % (folder, datetime.now().strftime(VAULT_NAME_FORMAT)),
result = Vault("%s/%s" % (folder, self.get_vault_name(backup_prefix, is_granular)),
allow_eviction, is_sharded, file_system=self.file_system)
except StorageLocationAlreadyExistsException as e:
# sleep 2 secs if vault exists
time.sleep(2)
try:
result = Vault("%s/%s" % (folder, datetime.now().strftime(VAULT_NAME_FORMAT)),
result = Vault("%s/%s" % (folder, self.get_vault_name(backup_prefix, is_granular)),
allow_eviction, is_sharded, file_system=self.file_system)
except StorageLocationAlreadyExistsException as e:
logging.error(str(e))
Expand Down Expand Up @@ -463,7 +474,6 @@ def prot_get_as_stream(self, backup_id, archive_file):
@total_ordering
class Vault(object):
__log = logging.getLogger("VaultLock")

def __init__(self, folder, allow_eviction=True, sharded=False, external=False,
file_system=FileSystem()):
self.folder = folder
Expand Down Expand Up @@ -569,7 +579,7 @@ def __enter__(self):
def create_time(self):
try:
folder_name = self.get_name()
d = datetime.strptime(folder_name, VAULT_NAME_FORMAT)
d = datetime.strptime(folder_name.split("_")[-1:][0], VAULT_NAME_FORMAT)
return time.mktime(d.timetuple())
except ValueError:
self.__log.warning(
Expand Down

0 comments on commit 784cd6a

Please sign in to comment.