diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
index 489bc1966e8c7..07356875120c5 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
@@ -116,22 +116,7 @@
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL_DEFAULT;
 import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_STANDARD_OPTIONS;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_APPEND;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_CREATE;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_CREATE_NON_RECURSIVE;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_DELETE;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_EXIST;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_GET_DELEGATION_TOKEN;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_GET_FILE_STATUS;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_LIST_STATUS;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_MKDIRS;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_OPEN;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CALL_RENAME;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.DIRECTORIES_CREATED;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.DIRECTORIES_DELETED;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.ERROR_IGNORED;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.FILES_CREATED;
-import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.FILES_DELETED;
+import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.*;
 import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.CPK_IN_NON_HNS_ACCOUNT_ERROR_MESSAGE;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.DATA_BLOCKS_BUFFER;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_IS_HNS_ENABLED;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index c69bfadc85d96..3a59630592eaf 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -21,8 +21,8 @@
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.lang.reflect.InvocationTargetException;
 import java.io.UnsupportedEncodingException;
+import java.lang.reflect.InvocationTargetException;
 import java.net.HttpURLConnection;
 import java.net.MalformedURLException;
 import java.net.URI;
@@ -51,36 +51,22 @@
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;

-import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.fs.azurebfs.constants.AbfsServiceType;
-import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation;
-import org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider;
-import org.apache.hadoop.fs.azurebfs.security.ContextProviderEncryptionAdapter;
-import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
-import org.apache.hadoop.fs.azurebfs.security.NoContextEncryptionAdapter;
-import org.apache.hadoop.fs.azurebfs.services.AbfsClientHandler;
-import org.apache.hadoop.fs.azurebfs.utils.EncryptionType;
-import org.apache.hadoop.fs.impl.BackReference;
-import org.apache.hadoop.fs.PathIOException;
-
-import org.apache.hadoop.util.Preconditions;
-import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
-import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
-import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.EtagSource;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
-import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
+import org.apache.hadoop.fs.azurebfs.constants.AbfsServiceType;
 import org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations;
+import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
 import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
@@ -90,44 +76,52 @@
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidFileSystemPropertyException;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidUriAuthorityException;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidUriException;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TrileanConversionException;
 import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode;
 import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultEntrySchema;
 import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultSchema;
-import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TrileanConversionException;
 import org.apache.hadoop.fs.azurebfs.enums.Trilean;
-import org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider;
+import org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider;
 import org.apache.hadoop.fs.azurebfs.extensions.ExtensionHelper;
+import org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider;
 import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider;
 import org.apache.hadoop.fs.azurebfs.oauth2.AzureADAuthenticator;
 import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformer;
 import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformerInterface;
+import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
+import org.apache.hadoop.fs.azurebfs.security.ContextProviderEncryptionAdapter;
+import org.apache.hadoop.fs.azurebfs.security.NoContextEncryptionAdapter;
 import org.apache.hadoop.fs.azurebfs.services.AbfsAclHelper;
 import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
 import org.apache.hadoop.fs.azurebfs.services.AbfsClientContext;
 import org.apache.hadoop.fs.azurebfs.services.AbfsClientContextBuilder;
+import org.apache.hadoop.fs.azurebfs.services.AbfsClientHandler;
 import org.apache.hadoop.fs.azurebfs.services.AbfsClientRenameResult;
 import org.apache.hadoop.fs.azurebfs.services.AbfsCounters;
+import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation;
 import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream;
 import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamContext;
 import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamStatisticsImpl;
+import org.apache.hadoop.fs.azurebfs.services.AbfsLease;
 import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream;
 import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamContext;
 import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamStatisticsImpl;
+import org.apache.hadoop.fs.azurebfs.services.AbfsPerfInfo;
+import org.apache.hadoop.fs.azurebfs.services.AbfsPerfTracker;
 import org.apache.hadoop.fs.azurebfs.services.AbfsPermission;
 import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;
 import org.apache.hadoop.fs.azurebfs.services.AuthType;
 import org.apache.hadoop.fs.azurebfs.services.ExponentialRetryPolicy;
-import org.apache.hadoop.fs.azurebfs.services.StaticRetryPolicy;
-import org.apache.hadoop.fs.azurebfs.services.AbfsLease;
-import org.apache.hadoop.fs.azurebfs.services.SharedKeyCredentials;
-import org.apache.hadoop.fs.azurebfs.services.AbfsPerfTracker;
-import org.apache.hadoop.fs.azurebfs.services.AbfsPerfInfo;
 import org.apache.hadoop.fs.azurebfs.services.ListingSupport;
+import org.apache.hadoop.fs.azurebfs.services.SharedKeyCredentials;
+import org.apache.hadoop.fs.azurebfs.services.StaticRetryPolicy;
 import org.apache.hadoop.fs.azurebfs.utils.Base64;
 import org.apache.hadoop.fs.azurebfs.utils.CRC64;
 import org.apache.hadoop.fs.azurebfs.utils.DateTimeUtils;
+import org.apache.hadoop.fs.azurebfs.utils.EncryptionType;
 import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
 import org.apache.hadoop.fs.azurebfs.utils.UriUtils;
+import org.apache.hadoop.fs.impl.BackReference;
 import org.apache.hadoop.fs.impl.OpenFileParameters;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -136,7 +130,11 @@
 import org.apache.hadoop.fs.store.DataBlocks;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
+import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
+import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;
 import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.SemaphoredDelegatingExecutor;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.http.client.utils.URIBuilder;
@@ -699,7 +697,7 @@ public OutputStream createFile(final Path path,
       if (getClient().getEncryptionType() == EncryptionType.ENCRYPTION_CONTEXT) {
         contextEncryptionAdapter = new ContextProviderEncryptionAdapter(
-            getClient().getEncryptionContextProvider(), getRelativePath(path));
+            createClient.getEncryptionContextProvider(), getRelativePath(path));
       } else {
         contextEncryptionAdapter = NoContextEncryptionAdapter.getInstance();
       }
@@ -714,7 +712,7 @@ public OutputStream createFile(final Path path,
       );
     } else {
-      op = getClient().createPath(relativePath, true,
+      op = createClient.createPath(relativePath, true,
           overwrite,
          new Permissions(isNamespaceEnabled, permission, umask),
          isAppendBlob,
@@ -763,7 +761,7 @@ private AbfsRestOperation conditionalCreateOverwriteFile(final String relativePa
       // Trigger a create with overwrite=false first so that eTag fetch can be
       // avoided for cases when no pre-existing file is present (major portion
       // of create file traffic falls into the case of no pre-existing file).
-      op = getClient().createPath(relativePath, true, false, permissions,
+      op = createClient.createPath(relativePath, true, false, permissions,
           isAppendBlob, null, contextEncryptionAdapter, tracingContext);
     } catch (AbfsRestOperationException e) {
@@ -787,7 +785,7 @@ private AbfsRestOperation conditionalCreateOverwriteFile(final String relativePa
       try {
         // overwrite only if eTag matches with the file properties fetched befpre
-        op = getClient().createPath(relativePath, true, true, permissions,
+        op = createClient.createPath(relativePath, true, true, permissions,
            isAppendBlob, eTag, contextEncryptionAdapter, tracingContext);
       } catch (AbfsRestOperationException ex) {
         if (ex.getStatusCode() == HttpURLConnection.HTTP_PRECON_FAILED) {
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java
index 7046aecb2982b..3d14ec848df9d 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java
@@ -325,10 +325,5 @@ public static ApiVersion getCurrentVersion() {
   public static final String COPY_STATUS_ABORTED = "aborted";
   public static final String COPY_STATUS_FAILED = "failed";

-  public static final String COPY_STATUS_SUCCESS = "success";
-  public static final String COPY_STATUS_PENDING = "pending";
-  public static final String COPY_STATUS_ABORTED = "aborted";
-  public static final String COPY_STATUS_FAILED = "failed";
-
   private AbfsHttpConstants() {}
 }
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java
index e0167855938c4..e5909d22f48c6 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java
@@ -109,6 +109,11 @@ public final class HttpHeaderConfigurations {
    * {@value}
    */
   public static final String X_MS_BLOB_TYPE = "x-ms-blob-type";
+
+  /**
+   * Http Request Header for copy id.
+   * {@value}
+   */
   public static final String X_MS_COPY_ID = "x-ms-copy-id";

   /**
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsErrors.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsErrors.java
index 40601e773afe6..a5864290b139c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsErrors.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsErrors.java
@@ -62,8 +62,10 @@ public final class AbfsErrors {
    * Exception message on filesystem init if token-provider-auth-type configs are provided
    */
   public static final String UNAUTHORIZED_SAS = "Incorrect SAS token provider configured for non-hierarchical namespace account.";
-  public static final String ERR_RENAME_BLOB = "FNS-Blob rename was not successful for source and destination path: ";
-  public static final String ERR_DELETE_BLOB = "FNS-Blob delete was not successful for path: ";
+  public static final String ERR_RENAME_BLOB =
+      "FNS-Blob rename was not successful for source and destination path: ";
+  public static final String ERR_DELETE_BLOB =
+      "FNS-Blob delete was not successful for path: ";
   public static final String ATOMIC_DIR_RENAME_RECOVERY_ON_GET_PATH_EXCEPTION =
       "Path had to be recovered from atomic rename operation.";
   private AbfsErrors() {}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/UriUtils.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/UriUtils.java
index 3ba8565b88c5f..959822a1fbc86 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/UriUtils.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/UriUtils.java
@@ -58,24 +58,15 @@ public final class UriUtils {
   private static final Logger LOG = LoggerFactory.getLogger(
       UriUtils.class);

-  private static final String ABFS_URI_REGEX
-      = "[^.]+\\.dfs\\.(preprod\\.){0,1}core\\.windows\\.net";
-
-  private static final Pattern ABFS_URI_PATTERN = Pattern.compile(
-      ABFS_URI_REGEX);
-
+  private static final String ABFS_URI_REGEX = "[^.]+\\.dfs\\.(preprod\\.){0,1}core\\.windows\\.net";
+  private static final Pattern ABFS_URI_PATTERN = Pattern.compile(ABFS_URI_REGEX);
   private static final Set<String> FULL_MASK_PARAM_KEYS = new HashSet<>(
       Collections.singleton(QUERY_PARAM_SIGNATURE));
-
   private static final Set<String> PARTIAL_MASK_PARAM_KEYS = new HashSet<>(
       Arrays.asList(QUERY_PARAM_SKOID, QUERY_PARAM_SAOID, QUERY_PARAM_SUOID));
-
   private static final Character CHAR_MASK = 'X';
-
   private static final String FULL_MASK = "XXXXX";
-
   private static final int DEFAULT_QUERY_STRINGBUILDER_CAPACITY = 550;
-
   private static final int PARTIAL_MASK_VISIBLE_LEN = 18;

   /**
@@ -121,9 +112,7 @@ public static String extractAccountNameFromHostName(final String hostName) {
    */
   public static String generateUniqueTestPath() {
     String testUniqueForkId = System.getProperty("test.unique.fork.id");
-    return testUniqueForkId == null
-        ? "/test"
-        : "/" + testUniqueForkId + "/test";
+    return testUniqueForkId == null ? "/test" : "/" + testUniqueForkId + "/test";
"/test" : "/" + testUniqueForkId + "/test"; } public static String maskUrlQueryParameters(List keyValueList, @@ -150,8 +139,7 @@ public static String maskUrlQueryParameters(List keyValueList, for (NameValuePair keyValuePair : keyValueList) { String key = keyValuePair.getName(); if (key.isEmpty()) { - throw new IllegalArgumentException( - "Query param key should not be empty"); + throw new IllegalArgumentException("Query param key should not be empty"); } String value = keyValuePair.getValue(); maskedUrl.append(key); @@ -204,8 +192,7 @@ public static String getMaskedUrl(URL url) { */ public static URL changeUrlFromBlobToDfs(URL url) throws InvalidUriException { try { - url = new URL(replacedUrl(url.toString(), ABFS_BLOB_DOMAIN_NAME, - ABFS_DFS_DOMAIN_NAME)); + url = new URL(replacedUrl(url.toString(), ABFS_BLOB_DOMAIN_NAME, ABFS_DFS_DOMAIN_NAME)); } catch (MalformedURLException ex) { throw new InvalidUriException(url.toString()); } @@ -221,8 +208,7 @@ public static URL changeUrlFromBlobToDfs(URL url) throws InvalidUriException { */ public static URL changeUrlFromDfsToBlob(URL url) throws InvalidUriException { try { - url = new URL(replacedUrl(url.toString(), ABFS_DFS_DOMAIN_NAME, - ABFS_BLOB_DOMAIN_NAME)); + url = new URL(replacedUrl(url.toString(), ABFS_DFS_DOMAIN_NAME, ABFS_BLOB_DOMAIN_NAME)); } catch (MalformedURLException ex) { throw new InvalidUriException(url.toString()); } @@ -238,9 +224,7 @@ public static URL changeUrlFromDfsToBlob(URL url) throws InvalidUriException { * @param newString the string to be replaced with. * @return updated URL */ - private static String replacedUrl(String baseUrl, - String oldString, - String newString) { + private static String replacedUrl(String baseUrl, String oldString, String newString) { int startIndex = baseUrl.toString().indexOf("//") + 2; int endIndex = baseUrl.toString().indexOf("/", startIndex); if (oldString == null || newString == null || startIndex < 0 diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java index 363151bad2881..2cc5046fd14fc 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java @@ -29,49 +29,47 @@ import java.util.concurrent.Future; import org.assertj.core.api.Assertions; +import org.junit.Assume; import org.junit.Test; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; +import org.apache.hadoop.fs.azurebfs.contracts.services.StorageErrorResponseSchema; import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter; import org.apache.hadoop.fs.azurebfs.services.AbfsBlobClient; -import org.apache.hadoop.fs.azurebfs.services.AbfsDfsClient; -import org.apache.hadoop.fs.azurebfs.contracts.services.StorageErrorResponseSchema; import org.apache.hadoop.fs.azurebfs.services.AbfsClient; import org.apache.hadoop.fs.azurebfs.services.AbfsClientTestUtil; -import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; +import org.apache.hadoop.fs.azurebfs.services.AbfsDfsClient; import 
+import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;
 import org.apache.hadoop.fs.azurebfs.services.ITestAbfsClient;
 import org.apache.hadoop.fs.azurebfs.services.TestAbfsPerfTracker;
 import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
 import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.ReflectionUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.test.ReflectionUtils;

 import static java.net.HttpURLConnection.HTTP_BAD_REQUEST;
+import static java.net.HttpURLConnection.HTTP_FORBIDDEN;
 import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
 import static java.net.HttpURLConnection.HTTP_OK;
-import static java.net.HttpURLConnection.HTTP_FORBIDDEN;
-
-import static org.junit.Assume.assumeTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doCallRealMethod;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.ROOT_PATH;
 import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.HTTP_METHOD_DELETE;
+import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.ROOT_PATH;
 import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_DELETE_CONSIDERED_IDEMPOTENT;
 import static org.apache.hadoop.fs.azurebfs.services.AbfsRestOperationType.DeletePath;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertDeleted;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;

 /**
  * Test delete operation.
@@ -180,7 +178,7 @@ public Void call() throws Exception {
   @Test
   public void testDeleteIdempotency() throws Exception {
-    assumeTrue(DEFAULT_DELETE_CONSIDERED_IDEMPOTENT);
+    Assume.assumeTrue(DEFAULT_DELETE_CONSIDERED_IDEMPOTENT);
     // Config to reduce the retry and maxBackoff time for test run
     AbfsConfiguration abfsConfig
         = TestAbfsConfigurationFieldsValidation.updateRetryConfigs(
@@ -242,7 +240,7 @@ public void testDeleteIdempotencyTriggerHttp404() throws Exception {
      * endpoint, the orchestration would be done by the client. The idempotency
      * issue would not happen for blob endpoint.
      */
-    assumeTrue(fs.getAbfsClient() instanceof AbfsDfsClient);
+    Assume.assumeTrue(fs.getAbfsClient() instanceof AbfsDfsClient);
     AbfsClient client = ITestAbfsClient.createTestClientFromCurrentContext(
         fs.getAbfsStore().getClient(),
         this.getConfiguration());
@@ -355,7 +353,7 @@ public void deleteBlobDirParallelThreadToDeleteOnDifferentTracingContext()
    * @throws IOException if the file system client cannot be retrieved
    */
   private void assumeBlobClient() throws IOException {
-    assumeTrue(getFileSystem().getAbfsClient() instanceof AbfsBlobClient);
+    Assume.assumeTrue(getFileSystem().getAbfsClient() instanceof AbfsBlobClient);
   }

   /**
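
Reviewer note (not part of the patch): the getClient() -> createClient substitutions in AzureBlobFileSystemStore route every create-path call through the client instance selected for create operations, while the conditional-overwrite logic around them is unchanged. As a reading aid, here is a minimal sketch of the flow that conditionalCreateOverwriteFile implements, reconstructed only from the fragments visible in this diff; the fetchETag helper and the exact status-code checks are illustrative assumptions, not code from the patch.

import java.net.HttpURLConnection;

import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;

// relativePath, permissions, isAppendBlob, contextEncryptionAdapter,
// tracingContext and createClient are assumed to come from the enclosing
// createFile()/conditionalCreateOverwriteFile() scope shown in the diff.
AbfsRestOperation op;
try {
  // Common case: no pre-existing file, so create with overwrite=false and
  // skip the eTag round trip entirely.
  op = createClient.createPath(relativePath, true, false, permissions,
      isAppendBlob, null, contextEncryptionAdapter, tracingContext);
} catch (AbfsRestOperationException e) {
  if (e.getStatusCode() != HttpURLConnection.HTTP_CONFLICT) {
    throw e;
  }
  // A file already exists: fetch its eTag (assumed helper), then retry with
  // overwrite=true conditioned on that eTag, so a concurrent writer surfaces
  // as HTTP_PRECON_FAILED (412) instead of being silently overwritten.
  String eTag = fetchETag(relativePath, tracingContext);
  op = createClient.createPath(relativePath, true, true, permissions,
      isAppendBlob, eTag, contextEncryptionAdapter, tracingContext);
}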