diff --git a/.github/workflows/ci-integration-tests.yml b/.github/workflows/ci-integration-tests.yml
new file mode 100644
index 00000000..a3dcf297
--- /dev/null
+++ b/.github/workflows/ci-integration-tests.yml
@@ -0,0 +1,31 @@
+name: Integration tests
+run-name: Integration tests for ${{ github.ref }}
+
+on:
+  pull_request:
+    branches:
+      - 'main'
+  merge_group:
+    types: [checks_requested]
+
+env:
+  AWS_REGION: "us-east-1"
+
+jobs:
+  ci-integration-tests:
+    permissions:
+      id-token: write # This is required for requesting the JWT with configure-aws-credentials
+      contents: read # This is required for actions/checkout
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Source Code
+        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@5fd3084fc36e372ff1fff382a39b10d03659f355 # v2
+        with:
+          role-to-assume: ${{ vars.AMPLIFY_UI_ANDROID_CI_TESTS_ROLE }}
+          aws-region: ${{ env.AWS_REGION }}
+      - name: Run Integration Tests
+        uses: aws-actions/aws-codebuild-run-build@f202c327329cbbebd13f986f74af162a8539b5fd # v1
+        with:
+          project-name: Amplify-UI-Android-Integration-Test
diff --git a/authenticator/src/test/java/com/amplifyframework/ui/authenticator/AuthenticatorViewModelTest.kt b/authenticator/src/test/java/com/amplifyframework/ui/authenticator/AuthenticatorViewModelTest.kt
index 45cd2d8f..016fa2d4 100644
--- a/authenticator/src/test/java/com/amplifyframework/ui/authenticator/AuthenticatorViewModelTest.kt
+++ b/authenticator/src/test/java/com/amplifyframework/ui/authenticator/AuthenticatorViewModelTest.kt
@@ -57,6 +57,7 @@ class AuthenticatorViewModelTest {
 
     @Test
     fun `start only executes once`() = runTest {
+        coEvery { authProvider.fetchAuthSession() } returns AmplifyResult.Error(mockAuthException())
        viewModel.start(mockAuthConfiguration())
        viewModel.start(mockAuthConfiguration())
        advanceUntilIdle()
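Editor's note: the new `coEvery` line stubs the suspend `fetchAuthSession()` call so `start()` has a deterministic (failing) session fetch to work against. A minimal sketch of the pattern, using a hypothetical `AuthProvider` stand-in rather than the real test fixture:

    import io.mockk.coEvery
    import io.mockk.mockk
    import kotlinx.coroutines.runBlocking

    // Hypothetical stand-in for the provider mocked in the test above.
    interface AuthProvider {
        suspend fun fetchAuthSession(): Result<String>
    }

    fun main() = runBlocking {
        val provider = mockk<AuthProvider>()
        // coEvery is the suspend-aware counterpart of every; MockK mocks are
        // strict by default, so an unstubbed fetchAuthSession() would throw.
        coEvery { provider.fetchAuthSession() } returns Result.failure(IllegalStateException("no session"))
        println(provider.fetchAuthSession()) // Failure(java.lang.IllegalStateException: no session)
    }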
diff --git a/build-logic/plugins/src/main/kotlin/AndroidLibraryConventionPlugin.kt b/build-logic/plugins/src/main/kotlin/AndroidLibraryConventionPlugin.kt
index c97afd55..d59295c0 100644
--- a/build-logic/plugins/src/main/kotlin/AndroidLibraryConventionPlugin.kt
+++ b/build-logic/plugins/src/main/kotlin/AndroidLibraryConventionPlugin.kt
@@ -113,8 +113,11 @@ class AndroidLibraryConventionPlugin : Plugin<Project> {
             // Needed when running integration tests. The oauth2 library relies on two
             // dependencies (Apache's httpcore and httpclient), both of which include
             // META-INF/DEPENDENCIES. Tried a couple other options to no avail.
+            // More collisions occurred using JUnit, so also exclude the LICENSE-*.md files.
             packaging {
                 resources.excludes.add("META-INF/DEPENDENCIES")
+                resources.excludes.add("META-INF/LICENSE.md")
+                resources.excludes.add("META-INF/LICENSE-notice.md")
             }
 
             buildFeatures {
diff --git a/build.gradle.kts b/build.gradle.kts
index 2c05cb5f..fb92cf53 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -79,3 +79,5 @@ dependencies {
     kover(project(":authenticator"))
     kover(project(":liveness"))
 }
+
+apply(from = rootProject.file("configuration/instrumentation-tests.gradle"))
diff --git a/configuration/instrumentation-tests.gradle b/configuration/instrumentation-tests.gradle
new file mode 100644
index 00000000..b8d9bec9
--- /dev/null
+++ b/configuration/instrumentation-tests.gradle
@@ -0,0 +1,45 @@
+/*
+Backends still need to be created for s3, pinpoint, predictions, and core.
+ */
+def module_backends = [
+    'liveness' : 'LivenessIntegTests',
+]
+
+def canaryTests = [
+//    'liveness' : [''],
+]
+
+subprojects {
+    afterEvaluate { project ->
+        if (module_backends.containsKey(project.name)) {
+            task runTestsInDeviceFarm {
+                doLast {
+                    exec {
+                        commandLine("$rootDir.path/scripts/run_test_in_devicefarm.sh")
+                        args([project.name])
+                    }
+                }
+            }
+//            task runNightlyTestsInDeviceFarmPool {
+//                dependsOn(assembleAndroidTest)
+//                doLast {
+//                    exec {
+//                        commandLine("$rootDir.path/scripts/run_nightly_tests_in_devicefarm_pool.sh")
+//                        args([project.name])
+//                    }
+//                }
+//            }
+//            task runCanaryInDeviceFarm {
+//                dependsOn(assembleAndroidTest)
+//                doLast {
+//                    for (canary in canaryTests[project.name]) {
+//                        exec {
+//                            commandLine("$rootDir.path/scripts/run_canary_in_devicefarm.sh")
+//                            args(project.name, canary)
+//                        }
+//                    }
+//                }
+//            }
+        }
+    }
+}
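Editor's note: the Groovy script above conditionally registers a `runTestsInDeviceFarm` task on each module listed in `module_backends`. For readers more used to the Kotlin DSL the rest of the build uses, a rough equivalent of the active part (a sketch only; it assumes the same script path and module map):

    // build.gradle.kts sketch of the task registration above.
    val moduleBackends = mapOf("liveness" to "LivenessIntegTests")

    subprojects {
        afterEvaluate {
            if (moduleBackends.containsKey(project.name)) {
                tasks.register("runTestsInDeviceFarm") {
                    doLast {
                        project.exec {
                            commandLine("${rootDir.path}/scripts/run_test_in_devicefarm.sh", project.name)
                        }
                    }
                }
            }
        }
    }

With either form the task is invoked per module, e.g. `./gradlew :liveness:runTestsInDeviceFarm`, and the script it shells out to expects `DEVICEFARM_PROJECT_ARN` (and optionally `NUMBER_OF_DEVICES_TO_TEST`) in the environment.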
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index 74260867..4bffb3a4 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -46,15 +46,19 @@ zxing = { module = "com.google.zxing:core", version.ref = "zxing" }
 
 # Testing libraries
 test-androidx-junit = "androidx.test.ext:junit:1.1.4"
+test-androidx-monitor = "androidx.test:monitor:1.5.0"
+test-androidx-rules = "androidx.test:rules:1.5.0"
 test-compose-junit = { module = "androidx.compose.ui:ui-test-junit4", version.ref = "compose" }
 test-compose-manifest = { module = "androidx.compose.ui:ui-test-manifest", version.ref = "compose" }
 test-coroutines = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-test", version.ref = "coroutines" }
 test-espresso = "androidx.test.espresso:espresso-core:3.5.1"
 test-junit = "junit:junit:4.13.2"
 test-kotest-assertions = { module = "io.kotest:kotest-assertions-core", version.ref = "kotest" }
-test-mockk = "io.mockk:mockk:1.13.4"
+test-mockk = "io.mockk:mockk:1.13.8"
+test-mockk-android = "io.mockk:mockk-android:1.13.8"
 test-robolectric = "org.robolectric:robolectric:4.9.2"
 test-turbine = { module = "app.cash.turbine:turbine", version.ref = "turbine" }
+debug-ui-test-manifest = "androidx.compose.ui:ui-test-manifest:1.5.0-beta01"
 
 # Dependencies for convention plugins
 plugin-android-gradle = { module = "com.android.tools.build:gradle", version.ref = "agp" }
diff --git a/liveness/build.gradle.kts b/liveness/build.gradle.kts
index 76dd1647..25417ecc 100644
--- a/liveness/build.gradle.kts
+++ b/liveness/build.gradle.kts
@@ -33,6 +33,11 @@ android {
     androidResources {
         noCompress += "tflite"
     }
+
+    packagingOptions {
+        resources.excludes.add("META-INF/LICENSE.md")
+        resources.excludes.add("META-INF/LICENSE-notice.md")
+    }
 }
 
 dependencies {
@@ -52,4 +57,12 @@ dependencies {
     implementation(libs.tensorflow.support)
 
     testImplementation(projects.testing)
+
+    androidTestImplementation(libs.amplify.auth)
+    androidTestImplementation(libs.test.compose.junit)
+    androidTestImplementation(libs.test.androidx.monitor)
+    androidTestImplementation(libs.test.androidx.rules)
+    androidTestImplementation(libs.test.junit)
+    androidTestImplementation(libs.test.mockk.android)
+    debugImplementation(libs.debug.ui.test.manifest)
 }
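Editor's note: hyphenated keys in `libs.versions.toml` surface as dot-separated type-safe accessors in the Kotlin DSL, which is why the new entries appear with different spellings in the two files:

    // "test-mockk-android" and "debug-ui-test-manifest" in the catalog become:
    dependencies {
        androidTestImplementation(libs.test.mockk.android)
        debugImplementation(libs.debug.ui.test.manifest)
    }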
diff --git a/liveness/src/androidTest/java/com/amplifyframework/ui/liveness/LivenessFlowInstrumentationTest.kt b/liveness/src/androidTest/java/com/amplifyframework/ui/liveness/LivenessFlowInstrumentationTest.kt
new file mode 100644
index 00000000..1bcce84c
--- /dev/null
+++ b/liveness/src/androidTest/java/com/amplifyframework/ui/liveness/LivenessFlowInstrumentationTest.kt
@@ -0,0 +1,457 @@
+package com.amplifyframework.ui.liveness
+
+import android.Manifest
+import android.content.Context
+import android.graphics.RectF
+import androidx.compose.ui.test.assertIsDisplayed
+import androidx.compose.ui.test.junit4.createComposeRule
+import androidx.compose.ui.test.onAllNodesWithText
+import androidx.compose.ui.test.onNodeWithText
+import androidx.compose.ui.test.performClick
+import androidx.test.platform.app.InstrumentationRegistry
+import androidx.test.rule.GrantPermissionRule
+import com.amplifyframework.auth.AWSCredentials
+import com.amplifyframework.auth.AWSCredentialsProvider
+import com.amplifyframework.auth.AuthException
+import com.amplifyframework.auth.AuthSession
+import com.amplifyframework.auth.cognito.AWSCognitoAuthPlugin
+import com.amplifyframework.core.Action
+import com.amplifyframework.core.Amplify
+import com.amplifyframework.core.Consumer
+import com.amplifyframework.predictions.aws.AWSPredictionsPlugin
+import com.amplifyframework.predictions.aws.models.ColorChallenge
+import com.amplifyframework.predictions.aws.models.ColorChallengeType
+import com.amplifyframework.predictions.aws.models.ColorDisplayInformation
+import com.amplifyframework.predictions.aws.models.FaceTargetChallenge
+import com.amplifyframework.predictions.aws.models.FaceTargetMatchingParameters
+import com.amplifyframework.predictions.aws.models.RgbColor
+import com.amplifyframework.predictions.models.FaceLivenessSession
+import com.amplifyframework.predictions.models.FaceLivenessSessionInformation
+import com.amplifyframework.predictions.options.FaceLivenessSessionOptions
+import com.amplifyframework.ui.liveness.camera.FrameAnalyzer
+import com.amplifyframework.ui.liveness.ml.FaceDetector
+import com.amplifyframework.ui.liveness.model.LivenessCheckState
+import com.amplifyframework.ui.liveness.state.LivenessState
+import com.amplifyframework.ui.liveness.ui.FaceLivenessDetector
+import io.mockk.CapturingSlot
+import io.mockk.InvokeMatcher
+import io.mockk.OfTypeMatcher
+import io.mockk.Runs
+import io.mockk.every
+import io.mockk.just
+import io.mockk.mockk
+import io.mockk.mockkConstructor
+import io.mockk.mockkObject
+import io.mockk.mockkStatic
+import io.mockk.slot
+import io.mockk.unmockkConstructor
+import java.util.Date
+import org.junit.Assert.assertEquals
+import org.junit.Assert.assertTrue
+import org.junit.Before
+import org.junit.BeforeClass
+import org.junit.Rule
+import org.junit.Test
+
+class MockCredentialsProvider : AWSCredentialsProvider<AWSCredentials> {
+    override fun fetchAWSCredentials(
+        onSuccess: Consumer<AWSCredentials>,
+        onError: Consumer<AuthException>,
+    ) {
+        val creds: AWSCredentials = AWSCredentials.createAWSCredentials("asdf", "asdf", "asdf", 1000000L)!!
+        onSuccess.accept(creds)
+    }
+}
+
+class LivenessFlowInstrumentationTest {
+    private lateinit var livenessSessionInformation: CapturingSlot<FaceLivenessSessionInformation>
+    private lateinit var livenessSessionOptions: CapturingSlot<FaceLivenessSessionOptions>
+    private lateinit var onSessionStarted: CapturingSlot<Consumer<FaceLivenessSession>>
+    private lateinit var onLivenessComplete: CapturingSlot<Action>
+    private lateinit var tooCloseString: String
+    private lateinit var beginCheckString: String
+    private lateinit var noFaceString: String
+    private lateinit var multipleFaceString: String
+    private lateinit var connectingString: String
+    private lateinit var moveCloserString: String
+    private lateinit var holdStillString: String
+    private lateinit var verifyingString: String
+    private lateinit var mockCredentialsProvider: MockCredentialsProvider
+
+    private var framesSent = 0
+
+    @get:Rule
+    val composeTestRule = createComposeRule()
+
+    @get:Rule
+    var mRuntimePermissionRule: GrantPermissionRule = GrantPermissionRule.grant(Manifest.permission.CAMERA)
+
+    @Before
+    fun setup() {
+        val context = InstrumentationRegistry.getInstrumentation().targetContext.applicationContext
+
+        livenessSessionInformation = slot()
+        livenessSessionOptions = slot()
+        onSessionStarted = slot()
+        onLivenessComplete = slot()
+        mockkStatic(AWSPredictionsPlugin::class)
+        every {
+            AWSPredictionsPlugin.startFaceLivenessSession(
+                any(), // sessionId
+                capture(livenessSessionInformation), // sessionInformation
+                capture(livenessSessionOptions), // options
+                any(), // version
+                capture(onSessionStarted), // onSessionStarted
+                capture(onLivenessComplete), // onComplete
+                any(), // onError
+            )
+        } just Runs
+
+        mockkConstructor(FaceLivenessSession::class)
+        every { anyConstructed<FaceLivenessSession>().sendVideoEvent(any()) }.answers {
+            framesSent++
+        }
+
+        // string resources
+        beginCheckString = context.getString(R.string.amplify_ui_liveness_get_ready_begin_check)
+        tooCloseString = context.getString(R.string.amplify_ui_liveness_challenge_instruction_move_face_further)
+        noFaceString = context.getString(R.string.amplify_ui_liveness_challenge_instruction_move_face)
+        multipleFaceString = context.getString(
+            R.string.amplify_ui_liveness_challenge_instruction_multiple_faces_detected,
+        )
+        connectingString = context.getString(R.string.amplify_ui_liveness_challenge_connecting)
+        moveCloserString = context.getString(R.string.amplify_ui_liveness_challenge_instruction_move_face_closer)
+        holdStillString = context.getString(
+            R.string.amplify_ui_liveness_challenge_instruction_hold_face_during_freshness,
+        )
+        verifyingString = context.getString(R.string.amplify_ui_liveness_challenge_verifying)
+
+        mockCredentialsProvider = MockCredentialsProvider()
+    }
+
+    @Test
+    fun testLivenessDefaultCameraGivesNoFaceError() {
+        val sessionId = "sessionId"
+        composeTestRule.setContent {
+            FaceLivenessDetector(sessionId = sessionId, region = "us-east-1", onComplete = {
+            }, onError = { assertTrue(false) })
+        }
+
+        composeTestRule.onNodeWithText(beginCheckString).assertExists()
+        composeTestRule.onNodeWithText(beginCheckString).performClick()
+        composeTestRule.waitUntil(5000) {
+            composeTestRule.onAllNodesWithText(noFaceString)
+                .fetchSemanticsNodes().size == 1
+        }
+
+        // make sure the compose flow reaches this point
+        composeTestRule.onNodeWithText(noFaceString).assertIsDisplayed()
+    }
+
+    @Test
+    fun testLivenessFlowTooClose() {
+        mockkConstructor(FrameAnalyzer::class)
+        var livenessState: LivenessState? = null
+        every {
+            constructedWith<FrameAnalyzer>(
+                OfTypeMatcher<Context>(Context::class),
+                InvokeMatcher<LivenessState> {
+                    livenessState = it
+                },
+            ).analyze(any())
+        } answers {
+            assert(livenessState != null)
+
+            livenessState?.onFrameFaceCountUpdate(1)
+
+            // Features too far apart; this face must be too close to the camera
+            livenessState?.onFrameFaceUpdate(
+                RectF(0f, 0f, 400f, 400f),
+                FaceDetector.Landmark(120f, 120f),
+                FaceDetector.Landmark(280f, 120f),
+                FaceDetector.Landmark(200f, 320f),
+            )
+        }
+
+        val sessionId = "sessionId"
+        composeTestRule.setContent {
+            FaceLivenessDetector(sessionId = sessionId, region = "us-east-1", onComplete = {
+            }, onError = { assertTrue(false) })
+        }
+
+        composeTestRule.onNodeWithText(beginCheckString).assertExists()
+        composeTestRule.onNodeWithText(beginCheckString).performClick()
+        composeTestRule.waitUntil(5000) {
+            composeTestRule.onAllNodesWithText(tooCloseString)
+                .fetchSemanticsNodes().size == 1
+        }
+
+        // make sure the compose flow reaches this point
+        composeTestRule.onNodeWithText(tooCloseString).assertIsDisplayed()
+
+        unmockkConstructor(FrameAnalyzer::class)
+    }
+
+    @Test
+    fun testLivenessFlowTooManyFaces() {
+        mockkConstructor(FrameAnalyzer::class)
+        var livenessState: LivenessState? = null
+        every {
+            constructedWith<FrameAnalyzer>(
+                OfTypeMatcher<Context>(Context::class),
+                InvokeMatcher<LivenessState> {
+                    livenessState = it
+                },
+            ).analyze(any())
+        } answers {
+            assert(livenessState != null)
+
+            livenessState?.onFrameFaceCountUpdate(2)
+        }
+
+        val sessionId = "sessionId"
+        composeTestRule.setContent {
+            FaceLivenessDetector(sessionId = sessionId, region = "us-east-1", onComplete = {
+            }, onError = { assertTrue(false) })
+        }
+
+        composeTestRule.onNodeWithText(beginCheckString).assertExists()
+        composeTestRule.onNodeWithText(beginCheckString).performClick()
+        composeTestRule.waitUntil(5000) {
+            composeTestRule.onAllNodesWithText(multipleFaceString)
+                .fetchSemanticsNodes().size == 1
+        }
+
+        // make sure the compose flow reaches this point
+        composeTestRule.onNodeWithText(multipleFaceString).assertIsDisplayed()
+
+        unmockkConstructor(FrameAnalyzer::class)
+    }
+
+    @Test
+    fun testLivenessFlowNoChallenges() {
+        mockkConstructor(FrameAnalyzer::class)
+        var livenessState: LivenessState? = null
+        every {
+            constructedWith<FrameAnalyzer>(
+                OfTypeMatcher<Context>(Context::class),
+                InvokeMatcher<LivenessState> {
+                    livenessState = it
+                },
+            ).analyze(any())
+        } answers {
+            assert(livenessState != null)
+
+            livenessState?.onFrameFaceCountUpdate(1)
+
+            // Features should be sized correctly here
+            livenessState?.onFrameFaceUpdate(
+                RectF(0f, 0f, 200f, 200f),
+                FaceDetector.Landmark(60f, 60f),
+                FaceDetector.Landmark(140f, 60f),
+                FaceDetector.Landmark(100f, 160f),
+            )
+        }
+
+        val sessionId = "sessionId"
+        var completesSuccessfully = false
+        composeTestRule.setContent {
+            FaceLivenessDetector(sessionId = sessionId, region = "us-east-1", onComplete = {
+                completesSuccessfully = true
+            }, onError = { assertTrue(false) })
+        }
+
+        composeTestRule.onNodeWithText(beginCheckString).assertExists()
+        composeTestRule.onNodeWithText(beginCheckString).performClick()
+        composeTestRule.waitUntil(5000) {
+            composeTestRule.onAllNodesWithText(connectingString)
+                .fetchSemanticsNodes().size == 1
+        }
+
+        composeTestRule.waitForIdle()
+
+        val pause = 1
+        onSessionStarted.captured.accept(FaceLivenessSession(emptyList(), {}, {}, {}))
+
+        composeTestRule.waitForIdle()
+
+        onLivenessComplete.captured.call()
+        assertTrue(completesSuccessfully)
+
+        unmockkConstructor(FrameAnalyzer::class)
+    }
+
+    @Test
+    fun testLivenessFlowWithChallenges() {
+        mockkConstructor(FrameAnalyzer::class)
+        var livenessState: LivenessState? = null
+        every {
+            constructedWith<FrameAnalyzer>(
+                OfTypeMatcher<Context>(Context::class),
+                InvokeMatcher<LivenessState> {
+                    livenessState = it
+                },
+            ).analyze(any())
+        } answers {
+            assert(livenessState != null)
+
+            livenessState?.onFrameFaceCountUpdate(1)
+
+            // Features should be sized correctly here
+            livenessState?.onFrameFaceUpdate(
+                RectF(0f, 0f, 200f, 200f),
+                FaceDetector.Landmark(60f, 60f),
+                FaceDetector.Landmark(140f, 60f),
+                FaceDetector.Landmark(100f, 160f),
+            )
+        }
+
+        val sessionId = "sessionId"
+        var completesSuccessfully = false
+        composeTestRule.setContent {
+            FaceLivenessDetector(
+                sessionId = sessionId,
+                region = "us-east-1",
+                credentialsProvider = mockCredentialsProvider,
+                onComplete = {
+                    completesSuccessfully = true
+                },
+                onError = { assertTrue(false) },
+            )
+        }
+
+        composeTestRule.onNodeWithText(beginCheckString).assertExists()
+        composeTestRule.onNodeWithText(beginCheckString).performClick()
+        composeTestRule.waitUntil(5000) {
+            composeTestRule.onAllNodesWithText(connectingString)
+                .fetchSemanticsNodes().size == 1
+        }
+
+        val faceTargetMatchingParameters = mockk<FaceTargetMatchingParameters>()
+        every { faceTargetMatchingParameters.targetIouThreshold }.returns(0.7f)
+        every { faceTargetMatchingParameters.targetIouWidthThreshold }.returns(0.25f)
+        every { faceTargetMatchingParameters.targetIouHeightThreshold }.returns(0.25f)
+        every { faceTargetMatchingParameters.faceIouWidthThreshold }.returns(0.15f)
+        every { faceTargetMatchingParameters.faceIouHeightThreshold }.returns(0.15f)
+        every { faceTargetMatchingParameters.ovalFitTimeout }.returns(10000)
+
+        val faceTargetChallenge = mockk<FaceTargetChallenge>()
+        val faceRect = RectF(19f, -49f, 441f, 633f)
+        every { faceTargetChallenge.targetWidth }.returns(faceRect.right - faceRect.left)
+        every { faceTargetChallenge.targetHeight }.returns(faceRect.bottom - faceRect.top)
+        every { faceTargetChallenge.targetCenterX }.returns((faceRect.left + faceRect.right) / 2)
+        every { faceTargetChallenge.targetCenterY }.returns((faceRect.top + faceRect.bottom) / 2)
+        every { faceTargetChallenge.faceTargetMatching }.returns(faceTargetMatchingParameters)
+
+        val colors = listOf(
+            RgbColor(0, 0, 0),
+            RgbColor(0, 255, 255),
+            RgbColor(255, 0, 0),
+            RgbColor(0, 255, 0),
+            RgbColor(0, 0, 255),
+            RgbColor(255, 255, 0),
+            RgbColor(0, 255, 0),
+            RgbColor(255, 0, 0),
+        )
+        val durations = listOf(
+            75f,
+            475f,
+            475f,
+            475f,
+            475f,
+            475f,
+            475f,
+            475f,
+        )
+        val challengeColors = List(colors.size) {
+            val colorDisplayInformation = mockk<ColorDisplayInformation>()
+            every { colorDisplayInformation.color }.returns(colors[it])
+            every { colorDisplayInformation.duration }.returns(durations[it])
+            every { colorDisplayInformation.shouldScroll }.returns(false)
+            colorDisplayInformation
+        }
+        val colorChallenge = mockk<ColorChallenge>()
+        every { colorChallenge.challengeId }.returns("id")
+        every { colorChallenge.challengeType }.returns(ColorChallengeType.SEQUENTIAL)
+        every { colorChallenge.challengeColors }.returns(challengeColors)
+
+        onSessionStarted.captured.accept(
+            FaceLivenessSession(
+                listOf(faceTargetChallenge, colorChallenge),
+                {}, // onVideoEvent
+                {}, // onChallengeResponseEvent
+                {}, // stopLivenessSession
+            ),
+        )
+        var faceUpdates = 0
+
+        // update the face location to show the oval
+        livenessState?.onFrameFaceUpdate(
+            RectF(0f, 0f, 400f, 400f),
+            FaceDetector.Landmark(60f, 60f),
+            FaceDetector.Landmark(140f, 60f),
+            FaceDetector.Landmark(100f, 160f),
+        )
+        faceUpdates += 1
+
+        // in the same spot as it was originally, the face is too far away
+        composeTestRule.waitUntil(1000) {
+            composeTestRule.onAllNodesWithText(moveCloserString)
+                .fetchSemanticsNodes().size == 1
+        }
+
+        composeTestRule.waitForIdle()
+
+        // update the face to be inside the oval position
+        livenessState?.onFrameFaceUpdate(
+            faceRect,
+            FaceDetector.Landmark(60f, 60f),
+            FaceDetector.Landmark(140f, 60f),
+            FaceDetector.Landmark(100f, 160f),
+        )
+        faceUpdates += 1
+
+        composeTestRule.waitForIdle()
+
+        // the countdown is now invisible; wait one second so that freshness can start
+        composeTestRule.waitUntil(2000) {
+            livenessState?.faceMatchOvalStart?.let { (Date().time - it) > 1000 } ?: false
+        }
+        livenessState?.onFrameAvailable()
+        assert(livenessState?.runningFreshness!!)
+
+        // now freshness is running; wait for the colors to finish
+        composeTestRule.waitUntil(10000) {
+            composeTestRule.onAllNodesWithText(verifyingString)
+                .fetchSemanticsNodes().size == 1
+        }
+
+        val state = livenessState?.livenessCheckState?.value
+        assertEquals(livenessState?.readyToSendFinalEvents, true)
+        assertTrue(state is LivenessCheckState.Success)
+        assertTrue((state as LivenessCheckState.Success).faceGuideRect == faceRect)
+        // the exact number of frames sent is timing-dependent, so only check the lower bound
+        assertTrue(framesSent >= faceUpdates)
+
+        onLivenessComplete.captured.call()
+        assertTrue(completesSuccessfully)
+
+        unmockkConstructor(FrameAnalyzer::class)
+    }
+
+    companion object {
+        @BeforeClass
+        @JvmStatic
+        fun setupAmplify() {
+            val context = InstrumentationRegistry.getInstrumentation().targetContext.applicationContext
+
+            // mock the Amplify Auth category
+            val authPlugin = AWSCognitoAuthPlugin()
+            mockkObject(authPlugin)
+            every { authPlugin.fetchAuthSession(any(), any()) } answers {
+                firstArg<(AuthSession) -> Unit>().invoke(AuthSession(true))
+            }
+            Amplify.addPlugin(authPlugin)
+            Amplify.configure(context)
+        }
+    }
+}
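Editor's note: several of the tests above lean on MockK's constructor mocking to intercept the `FrameAnalyzer` that `FaceLivenessDetector` creates internally: `constructedWith` scopes the stub to constructors whose arguments match, and `InvokeMatcher` doubles as a way to grab the `LivenessState` argument so the test can drive frame updates by hand. A stripped-down sketch of that pattern, with hypothetical `Analyzer`/`State` stand-ins in place of the real classes:

    import io.mockk.InvokeMatcher
    import io.mockk.OfTypeMatcher
    import io.mockk.every
    import io.mockk.mockkConstructor
    import io.mockk.unmockkConstructor

    // Hypothetical stand-ins for FrameAnalyzer/LivenessState, to show the pattern in isolation.
    class State(var faceCount: Int = 0)
    class Analyzer(config: String, state: State) {
        fun analyze(frame: Any): Boolean = false // the real implementation would inspect the frame
    }

    fun main() {
        mockkConstructor(Analyzer::class)
        var captured: State? = null
        every {
            constructedWith<Analyzer>(
                OfTypeMatcher<String>(String::class),   // match any String config argument
                InvokeMatcher<State> { captured = it }, // grab the State the analyzer was built with
            ).analyze(any())
        } answers {
            captured?.faceCount = 1 // drive the captured state as if a face had been seen
            true
        }

        check(Analyzer("config", State()).analyze(Unit)) // returns true from the stub
        unmockkConstructor(Analyzer::class)
    }

The same capture-then-drive idea shows up with `CapturingSlot`: `setup()` captures the `onSessionStarted`/`onComplete` callbacks, and the tests later invoke `captured.accept(...)` and `captured.call()` to step the flow deterministically.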
diff --git a/liveness/src/test/java/com/amplifyframework/ui/liveness/state/LivenessStateTest.kt b/liveness/src/test/java/com/amplifyframework/ui/liveness/state/LivenessStateTest.kt
index f354924e..e129ec9d 100644
--- a/liveness/src/test/java/com/amplifyframework/ui/liveness/state/LivenessStateTest.kt
+++ b/liveness/src/test/java/com/amplifyframework/ui/liveness/state/LivenessStateTest.kt
@@ -17,6 +17,7 @@ package com.amplifyframework.ui.liveness.state
 
 import android.graphics.RectF
 import androidx.test.core.app.ApplicationProvider
+import androidx.test.ext.junit.runners.AndroidJUnit4
 import com.amplifyframework.predictions.aws.models.ColorChallenge
 import com.amplifyframework.predictions.aws.models.FaceTargetChallenge
 import com.amplifyframework.predictions.aws.models.FaceTargetChallengeResponse
@@ -37,9 +38,8 @@ import org.junit.Assert.assertTrue
 import org.junit.Before
 import org.junit.Test
 import org.junit.runner.RunWith
-import org.robolectric.RobolectricTestRunner
 
-@RunWith(RobolectricTestRunner::class)
+@RunWith(AndroidJUnit4::class)
 internal class LivenessStateTest {
 
     private lateinit var livenessState: LivenessState
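Editor's note: swapping `RobolectricTestRunner` for androidx's `AndroidJUnit4` is what lets this suite serve double duty: `AndroidJUnit4` is a delegating runner that uses Robolectric on the local JVM and `AndroidJUnitRunner` on a device, so the same class can back both unit and instrumentation runs. A minimal sketch (hypothetical test class):

    import androidx.test.ext.junit.runners.AndroidJUnit4
    import org.junit.Assert.assertEquals
    import org.junit.Test
    import org.junit.runner.RunWith

    // AndroidJUnit4 delegates to RobolectricTestRunner under test/ and to
    // AndroidJUnitRunner under androidTest/, so one class serves both source sets.
    @RunWith(AndroidJUnit4::class)
    class RunnerAgnosticTest {
        @Test
        fun addition() {
            assertEquals(4, 2 + 2)
        }
    }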
+""" + +import os +import sys +import subprocess +import argparse +import logging +import boto3 +from botocore.config import Config +from junit_xml import TestSuite, TestCase + +LOG_FORMATTER = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') +CONSOLE_HANDLER = logging.StreamHandler() +CONSOLE_HANDLER.setFormatter(LOG_FORMATTER) +LOGGER = logging.getLogger("DeviceFarmTestRunReportGenerator") +LOGGER.setLevel(os.getenv("LOG_LEVEL") if os.getenv("LOG_LEVEL") is not None else "INFO") +LOGGER.addHandler(CONSOLE_HANDLER) + +client = boto3.client('devicefarm', config=Config(region_name='us-west-2')) + +def parse_arguments(): + parser = argparse.ArgumentParser(description="Utility that generates a report for a DeviceFarm test run.") + parser.add_argument("--run_arn", help="The ARN of the DeviceFarm test run.", required=True) + parser.add_argument("--module_name", help="The module name for the test suite.", required=True) + parser.add_argument("--output_path", help="Destination path for the build reports.", required=True) + parser.add_argument("--pr", help="The github PR number.") + return parser.parse_args() + +def generate_junit_report(run_arn, output_path): + LOGGER.debug(f"Retrieving test jobs for run {run_arn}") + jobs = get_test_jobs(run_arn) + for job_no, job in enumerate(jobs): + LOGGER.debug(f"Retrieving test suites for job {job['arn']}") + suites = get_test_suites(job['arn']) + for suite in suites: + LOGGER.debug(f"Retrieving tests for suite {suite['arn']}") + tests = get_tests(suite['arn']) + test_cases = [] + for test in tests: + tc = TestCase(test['name'], + classname=suite['name'], + elapsed_sec=test['deviceMinutes']['total']*60, + stdout=test['message'], + status=test['result'] ) + if test['result'] == 'FAILED': + tc.add_failure_info(message=test['message']) + if test['result'] == 'ERROR': + tc.add_error_info(message=test['message']) + test_cases.append(tc) + ts = TestSuite(suite['name'] + "-" + str(job_no),test_cases=test_cases) + ts_output = TestSuite.to_xml_string([ts]) + LOGGER.info(f"Saving test suite {suite['name']} report.") + if not os.path.exists(output_path): + os.makedirs(output_path) + f = open(output_path + suite['name'] + "-" + str(job_no) + ".xml", "w") + f.write(ts_output) + f.close() + +def get_test_jobs(run_arn): + result = client.list_jobs(arn=run_arn) + return result['jobs'] if result is not None else [] + +def get_test_suites(job_arn): + result = client.list_suites(arn=job_arn) + return result['suites'] if result is not None else [] + +def get_tests(suite_arn): + result = client.list_tests(arn=suite_arn) + return result['tests'] if result is not None else [] + +def get_problems(run_arn): + return client.list_unique_problems( + arn=run_arn + ) + +def main(arguments): + args = parse_arguments() + build_id = os.getenv("CODEBUILD_BUILD_ID") + source_version = os.getenv("CODEBUILD_SOURCE_VERSION") + arn_suffix = args.run_arn.split(':')[-1] + LOGGER.info(f"devicefarm_run: {arn_suffix} build_id: {build_id} source_version: {source_version}") + generate_junit_report(run_arn=args.run_arn, + output_path=args.output_path) + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/scripts/pull_backend_config_from_s3 b/scripts/pull_backend_config_from_s3 new file mode 100755 index 00000000..ff0e0d09 --- /dev/null +++ b/scripts/pull_backend_config_from_s3 @@ -0,0 +1,31 @@ +#!/bin/bash + +set -x +set -e + +# This bucket contains a collection of config files that are used by the +# integration tests. 
diff --git a/scripts/pull_backend_config_from_s3 b/scripts/pull_backend_config_from_s3
new file mode 100755
index 00000000..ff0e0d09
--- /dev/null
+++ b/scripts/pull_backend_config_from_s3
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+set -x
+set -e
+
+# This bucket contains a collection of config files that are used by the
+# integration tests. The configuration files contain sensitive
+# tokens/credentials/identifiers, so they are not published publicly.
+readonly config_bucket=$1
+
+readonly config_files=(
+    # Liveness
+    "liveness/src/androidTest/res/raw/amplifyconfiguration.json"
+)
+
+# Set up the output path
+declare -r dest_dir=$HOME/.aws-amplify/amplify-android
+mkdir -p "$dest_dir"
+
+# Download remote files into a local directory outside of the project.
+for config_file in "${config_files[@]}"; do
+    aws s3 cp "s3://$config_bucket/$config_file" "$dest_dir/$config_file" &
+done
+wait
+
+# Create a symlink for each configuration file.
+for config_file in "${config_files[@]}"; do
+    mkdir -p "$(dirname "$config_file")"
+    ln -s "$dest_dir/$config_file" "$config_file" &
+done
+wait
diff --git a/scripts/run_test_in_devicefarm.sh b/scripts/run_test_in_devicefarm.sh
new file mode 100755
index 00000000..e1a74ce5
--- /dev/null
+++ b/scripts/run_test_in_devicefarm.sh
@@ -0,0 +1,151 @@
+#!/bin/bash
+project_arn=$DEVICEFARM_PROJECT_ARN
+max_devices=$NUMBER_OF_DEVICES_TO_TEST
+module_name=$1
+file_name="$module_name-debug-androidTest.apk"
+full_path="$module_name/build/outputs/apk/androidTest/debug/$file_name"
+
+if [[ -z "${project_arn}" ]]; then
+    echo "DEVICEFARM_PROJECT_ARN environment variable not set."
+    exit 1
+fi
+
+if [[ -z "${max_devices}" ]]; then
+    echo "NUMBER_OF_DEVICES_TO_TEST not set. Defaulting to 1."
+    max_devices=1
+fi
+
+# Function to set up an app upload in Device Farm.
+function createUpload {
+    test_type=$1
+    upload_response=`aws devicefarm create-upload --type $test_type \
+        --content-type="application/octet-stream" \
+        --project-arn="$project_arn" \
+        --name="$file_name" \
+        --query="upload.[url, arn]" \
+        --region="us-west-2" \
+        --output=text`
+    echo $upload_response
+}
+
+echo 'Uploading test package'
+# Create an upload for the instrumentation test package
+read -a result <<< $(createUpload "INSTRUMENTATION_TEST_PACKAGE")
+test_package_url=${result[0]}
+test_package_upload_arn=${result[1]}
+# Upload the apk
+curl -H "Content-Type:application/octet-stream" -T $full_path $test_package_url
+
+# Create an upload for the app package (the same APK, but Device Farm requires
+# both an app package and a test package to be registered).
+echo 'Uploading app package'
+read -a result <<< $(createUpload "ANDROID_APP")
+app_package_url=${result[0]}
+app_package_upload_arn=${result[1]}
+# Upload the apk
+curl -H "Content-Type:application/octet-stream" -T $full_path $app_package_url
+
+# Wait to make sure the uploads complete. Ideally this would poll get-upload
+# and check the status instead of sleeping for a fixed interval.
+sleep 10
+
+# Get the oldest device we can test against.
+minDevice=$(aws devicefarm list-devices \
+    --region="us-west-2" \
+    --filters '[
+        {"attribute":"AVAILABILITY","operator":"EQUALS","values":["HIGHLY_AVAILABLE"]},
+        {"attribute":"PLATFORM","operator":"EQUALS","values":["ANDROID"]},
+        {"attribute":"OS_VERSION","operator":"GREATER_THAN_OR_EQUALS","values":["8"]},
+        {"attribute":"OS_VERSION","operator":"LESS_THAN","values":["8.1"]},
+        {"attribute":"MANUFACTURER","operator":"IN","values":["Google", "Pixel", "Samsung"]}
+    ]' \
+    | jq -r '.devices[0].arn')
+
+# Get a middle device we can test against.
+middleDevice=$(aws devicefarm list-devices \
+    --region="us-west-2" \
+    --filters '[
+        {"attribute":"AVAILABILITY","operator":"EQUALS","values":["HIGHLY_AVAILABLE"]},
+        {"attribute":"PLATFORM","operator":"EQUALS","values":["ANDROID"]},
+        {"attribute":"OS_VERSION","operator":"GREATER_THAN_OR_EQUALS","values":["10"]},
+        {"attribute":"OS_VERSION","operator":"LESS_THAN","values":["11"]},
+        {"attribute":"MANUFACTURER","operator":"IN","values":["Samsung"]}
+    ]' \
+    | jq -r '.devices[0].arn')
+
+# Get the latest device we can test against.
+latestDevice=$(aws devicefarm list-devices \
+    --region="us-west-2" \
+    --filters '[
+        {"attribute":"AVAILABILITY","operator":"EQUALS","values":["HIGHLY_AVAILABLE"]},
+        {"attribute":"PLATFORM","operator":"EQUALS","values":["ANDROID"]},
+        {"attribute":"OS_VERSION","operator":"GREATER_THAN_OR_EQUALS","values":["12"]},
+        {"attribute":"MANUFACTURER","operator":"IN","values":["Google", "Pixel"]}
+    ]' \
+    | jq -r '.devices[0].arn')
+
+# If we fail to find our required test devices, fail.
+if [[ -z "${minDevice}" || -z "${middleDevice}" || -z "${latestDevice}" ]]; then
+    echo "Failed to grab 3 required devices for integration tests."
+    exit 1
+fi
+
+# Function to cancel duplicate runs for the same code source in Device Farm.
+function stopDuplicates {
+    echo "Stopping duplicate runs"
+    name="$file_name-$CODEBUILD_SOURCE_VERSION"
+    read -a running_arns <<< $(aws devicefarm list-runs \
+        --arn="$project_arn" \
+        --query="runs[?(status == 'RUNNING' || status == 'PENDING') && name == '${name}'].arn" \
+        --region="us-west-2" \
+        --max-items=5 \
+        | jq -r '.[]')
+
+    for arn in "${running_arns[@]}"
+    do
+        ## Just consume the result and do nothing with it.
+        result=`aws devicefarm stop-run --arn $arn --region="us-west-2" --query="run.name"`
+    done
+}
+stopDuplicates
+
+# Schedule the test run in Device Farm
+echo "Scheduling test run"
+run_arn=`aws devicefarm schedule-run --project-arn=$project_arn \
+    --app-arn="$app_package_upload_arn" \
+    --device-selection-configuration='{
+        "filters": [
+            {"attribute": "ARN", "operator":"IN", "values":["'$minDevice'", "'$middleDevice'", "'$latestDevice'"]}
+        ],
+        "maxDevices": '$max_devices'
+    }' \
+    --name="$file_name-$CODEBUILD_SOURCE_VERSION" \
+    --test="type=INSTRUMENTATION,testPackageArn=$test_package_upload_arn" \
+    --execution-configuration="jobTimeoutMinutes=30,videoCapture=false" \
+    --query="run.arn" \
+    --output=text \
+    --region="us-west-2"`
+
+status='NONE'
+result='NONE'
+# Wait for the test run to complete
+while true; do
+    run_status_response=`aws devicefarm get-run --arn="$run_arn" --region="us-west-2" --query="run.[status, result]" --output text`
+    read -a result_arr <<< $run_status_response
+    status=${result_arr[0]}
+    result=${result_arr[1]}
+    if [ "$status" = "COMPLETED" ]
+    then
+        break
+    fi
+    sleep 30
+done
+echo "Status = $status Result = $result"
+
+./scripts/generate_df_testrun_report --run_arn="$run_arn" --module_name="$module_name" --pr="$CODEBUILD_SOURCE_VERSION" --output_path="build/allTests/$module_name/"
+# If the result is PASSED, exit with return code 0.
+if [ "$result" = "PASSED" ]
+then
+    exit 0
+fi
+# Otherwise, exit non-zero.
+exit 1