diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index a4f7d59..34cbbea 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -6,9 +6,9 @@ name: CI
on:
# Triggers the workflow on push or pull request events but only for the main branch
push:
- branches: [ main ]
+ branches: [ main, aceaas-and-minikube ]
pull_request:
- branches: [ main ]
+ branches: [ main, aceaas-and-minikube ]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
diff --git a/.gitignore b/.gitignore
index 6c1d9ae..2eaf2a5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,3 +27,5 @@ TeaRESTApplication_UnitTest/resources
.gradle
+
+.vscode
diff --git a/Jenkinsfile b/Jenkinsfile
index c26f161..9e969d8 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -1,5 +1,9 @@
pipeline {
- agent { docker { image 'ace-minimal-build:12.0.10.0-alpine' } }
+ agent { docker {
+ image 'cp.icr.io/cp/appc/ace:12.0.11.0-r1'
+ /* image 'ace-minimal:12.0.11.0-alpine' */
+ args '-e LICENSE=accept --entrypoint ""'
+ } }
parameters {
/* These values would be better moved to a configuration file and provided by */
/* the Config File Provider plugin (or equivalent), but this is good enough */
@@ -18,17 +22,40 @@ pipeline {
# Set HOME to somewhere writable by Maven
export HOME=/tmp
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
# Clean up just in case files have been left around
- rm -f */maven-reports/TEST*.xml
- rm -rf $PWD/ace-server
+ rm -f */junit-reports/TEST*.xml
+ rm -rf /tmp/test-work-dir
+
+ echo ========================================================================
+ echo Building application
+ echo ========================================================================
+ # Using --compile-maps-and-schemas for 12.0.11 and later . . .
+ ibmint package --input-path . --output-bar-file $PWD/tea-application-combined.bar --project TeaSharedLibraryJava --project TeaSharedLibrary --project TeaRESTApplication --compile-maps-and-schemas
+
+ echo ========================================================================
+ echo Building unit tests
+ echo ========================================================================
+ # Create the unit test work directory
+ mqsicreateworkdir /tmp/test-work-dir
+ mqsibar -w /tmp/test-work-dir -a $PWD/tea-application-combined.bar
+ # Build just the unit tests
+ ibmint deploy --input-path . --output-work-directory /tmp/test-work-dir --project TeaRESTApplication_UnitTest
- mvn --no-transfer-progress -Dinstall.work.directory=$PWD/ace-server install
+ echo ========================================================================
+ echo Running unit tests
+ echo ========================================================================
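+                    # Run the JUnit tests in an integration server; --start-msgflows false stops the
+                    # normal message flows from running during the test run, and the JUnit XML reports
+                    # go to junit-reports for the Jenkins junit step below.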
+ IntegrationServer -w /tmp/test-work-dir --no-nodejs --start-msgflows false --test-project TeaRESTApplication_UnitTest --test-junit-options --reports-dir=junit-reports
'''
}
post {
always {
- junit '**/maven-reports/TEST*.xml'
+ junit '**/junit-reports/TEST*.xml'
}
}
}
@@ -41,50 +68,48 @@ pipeline {
sh '''#!/bin/bash
# Should alread have the projects unpacked
- export WORKDIR=$PWD/ace-server
+ export WORKDIR=/tmp/test-work-dir
# Set HOME to somewhere writable by Maven
export HOME=/tmp
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
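+                    # Create a CTPolicies policy project containing the JDBC policy, store the JDBC
+                    # credentials, and make CTPolicies the default policy project for the test server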
mkdir ${WORKDIR}/run/CTPolicies
echo '' > ${WORKDIR}/run/CTPolicies/policy.descriptor
cp /tmp/TEAJDBC.policyxml ${WORKDIR}/run/CTPolicies/
mqsisetdbparms -w ${WORKDIR} -n jdbc::tea -u $CT_JDBC_USR -p $CT_JDBC_PSW
sed -i "s/#policyProject: 'DefaultPolicies'/policyProject: 'CTPolicies'/g" ${WORKDIR}/server.conf.yaml
- rm -f */maven-reports/TEST*.xml
- ( cd TeaRESTApplication_ComponentTest && mvn --no-transfer-progress -Dct.work.directory=${WORKDIR} verify )
+ rm -f */junit-reports/TEST*.xml
+
+
+ echo ========================================================================
+ echo Building component tests
+ echo ========================================================================
+
+ # Build just the component tests
+ ibmint deploy --input-path . --output-work-directory ${WORKDIR} --project TeaRESTApplication_ComponentTest
+
+ echo ========================================================================
+ echo Running component tests
+ echo ========================================================================
+ IntegrationServer -w ${WORKDIR} --no-nodejs --start-msgflows false --test-project TeaRESTApplication_ComponentTest --test-junit-options --reports-dir=junit-reports
+
'''
}
post {
always {
- junit '**/maven-reports/TEST*.xml'
+ junit '**/junit-reports/TEST*.xml'
}
}
}
- stage('Next stage BAR build') {
- steps {
- sh '''#!/bin/bash
- # Build a single BAR file that contains everything rather than deploying two BAR files.
- # Deploying two BAR files (one for the shared library and the other for the application)
- # would work, but would take longer on redeploys due to reloading the application on
- # each deploy.
- #
- # The Tekton pipeline doesn't have this issue because the application and library are
- # unpacked into a work directory in a container image in that pipeline, so there is no
- # deploy to a running server.
- mqsipackagebar -w $PWD -a tea-application-combined.bar -y TeaSharedLibrary -k TeaRESTApplication
-
- # Optional compile for XMLNSC, DFDL, and map resources. Useful as long as the target
- # broker is the same OS, CPU, and installation including ifixes as the build system.
- # mqsibar --bar-file tea-application-combined.bar --compile
- '''
- }
- }
-
stage('Next stage deploy') {
steps {
- sh "bash -c \"mqsideploy -i ${params.integrationNodeHost} -p ${params.integrationNodePort} -e ${params.integrationServerName} -a tea-application-combined.bar\""
+ sh "bash -c \"export LICENSE=accept ; . /opt/ibm/ace-12/server/bin/mqsiprofile ; mqsideploy -i ${params.integrationNodeHost} -p ${params.integrationNodePort} -e ${params.integrationServerName} -a tea-application-combined.bar\""
}
}
diff --git a/Jenkinsfile.windows b/Jenkinsfile.windows
index 36854ad..475e987 100644
--- a/Jenkinsfile.windows
+++ b/Jenkinsfile.windows
@@ -12,7 +12,7 @@ pipeline {
string(name: 'integrationServerName', defaultValue: 'default', description: 'Integration server name')
}
environment {
- ACE_COMMAND = "C:\\Program Files\\IBM\\ACE\\12.0.10.0\\ace"
+ ACE_COMMAND = "C:\\Program Files\\IBM\\ACE\\12.0.11.0\\ace"
CT_JDBC = credentials('CT_JDBC')
}
stages {
diff --git a/README.md b/README.md
index 18cb4f1..8ee0375 100644
--- a/README.md
+++ b/README.md
@@ -3,69 +3,122 @@
Demo pipeline for ACE to show how ACE solutions can be built in CI/CD pipelines using standard
tools. The main focus is on how to use existing ACE capabilities in a pipeline, with the application
being constructed to show pipeline-friendliness rather than being a "best practice" application.
+As part of this, the pipeline scripts are stored in this repo along with the application source
+to make the demo simpler, while in practice they would often be stored separately.
+The overall goal is to deploy a REST HTTP application to an ACE integration server:
-![Pipeline overview](tekton/ace-demo-pipeline-tekton-1.png)
-
-Note on 20231204: IBM Kubernetes Service no longer offers free clusters, and this demo is being adjusted to use other solutions.
-
-## Constituent parts
-
-- This repo, containing the application source and tests plus the DB2 client JAR.
-- Maven for building applications and running JUnit tests
-- Tekton for running builds in a cloud
-- Docker container build files in this repo for building the application image (see tekton/Dockerfile)
-- IBM Cloud container registry (free tier) for hosting the application image
-- IBM Cloud Kubernetes cluster (free tier) for running the application container
-- DB2 on Cloud (free tier) for use by the application container; credentials stored in Kubernetes secrets
-
-This repo can also be built using a GitHub action for CI enablement. It is also possible to run the
-pipeline using OpenShift with RedHat OpenShift Pipelines instead of using the IBM Cloud Kubernetes
-service, and the instructions contain OpenShift-specific sections for the needed changes.
-
-There is also a variant of the pipeline that uses the IBM Cloud Pak for Integration and creates
-custom resources to deploy the application (amongst other changes). See the
-[CP4i README](tekton/os/cp4i/README.md) for details and instructions.
-
-Jenkins can also be used to run the pipeline and deploy the application to an integration node.
-See the [Jenkins README](demo-infrastructure/README-jenkins.md) for details and instructions.
-
-Note that the Tekton pipeline can also create temporary databases for use during pipeline runs; see
-[temp-db2](tekton/temp-db2/README.md) for more details.
-
-For online testing and development, see [README-codespaces](README-codespaces.md) for details on
-using a github-hosted container.
-
-## The application
+![Pipeline high-level](/demo-infrastructure/images/pipeline-high-level.png)
The application used to demonstrate the pipeline consists of a REST API that accepts JSON and interacts
-with a database, with a supporting shared library containing a lot of the code. It is designed around
-indexing different types of tea, storing the name and strength of the tea and assigning a unique integer
-id to each type so that it can be retrieved later. Audit data is logged as XML for each operation performed.
-
-This repo can be imported into the ACE v12 toolkit using the egit plugin (included in the ACE v12 toolkit)
-and inspected; as most pipelines would be expected to work with source repositories, there is no project
-interchange file to import for the projects.
+with a database via JDBC, with a supporting shared library containing a lot of the code (hereafter
+referred to as the "Tea REST application"). It is designed around indexing different types of tea, storing
+the name and strength of the tea along with assigning a unique integer id to each type so that it can be
+retrieved later. Audit data is logged as XML for each operation performed.
As this application exists to help demonstrate pipelines and how they work with ACE, there are some shortcuts
in the code that would not normally be present in a production-ready application: the database table is
created on-demand to make setup easier, the logging goes to the console instead of an audit service, etc.
-Maven is used for many builds but the configuration is deliberately constructed to make the steps as explicit
-as possible, bash is used for other builds scripts, etc.
-
-## The tests
-
-Unit tests reside in TeaRESTApplication_UnitTest along with their own test data.
-
-Component testing is run from TeaRESTApplication_ComponentTest and relies on JDBC connections.
-
-## How to get started with IBM Cloud
-To replicate the pipeline locally, do the following:
+## Recent changes
+
+- Minikube added as the default "plain Kubernetes" option.
+- ACE-as-a-Service added as a deploy target (see below).
+
+## Technology and target options
+
+This repo can be built in several different ways, and can deploy to different targets (see
+[Getting started](#getting-started) for suggestions on how to choose a target) from the same
+source as shown in this diagram:
+
+![Pipeline overview](/demo-infrastructure/images/pipelines-overview.jpg)
+
+Testing is split into "Unit Test" and "Component Test" categories, where "unit tests" are self-contained
+and do not connect to external services such as databases (so they can run reliably anywhere) while the
+term "component test" was used in the ACE product development pipeline to mean "unit tests that use external
+services (such as databases)". See
+[ACE unit and component tests](https://community.ibm.com/community/user/integration/blogs/trevor-dolby/2023/03/20/app-connect-enterprise-ace-unit-and-component-test)
+for a discussion of the difference between test styles in integration.
+
+Pipeline technology options currently include:
+
+- [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/understanding-github-actions)
+ for CI build and test before pull requests (PRs) are merged. This requires a GitHub instance that supports
+ actions (not all Enterprise variants do), and credit enough to run the actions. There is currently no
+  component testing or deploy target (though these could be added) for these builds.
+- [Tekton](https://tekton.dev/docs/concepts/overview/) can be used to build, test, and deploy the Tea
+ REST application to ACE runtime infrastructure such as Kubernetes containers. Tekton is the basis for
+ many Kubernetes application build pipelines and also underpins RedHat OpenShift Pipelines.
+- [Jenkins](https://www.jenkins.io/) can be used to build, test, and deploy the Tea REST application
+ to ACE runtime infrastructure such as integration nodes. Jenkins is widely used in many organizations
+ for build and deployment.
+
+ACE deploy targets currently include:
+
+- Kubernetes containers, with both standalone ACE containers and ACE certified containers (via the
+ ACE operator code) as possible runtimes. [Minikube](https://minikube.sigs.k8s.io/docs/) (easily installed
+ locally) and OpenShift can be used with the former, while the latter expects to deploy to the Cloud
+ Pak for Integration (CP4i). See [tekton/README.md#container-deploy-target](tekton/README.md#container-deploy-target)
+ for a description of the container deploy pipelines.
+- [ACE-as-a-Service](https://www.ibm.com/docs/en/app-connect/12.0?topic=app-connect-enterprise-as-service)
+ (ACEaaS) running on Amazon Web Services (AWS). This option requires an instance (which can be a trial instance)
+  of ACEaaS to be available but does not require ACE servers to be managed directly (in virtual machines or containers)
+ as the flows run entirely in the cloud. See [demo-infrastructure/README-aceaas-pipelines.md](demo-infrastructure/README-aceaas-pipelines.md)
+ for an overview of the pipelines deploying to ACEaaS.
+- An ACE integration node, using an existing ACE integration node.
+
+As can be seen from the diagram above, not all deployment targets have been configured for all of
+the pipeline technology options, but more could be added as needed.
+
+As well as multiple options for pipelines and deploy targets, multiple build tools can be used to
+build the ACE flows, Java code, Maps, etc., and test the application in the pipeline and locally:
+
+- Standard ACE commands introduced at v12 (such as ibmint) can be used to build, deploy, and test
+  the application (see the sketch below).
+- Maven can also be used, and was the default in the ACE v11 version of this repo.
+- Gradle can be used to run builds and unit tests, but has not been enabled for component tests.
+- The toolkit can build and run the application and tests, and can also be used to check source into the GitHub repo.
+
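+The ibmint commands used by the pipelines in this repo can also be run by hand. As a rough sketch
+(assuming a local ACE 12.0.11 installation with mqsiprofile already sourced and the repo root as the
+current directory), the build and unit-test steps look like this:
+
+```
+# Package the application and libraries into a single BAR file
+ibmint package --input-path . --output-bar-file $PWD/tea-application-combined.bar \
+    --project TeaSharedLibraryJava --project TeaSharedLibrary --project TeaRESTApplication
+
+# Unpack the BAR file into a work directory and add the unit tests on top
+mqsicreateworkdir /tmp/test-work-dir
+mqsibar -w /tmp/test-work-dir -a $PWD/tea-application-combined.bar
+ibmint deploy --input-path . --output-work-directory /tmp/test-work-dir --project TeaRESTApplication_UnitTest
+
+# Run the unit tests in an integration server
+IntegrationServer -w /tmp/test-work-dir --no-nodejs --start-msgflows false \
+    --test-project TeaRESTApplication_UnitTest
+```
+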
+## Getting started
+
+Regardless of the pipeline technology and deployment target, some initial steps are similar:
+
+- Forking this repository is recommended as this allows experimentation with all aspects of
+ the application and pipeline. PRs welcome, too!
+- A database will be needed for the application to run correctly. GitHub Action CI builds can
+ succeed without a database because they only run build and UT steps, but all other use cases
+ require a database, and DB2 on Cloud (requires an IBM Cloud account) is one option that
+   requires no local setup and no payment. For DB2oC, create a "free tier" DB2 instance via
+ "Create resource" on the IBM Cloud dashboard and download the connection credentials for
+ use in the pipeline. See [demo-infrastructure/cloud-resources.md](demo-infrastructure/cloud-resources.md)
+ for more details.
+ - Note that component testing relies on the same DB2 on Cloud instance as the eventual application
+ image; this is not a best practice, but does keep the demo simpler to get going, and so getting
+ the DB2 instance credentials set up in Kubernetes and/or locally is necessary for the component tests.
+- Installing the ACE toolkit locally is recommended, and the ACE v12 toolkit can clone the
+ (forked) repo locally with the pre-installed eGit plugin. Although development and testing
+ can be done online using a GitHub-hosted container (see [README-codespaces](README-codespaces.md)
+ for details), having the toolkit available locally is helpful for replicating the most common
+ ACE development experience.
+
+Beyond those common steps, the choice of pipeline and target determines the next steps. The simplest
+way to choose the pipeline is to choose the target (Kubernetes, ACEaaS, or integration nodes), and
+then pick one of the pipeline technologies that will deploy to that target. For advanced users who
+are already familiar with pipelines, it may be better to start with a familiar pipeline technology and
+then choose an available target.
+
+- For Tekton deploying to Kubernetes, see [tekton/README.md](tekton/README.md) for instructions
+ for the various container options and pipelines.
+ - See also [CP4i README](tekton/os/cp4i/README.md) for CP4i-specific variations, including
+ component testing in a CP4i container (as opposed to a build pipeline container) to ensure
+ credentials configurations are working as expected.
+ - Note that the Tekton pipeline can also create temporary databases for use during pipeline runs; see
+ [temp-db2](tekton/temp-db2/README.md) for more details.
+- Tekton-to-ACEaaS follows a similar pattern (see [tekton/README.md#ace-as-a-service-target](tekton/README.md#ace-as-a-service-target)),
+ but does not need a runtime container as the runtime is in the cloud. Credentials are needed for the
+ cloud service.
+- For Jenkins, see the [Jenkins README](demo-infrastructure/README-jenkins.md) for details and
+ instructions on initial setup.
+ - Integration node targets require host/port/server information.
+ - Additional steps are required for ACE-as-a-Service credentials.
-1) Fork this repo and then clone it locally; although cloning it locally straight from the ot4i repo would allow building locally, for the pipeline itself to work some of the files need to be updated. The source also needs to be accessible to the IBM Cloud Kubernetes workers, and a public github repo forked from this one is the easiest way to do this. Cloning can be achieved with the git command line, or via the ACE v12 toolkit; the ACE v12 product can be downloaded from [the IBM website](https://www.ibm.com/marketing/iwm/iwm/web/pickUrxNew.do?source=swg-wmbfd).
-2) Acquire an IBM Cloud account and create a Kubernetes cluster called "aceCluster", a Docker registry, and a DB2 on Cloud instance. More info in [cloud resources description](demo-infrastructure/cloud-resources.md).
-3) Build the pre-req docker images and create the required credentials; see instructions in the [demo-infrastructure](demo-infrastructure) and [tekton/minimal-image-build](tekton/minimal-image-build) directories.
-4) Component testing relies on the same DB2 on Cloud instance as the eventual application image; this is not a best practice, but does keep the demo simpler to get going, and so getting the DB2 instance credentials set up in Kubernetes and/or locally is necessary for the component tests.
-5) Try running the pipeline using the instructions in the [tekton](tekton) directory.
-6) Optionally, enable GitHub actions; this requires a GitHub instance that supports actions (not all Enterprise variants do), and credit enough to run the actions.
diff --git a/TeaRESTApplication_ComponentTest/src/main/java/com/ibm/ot4i/ace/pipeline/demo/tea/TeaRESTApplication_WholeFlow_Tests.java b/TeaRESTApplication_ComponentTest/src/main/java/com/ibm/ot4i/ace/pipeline/demo/tea/TeaRESTApplication_WholeFlow_Tests.java
new file mode 100644
index 0000000..11f24e5
--- /dev/null
+++ b/TeaRESTApplication_ComponentTest/src/main/java/com/ibm/ot4i/ace/pipeline/demo/tea/TeaRESTApplication_WholeFlow_Tests.java
@@ -0,0 +1,80 @@
+package com.ibm.ot4i.ace.pipeline.demo.tea;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+
+import com.ibm.integration.test.v1.NodeSpy;
+import com.ibm.integration.test.v1.SpyObjectReference;
+import com.ibm.integration.test.v1.TestMessageAssembly;
+import com.ibm.integration.test.v1.TestSetup;
+import com.ibm.integration.test.v1.exception.TestException;
+
+import static com.ibm.integration.test.v1.Matchers.*;
+import static net.javacrumbs.jsonunit.JsonMatchers.jsonEquals;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+public class TeaRESTApplication_WholeFlow_Tests {
+
+ /*
+ * TeaRESTApplication_getIndex_subflow_0001_Test
+ * Test generated by IBM App Connect Enterprise Toolkit 12.0.1.0 on 10-Jun-2021 12:48:56
+ */
+
+ @AfterEach
+ public void cleanupTest() throws TestException {
+ // Ensure any mocks created by a test are cleared after the test runs
+ TestSetup.restoreAllMocks();
+ }
+
+ @Test
+ public void TeaRESTApplication_WholeFlow_Get_Test() throws TestException {
+
+
+ // Define the SpyObjectReference objects
+ SpyObjectReference httpInputObjRef = new SpyObjectReference().application("TeaRESTApplication")
+ .messageFlow("gen.TeaRESTApplication").node("HTTP Input");
+ SpyObjectReference httpReplyObjRef = new SpyObjectReference().application("TeaRESTApplication")
+ .messageFlow("gen.TeaRESTApplication").node("HTTP Reply");
+
+ // Initialise NodeSpy objects
+ NodeSpy httpInputSpy = new NodeSpy(httpInputObjRef);
+ NodeSpy httpReplySpy = new NodeSpy(httpReplyObjRef);
+
+ // Declare a new TestMessageAssembly object for the message being sent into the node
+ TestMessageAssembly inputMessageAssembly = new TestMessageAssembly();
+ InputStream inputMessage = Thread.currentThread().getContextClassLoader().getResourceAsStream("00003CC8-65DDFF90-00000001-0.mxml");
+ inputMessageAssembly.buildFromRecordedMessageAssembly(inputMessage);
+
+ // Configure the "in" terminal on the HTTP Reply node not to propagate.
+ // If we don't do this, then the reply node will throw exceptions when it
+ // realises we haven't actually used the HTTP transport.
+ httpReplySpy.setStopAtInputTerminal("in");
+
+ // Now call propagate on the "out" terminal of the HTTP Input node.
+     // This takes the place of an actual HTTP message: we simply hand the node
+ // the message assembly and tell it to propagate that as if it came from an
+ // actual client. This line is where the flow is actually run.
+ httpInputSpy.propagate(inputMessageAssembly, "out");
+
+ // Note that any exceptions would cause this test to fail, so if we reach
+ // the next lines then the flow has completed successfully.
+
+ // Validate the results from the flow execution
+ // We will now pick up the message that is propagated into the "HttpReply" node and validate it
+ TestMessageAssembly replyMessageAssembly = httpReplySpy.receivedMessageAssembly("in", 1);
+
+ // Assert output message body data
+ // Get the TestMessageAssembly object for the expected propagated message
+ TestMessageAssembly expectedMessageAssembly = new TestMessageAssembly();
+ InputStream expectedMessage = Thread.currentThread().getContextClassLoader().getResourceAsStream("00003CC8-65DDFF90-00000001-12.mxml");
+ expectedMessageAssembly.buildFromRecordedMessageAssembly(expectedMessage);
+
+ // Check the reply is as expected
+ assertThat(replyMessageAssembly, equalsMessage(expectedMessageAssembly).ignorePath("/HTTPReplyHeader/Server_Hostname", false));
+ }
+}
\ No newline at end of file
diff --git a/TeaRESTApplication_ComponentTest/src/main/resources/00003CC8-65DDFF90-00000001-0.mxml b/TeaRESTApplication_ComponentTest/src/main/resources/00003CC8-65DDFF90-00000001-0.mxml
new file mode 100644
index 0000000..4bd3d1a
--- /dev/null
+++ b/TeaRESTApplication_ComponentTest/src/main/resources/00003CC8-65DDFF90-00000001-0.mxml
@@ -0,0 +1 @@
+100003CC8-65DDFF90-00000001012024-02-27 15:28:16.228640HTTP Inputgen.TeaRESTApplication#FCMComposite_1_1ComIbmWSInputNodeoutTeaRESTApplicationgen.TeaRESTApplicationHTTP InputTRUERoute To Labelgen.TeaRESTApplication#FCMComposite_1_2ComIbmRouteToLabelNodeinTeaRESTApplicationgen.TeaRESTApplicationRoute To Label5461208FALSEFALSE2024-02-27 15:28:16.228640-10000000000000000000000000000000000000000000000000SOAP-HTTPlocalhost:7800curl/7.68.0*/*GET http://localhost:7800/tea/index/1 HTTP/1.1localhost7800127.0.0.1localhosthttp455648540000000000000000644b2eebbf3c000000000000getIndexteaindex1GETgetIndex/tea/index/1http://localhost:7800/tea/index/11
\ No newline at end of file
diff --git a/TeaRESTApplication_ComponentTest/src/main/resources/00003CC8-65DDFF90-00000001-12.mxml b/TeaRESTApplication_ComponentTest/src/main/resources/00003CC8-65DDFF90-00000001-12.mxml
new file mode 100644
index 0000000..1f37a5e
--- /dev/null
+++ b/TeaRESTApplication_ComponentTest/src/main/resources/00003CC8-65DDFF90-00000001-12.mxml
@@ -0,0 +1 @@
+100003CC8-65DDFF90-0000000112132024-02-27 15:28:16.228640getIndex (Implementation)gen.TeaRESTApplication#FCMComposite_1_5SubFlowNodeOutput_1TeaRESTApplicationgen.TeaRESTApplicationgetIndex (Implementation)FALSEgetIndex (Implementation).getIndex#OutTerminal.Output_1gen.TeaRESTApplication#FCMComposite_1_5.getIndex#OutTerminal.Output_1OutputNodeoutTeaRESTApplicationgen.TeaRESTApplication0getIndex (Implementation)getIndex (Implementation).getIndex#OutTerminal.Output_1HTTP Replygen.TeaRESTApplication#FCMComposite_1_3ComIbmWSReplyNodeinTeaRESTApplicationgen.TeaRESTApplicationHTTP Reply5461208FALSEFALSE2024-02-27 15:28:16.228640-10000000000000000000000000000000000000000000000000SOAP-HTTPlocalhost:7800curl/7.68.0*/*GET http://localhost:7800/tea/index/1 HTTP/1.1localhost7800127.0.0.1localhosthttpIBM-PF3K066LAssam1455648540000000000000000644b2eebbf3c000000000000teaindex1GETgetIndex/tea/index/1http://localhost:7800/tea/index/11
\ No newline at end of file
diff --git a/demo-infrastructure/Jenkinsfile.aceaas b/demo-infrastructure/Jenkinsfile.aceaas
new file mode 100644
index 0000000..f5b16a9
--- /dev/null
+++ b/demo-infrastructure/Jenkinsfile.aceaas
@@ -0,0 +1,273 @@
+pipeline {
+ agent { docker {
+ /* See README-jenkins.md for image discussion */
+ image 'cp.icr.io/cp/appc/ace:12.0.11.0-r1'
+ /* image 'ace-minimal-build:12.0.11.0-alpine' */
+ args '-e LICENSE=accept --entrypoint ""'
+ } }
+ parameters {
+ /* These values would be better moved to a configuration file and provided by */
+ /* the Config File Provider plugin (or equivalent), but this is good enough */
+ /* for a demo of ACE pipelines that isn't intended as a Jenkins tutorial. */
+ string(name: 'databaseName', defaultValue: 'BLUDB', description: 'JDBC database name')
+ string(name: 'serverName', defaultValue: '19af6446-6171-4641-8aba-9dcff8e1b6ff.c1ogj3sd0tgtu0lqde00.databases.appdomain.cloud', description: 'JDBC database host')
+ string(name: 'portNumber', defaultValue: '30699', description: 'JDBC database port')
+ string(name: 'deployPrefix', defaultValue: 'tdolby', description: 'ACEaaS artifact prefix')
+ /* Could put the endpoint in as a credential, but it's not really secret . . . */
+ string(name: 'APPCON_ENDPOINT', defaultValue: 'api.p-vir-c1.appconnect.automation.ibm.com', description: 'ACEaaS endpoint hostname')
+ booleanParam(name: 'DEPLOY_CONFIGURATION', defaultValue: false, description: 'Create policies, runtime, etc')
+ }
+ stages {
+ stage('Build and UT') {
+ steps {
+ sh '''#!/bin/bash
+ # Set HOME to somewhere writable by Maven
+ export HOME=/tmp
+
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
+ # Clean up just in case files have been left around
+ rm -f */junit-reports/TEST*.xml
+ rm -rf /tmp/test-work-dir
+
+ echo ========================================================================
+ echo Building application
+ echo ========================================================================
+ # Using --compile-maps-and-schemas for 12.0.11 and later . . .
+ ibmint package --input-path . --output-bar-file $PWD/tea-application-combined.bar --project TeaSharedLibraryJava --project TeaSharedLibrary --project TeaRESTApplication --compile-maps-and-schemas
+
+ echo ========================================================================
+ echo Building unit tests
+ echo ========================================================================
+ # Create the unit test work directory
+ mqsicreateworkdir /tmp/test-work-dir
+ mqsibar -w /tmp/test-work-dir -a $PWD/tea-application-combined.bar
+ # Build just the unit tests
+ ibmint deploy --input-path . --output-work-directory /tmp/test-work-dir --project TeaRESTApplication_UnitTest
+
+ echo ========================================================================
+ echo Running unit tests
+ echo ========================================================================
+ IntegrationServer -w /tmp/test-work-dir --no-nodejs --start-msgflows false --test-project TeaRESTApplication_UnitTest --test-junit-options --reports-dir=junit-reports
+ '''
+
+ }
+ post {
+ always {
+ junit '**/junit-reports/TEST*.xml'
+ }
+ }
+ }
+
+ stage('Test DB interactions') {
+ steps {
+ sh "cat demo-infrastructure/TEAJDBC.policyxml | sed 's/DATABASE_NAME/${params.databaseName}/g' > /tmp/TEAJDBC.policyxml"
+ sh "sed -i 's/SERVER_NAME/${params.serverName}/g' /tmp/TEAJDBC.policyxml"
+ sh "sed -i 's/PORT_NUMBER/${params.portNumber}/g' /tmp/TEAJDBC.policyxml"
+
+ sh '''#!/bin/bash
+                    # Should already have the projects unpacked
+ export WORKDIR=/tmp/test-work-dir
+ # Set HOME to somewhere writable by Maven
+ export HOME=/tmp
+
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
+ mkdir ${WORKDIR}/run/CTPolicies
+ echo '' > ${WORKDIR}/run/CTPolicies/policy.descriptor
+ cp /tmp/TEAJDBC.policyxml ${WORKDIR}/run/CTPolicies/
+ mqsisetdbparms -w ${WORKDIR} -n jdbc::tea -u $CT_JDBC_USR -p $CT_JDBC_PSW
+ sed -i "s/#policyProject: 'DefaultPolicies'/policyProject: 'CTPolicies'/g" ${WORKDIR}/server.conf.yaml
+
+ rm -f */junit-reports/TEST*.xml
+
+
+ echo ========================================================================
+ echo Building component tests
+ echo ========================================================================
+
+ # Build just the component tests
+ ibmint deploy --input-path . --output-work-directory ${WORKDIR} --project TeaRESTApplication_ComponentTest
+
+ echo ========================================================================
+ echo Running component tests
+ echo ========================================================================
+ IntegrationServer -w ${WORKDIR} --no-nodejs --start-msgflows false --test-project TeaRESTApplication_ComponentTest --test-junit-options --reports-dir=junit-reports
+
+ '''
+ }
+ post {
+ always {
+ junit '**/junit-reports/TEST*.xml'
+ }
+ }
+ }
+
+ stage('Push BAR file') {
+ steps {
+ sh "echo ${params.APPCON_ENDPOINT} > /tmp/APPCON_ENDPOINT"
+ sh "echo ${params.deployPrefix} > /tmp/deployPrefix"
+
+ sh '''#!/bin/bash
+ # Set HOME to somewhere writable by Maven
+ export HOME=/tmp
+
+ # Fix for ace-minimal-build and curl
+ unset LD_LIBRARY_PATH
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
+ echo "########################################################################"
+ echo "# Acquiring token using API key"
+ echo "########################################################################" && echo
+
+ curl --request POST \
+ --url https://`cat /tmp/APPCON_ENDPOINT`/api/v1/tokens \
+ --header "X-IBM-Client-Id: ${APPCON_CLIENT_ID}" \
+ --header "X-IBM-Client-Secret: ${APPCON_CLIENT_SECRET}" \
+ --header 'accept: application/json' \
+ --header 'content-type: application/json' \
+ --header "x-ibm-instance-id: ${APPCON_INSTANCE_ID}" \
+ --data "{\\"apiKey\\": \\"${APPCON_API_KEY}\\"}" --output /tmp/token-output.txt
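+                    # Pull the access_token value out of the JSON response without needing jq (not available in most ACE images)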
+ cat /tmp/token-output.txt | tr -d '{}"' | tr ',' '\n' | grep access_token | sed 's/access_token://g' > /tmp/APPCON_TOKEN
+
+ echo "########################################################################"
+ echo "# PUTting BAR file to ACE service"
+ echo "########################################################################" && echo
+
+ curl -X PUT https://`cat /tmp/APPCON_ENDPOINT`/api/v1/bar-files/`cat /tmp/deployPrefix`-tea-jenkins \
+ -H "x-ibm-instance-id: ${APPCON_INSTANCE_ID}" -H "Content-Type: application/octet-stream" \
+ -H "Accept: application/json" -H "X-IBM-Client-Id: ${APPCON_CLIENT_ID}" -H "authorization: Bearer `cat /tmp/APPCON_TOKEN`" \
+ --data-binary @tea-application-combined.bar --output /tmp/curl-output.txt
+
+ # We will have exited if curl returned non-zero so the output should contain the BAR file name
+ cat /tmp/curl-output.txt ; echo
+ # This would be easier with jq but that's not available in most ACE images
+ export BARURL=$(cat /tmp/curl-output.txt | tr -d '{}"' | tr ',' '\n' | grep url | sed 's/url://g')
+ echo BARURL: $BARURL
+ echo -n $BARURL > /tmp/BARURL
+ '''
+ }
+ }
+
+ stage('Create Configurations and IR') {
+ when {
+ expression {
+ return params.DEPLOY_CONFIGURATION
+ }
+ }
+
+ steps {
+ sh "echo ${params.APPCON_ENDPOINT} > /tmp/APPCON_ENDPOINT"
+ sh "echo ${params.deployPrefix} > /tmp/deployPrefix"
+
+ sh '''#!/bin/bash
+ # Set HOME to somewhere writable by Maven
+ export HOME=/tmp
+
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
+ echo ========================================================================
+ echo Creating `cat /tmp/deployPrefix`-jdbc-policies configuration
+ echo ========================================================================
+ mkdir /tmp/JDBCPolicies
+ echo '' > /tmp/JDBCPolicies/policy.descriptor
+ cp /tmp/TEAJDBC.policyxml /tmp/JDBCPolicies/TEAJDBC.policyxml
+
+ # Using "zip" would be more obvious, but not all ACE images have it available.
+ (cd /tmp && /opt/ibm/ace-12/common/jdk/bin/jar cvf /tmp/JDBCPolicies.zip JDBCPolicies)
+ cat /tmp/JDBCPolicies.zip | base64 -w 0 > /tmp/JDBCPolicies.zip.base64
+
+ # Fix for ace-minimal-build and curl
+ unset LD_LIBRARY_PATH
+
+ cp tekton/aceaas/create-configuration-template.json /tmp/jdbc-policies-configuration.json
+ sed -i "s/TEMPLATE_NAME/`cat /tmp/deployPrefix`-jdbc-policies/g" /tmp/jdbc-policies-configuration.json
+ sed -i "s/TEMPLATE_TYPE/policyproject/g" /tmp/jdbc-policies-configuration.json
+ sed -i "s/TEMPLATE_DESCRIPTION/`cat /tmp/deployPrefix` JDBCPolicies project/g" /tmp/jdbc-policies-configuration.json
+ # Backslash issues with groovy scripting - the effect is to escape / characters in the base64 data to avoid issues with sed
+ sed -i "s/TEMPLATE_BASE64DATA/`cat /tmp/JDBCPolicies.zip.base64 | sed 's/\\//\\\\\\\\\\\\//g'`/g" /tmp/jdbc-policies-configuration.json
+ #cat /tmp/jdbc-policies-configuration.json
+
+ curl -X PUT https://`cat /tmp/APPCON_ENDPOINT`/api/v1/configurations/`cat /tmp/deployPrefix`-jdbc-policies \
+ -H "x-ibm-instance-id: ${APPCON_INSTANCE_ID}" -H "Content-Type: application/json" \
+ -H "Accept: application/json" -H "X-IBM-Client-Id: ${APPCON_CLIENT_ID}" -H "authorization: Bearer `cat /tmp/APPCON_TOKEN`" \
+ --data-binary @/tmp/jdbc-policies-configuration.json
+ echo
+
+ echo ========================================================================
+ echo Creating jdbc::tea as `cat /tmp/deployPrefix`-jdbc-setdbparms configuration
+ echo ========================================================================
+ echo -n jdbc::tea $CT_JDBC_USR $CT_JDBC_PSW | base64 -w 0 > /tmp/jdbc-setdbparms.base64
+ cp tekton/aceaas/create-configuration-template.json /tmp/jdbc-setdbparms-configuration.json
+ sed -i "s/TEMPLATE_NAME/`cat /tmp/deployPrefix`-jdbc-setdbparms/g" /tmp/jdbc-setdbparms-configuration.json
+ sed -i "s/TEMPLATE_TYPE/setdbparms/g" /tmp/jdbc-setdbparms-configuration.json
+ sed -i "s/TEMPLATE_DESCRIPTION/`cat /tmp/deployPrefix` JDBC credentials/g" /tmp/jdbc-setdbparms-configuration.json
+ # Backslash issues with groovy scripting - the effect is to escape / characters in the base64 data to avoid issues with sed
+ sed -i "s/TEMPLATE_BASE64DATA/`cat /tmp/jdbc-setdbparms.base64 | sed 's/\\//\\\\\\\\\\\\//g'`/g" /tmp/jdbc-setdbparms-configuration.json
+ #cat /tmp/jdbc-setdbparms-configuration.json
+
+ curl -X PUT https://`cat /tmp/APPCON_ENDPOINT`/api/v1/configurations/`cat /tmp/deployPrefix`-jdbc-setdbparms \
+ -H "x-ibm-instance-id: ${APPCON_INSTANCE_ID}" -H "Content-Type: application/json" \
+ -H "Accept: application/json" -H "X-IBM-Client-Id: ${APPCON_CLIENT_ID}" -H "authorization: Bearer `cat /tmp/APPCON_TOKEN`" \
+ --data-binary @/tmp/jdbc-setdbparms-configuration.json
+ echo
+
+ echo ========================================================================
+ echo Creating default policy project setting as `cat /tmp/deployPrefix`-default-policy-project configuration
+ echo ========================================================================
+ (echo "Defaults:" && echo " policyProject: 'JDBCPolicies'") | base64 -w 0 > /tmp/default-policy-project.base64
+ cp tekton/aceaas/create-configuration-template.json /tmp/default-policy-project-configuration.json
+ sed -i "s/TEMPLATE_NAME/`cat /tmp/deployPrefix`-default-policy-project/g" /tmp/default-policy-project-configuration.json
+ sed -i "s/TEMPLATE_TYPE/serverconf/g" /tmp/default-policy-project-configuration.json
+ sed -i "s/TEMPLATE_DESCRIPTION/`cat /tmp/deployPrefix` default policy project for JDBC/g" /tmp/default-policy-project-configuration.json
+ # Backslash issues with groovy scripting - the effect is to escape / characters in the base64 data to avoid issues with sed
+ sed -i "s/TEMPLATE_BASE64DATA/`cat /tmp/default-policy-project.base64 | sed 's/\\//\\\\\\\\\\\\//g'`/g" /tmp/default-policy-project-configuration.json
+ #cat /tmp/default-policy-project-configuration.json
+
+ curl -X PUT https://`cat /tmp/APPCON_ENDPOINT`/api/v1/configurations/`cat /tmp/deployPrefix`-default-policy-project \
+ -H "x-ibm-instance-id: ${APPCON_INSTANCE_ID}" -H "Content-Type: application/json" \
+ -H "Accept: application/json" -H "X-IBM-Client-Id: ${APPCON_CLIENT_ID}" -H "authorization: Bearer `cat /tmp/APPCON_TOKEN`" \
+ --data-binary @/tmp/default-policy-project-configuration.json
+
+ echo ========================================================================
+ echo Creating IntegrationRuntime JSON
+ echo ========================================================================
+ cp tekton/aceaas/create-integrationruntime-template.json /tmp/create-integrationruntime.json
+ sed -i "s/TEMPLATE_NAME/`cat /tmp/deployPrefix`-tea-jenkins-ir/g" /tmp/create-integrationruntime.json
+ # Backslash issues with groovy scripting - the effect is to escape / characters in the base64 data to avoid issues with sed
+ sed -i "s/TEMPLATE_BARURL/`cat /tmp/BARURL | sed 's/\\//\\\\\\\\\\\\//g'`/g" /tmp/create-integrationruntime.json
+ sed -i "s/TEMPLATE_POLICYPROJECT/`cat /tmp/deployPrefix`-jdbc-policies/g" /tmp/create-integrationruntime.json
+ sed -i "s/TEMPLATE_SERVERCONF/`cat /tmp/deployPrefix`-default-policy-project/g" /tmp/create-integrationruntime.json
+ sed -i "s/TEMPLATE_SETDBPARMS/`cat /tmp/deployPrefix`-jdbc-setdbparms/g" /tmp/create-integrationruntime.json
+ echo "Contents of create-integrationruntime.json:"
+ cat /tmp/create-integrationruntime.json
+
+ curl -X PUT https://`cat /tmp/APPCON_ENDPOINT`/api/v1/integration-runtimes/`cat /tmp/deployPrefix`-tea-jenkins-ir \
+ -H "x-ibm-instance-id: ${APPCON_INSTANCE_ID}" -H "Content-Type: application/json" \
+ -H "Accept: application/json" -H "X-IBM-Client-Id: ${APPCON_CLIENT_ID}" -H "authorization: Bearer `cat /tmp/APPCON_TOKEN`" \
+ --data-binary @/tmp/create-integrationruntime.json
+ '''
+ }
+ }
+
+ }
+ environment {
+ CT_JDBC = credentials('CT_JDBC')
+ APPCON_INSTANCE_ID = credentials('APPCON_INSTANCE_ID')
+ APPCON_CLIENT_ID = credentials('APPCON_CLIENT_ID')
+ APPCON_CLIENT_SECRET = credentials('APPCON_CLIENT_SECRET')
+ APPCON_API_KEY = credentials('APPCON_API_KEY')
+ /* Could put the endpoint in as a credential, but it's not really secret . . . */
+ /* APPCON_ENDPOINT = credentials('APPCON_ENDPOINT') */
+ }
+}
diff --git a/demo-infrastructure/Jenkinsfile.ibmint-integration-node b/demo-infrastructure/Jenkinsfile.ibmint-integration-node
new file mode 100644
index 0000000..69e1cc5
--- /dev/null
+++ b/demo-infrastructure/Jenkinsfile.ibmint-integration-node
@@ -0,0 +1,120 @@
+pipeline {
+ agent { docker {
+ /* image 'cp.icr.io/cp/appc/ace:12.0.11.0-r1' */
+ image 'ace-minimal:12.0.11.0-alpine'
+ args '-e LICENSE=accept --entrypoint ""'
+ } }
+ parameters {
+ /* These values would be better moved to a configuration file and provided by */
+ /* the Config File Provider plugin (or equivalent), but this is good enough */
+ /* for a demo of ACE pipelines that isn't intended as a Jenkins tutorial. */
+ string(name: 'databaseName', defaultValue: 'BLUDB', description: 'JDBC database name')
+ string(name: 'serverName', defaultValue: '19af6446-6171-4641-8aba-9dcff8e1b6ff.c1ogj3sd0tgtu0lqde00.databases.appdomain.cloud', description: 'JDBC database host')
+ string(name: 'portNumber', defaultValue: '30699', description: 'JDBC database port')
+ string(name: 'integrationNodeHost', defaultValue: '10.0.0.2', description: 'Integration node REST API host or IP address')
+ string(name: 'integrationNodePort', defaultValue: '4414', description: 'Integration node REST API port')
+ string(name: 'integrationServerName', defaultValue: 'default', description: 'Integration server name')
+ }
+ stages {
+ stage('Build and UT') {
+ steps {
+ sh '''#!/bin/bash
+ # Set HOME to somewhere writable by Maven
+ export HOME=/tmp
+
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
+ # Clean up just in case files have been left around
+ rm -f */junit-reports/TEST*.xml
+ rm -rf /tmp/test-work-dir
+
+ echo ========================================================================
+ echo Building application
+ echo ========================================================================
+ # Using --compile-maps-and-schemas for 12.0.11 and later . . .
+ ibmint package --input-path . --output-bar-file $PWD/tea-application-combined.bar --project TeaSharedLibraryJava --project TeaSharedLibrary --project TeaRESTApplication --compile-maps-and-schemas
+
+ echo ========================================================================
+ echo Building unit tests
+ echo ========================================================================
+ # Create the unit test work directory
+ mqsicreateworkdir /tmp/test-work-dir
+ mqsibar -w /tmp/test-work-dir -a $PWD/tea-application-combined.bar
+ # Build just the unit tests
+ ibmint deploy --input-path . --output-work-directory /tmp/test-work-dir --project TeaRESTApplication_UnitTest
+
+ echo ========================================================================
+ echo Running unit tests
+ echo ========================================================================
+ IntegrationServer -w /tmp/test-work-dir --no-nodejs --start-msgflows false --test-project TeaRESTApplication_UnitTest --test-junit-options --reports-dir=junit-reports
+ '''
+
+ }
+ post {
+ always {
+ junit '**/junit-reports/TEST*.xml'
+ }
+ }
+ }
+
+ stage('Test DB interactions') {
+ steps {
+ sh "cat demo-infrastructure/TEAJDBC.policyxml | sed 's/DATABASE_NAME/${params.databaseName}/g' > /tmp/TEAJDBC.policyxml"
+ sh "sed -i 's/SERVER_NAME/${params.serverName}/g' /tmp/TEAJDBC.policyxml"
+ sh "sed -i 's/PORT_NUMBER/${params.portNumber}/g' /tmp/TEAJDBC.policyxml"
+
+ sh '''#!/bin/bash
+                    # Should already have the projects unpacked
+ export WORKDIR=/tmp/test-work-dir
+ # Set HOME to somewhere writable by Maven
+ export HOME=/tmp
+
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
+ mkdir ${WORKDIR}/run/CTPolicies
+ echo '' > ${WORKDIR}/run/CTPolicies/policy.descriptor
+ cp /tmp/TEAJDBC.policyxml ${WORKDIR}/run/CTPolicies/
+ mqsisetdbparms -w ${WORKDIR} -n jdbc::tea -u $CT_JDBC_USR -p $CT_JDBC_PSW
+ sed -i "s/#policyProject: 'DefaultPolicies'/policyProject: 'CTPolicies'/g" ${WORKDIR}/server.conf.yaml
+
+ rm -f */junit-reports/TEST*.xml
+
+
+ echo ========================================================================
+ echo Building component tests
+ echo ========================================================================
+
+ # Build just the component tests
+ ibmint deploy --input-path . --output-work-directory ${WORKDIR} --project TeaRESTApplication_ComponentTest
+
+ echo ========================================================================
+ echo Running component tests
+ echo ========================================================================
+ IntegrationServer -w ${WORKDIR} --no-nodejs --start-msgflows false --test-project TeaRESTApplication_ComponentTest --test-junit-options --reports-dir=junit-reports
+
+ '''
+ }
+ post {
+ always {
+ junit '**/junit-reports/TEST*.xml'
+ }
+ }
+ }
+
+ stage('Next stage deploy') {
+ steps {
+ sh "bash -c \"export LICENSE=accept ; . /opt/ibm/ace-12/server/bin/mqsiprofile ; mqsideploy -i ${params.integrationNodeHost} -p ${params.integrationNodePort} -e ${params.integrationServerName} -a tea-application-combined.bar\""
+ }
+ }
+
+ }
+ environment {
+ CT_JDBC = credentials('CT_JDBC')
+ }
+}
diff --git a/demo-infrastructure/Jenkinsfile.maven b/demo-infrastructure/Jenkinsfile.maven
new file mode 100644
index 0000000..58eb3c6
--- /dev/null
+++ b/demo-infrastructure/Jenkinsfile.maven
@@ -0,0 +1,95 @@
+pipeline {
+ agent { docker { image 'ace-minimal-build:12.0.11.0-alpine' } }
+ parameters {
+ /* These values would be better moved to a configuration file and provided by */
+ /* the Config File Provider plugin (or equivalent), but this is good enough */
+ /* for a demo of ACE pipelines that isn't intended as a Jenkins tutorial. */
+ string(name: 'databaseName', defaultValue: 'BLUDB', description: 'JDBC database name')
+ string(name: 'serverName', defaultValue: '19af6446-6171-4641-8aba-9dcff8e1b6ff.c1ogj3sd0tgtu0lqde00.databases.appdomain.cloud', description: 'JDBC database host')
+ string(name: 'portNumber', defaultValue: '30699', description: 'JDBC database port')
+ string(name: 'integrationNodeHost', defaultValue: '10.0.0.2', description: 'Integration node REST API host or IP address')
+ string(name: 'integrationNodePort', defaultValue: '4414', description: 'Integration node REST API port')
+ string(name: 'integrationServerName', defaultValue: 'default', description: 'Integration server name')
+ }
+ stages {
+ stage('Build and UT') {
+ steps {
+ sh '''#!/bin/bash
+ # Set HOME to somewhere writable by Maven
+ export HOME=/tmp
+
+ # Clean up just in case files have been left around
+ rm -f */maven-reports/TEST*.xml
+ rm -rf $PWD/ace-server
+
+ mvn --no-transfer-progress -Dinstall.work.directory=$PWD/ace-server install
+ '''
+
+ }
+ post {
+ always {
+ junit '**/maven-reports/TEST*.xml'
+ }
+ }
+ }
+
+ stage('Test DB interactions') {
+ steps {
+ sh "cat demo-infrastructure/TEAJDBC.policyxml | sed 's/DATABASE_NAME/${params.databaseName}/g' > /tmp/TEAJDBC.policyxml"
+ sh "sed -i 's/SERVER_NAME/${params.serverName}/g' /tmp/TEAJDBC.policyxml"
+ sh "sed -i 's/PORT_NUMBER/${params.portNumber}/g' /tmp/TEAJDBC.policyxml"
+
+ sh '''#!/bin/bash
+                    # Should already have the projects unpacked
+ export WORKDIR=$PWD/ace-server
+ # Set HOME to somewhere writable by Maven
+ export HOME=/tmp
+
+ mkdir ${WORKDIR}/run/CTPolicies
+ echo '' > ${WORKDIR}/run/CTPolicies/policy.descriptor
+ cp /tmp/TEAJDBC.policyxml ${WORKDIR}/run/CTPolicies/
+ mqsisetdbparms -w ${WORKDIR} -n jdbc::tea -u $CT_JDBC_USR -p $CT_JDBC_PSW
+ sed -i "s/#policyProject: 'DefaultPolicies'/policyProject: 'CTPolicies'/g" ${WORKDIR}/server.conf.yaml
+
+ rm -f */maven-reports/TEST*.xml
+ ( cd TeaRESTApplication_ComponentTest && mvn --no-transfer-progress -Dct.work.directory=${WORKDIR} verify )
+ '''
+ }
+ post {
+ always {
+ junit '**/maven-reports/TEST*.xml'
+ }
+ }
+ }
+
+ stage('Next stage BAR build') {
+ steps {
+ sh '''#!/bin/bash
+ # Build a single BAR file that contains everything rather than deploying two BAR files.
+ # Deploying two BAR files (one for the shared library and the other for the application)
+ # would work, but would take longer on redeploys due to reloading the application on
+ # each deploy.
+ #
+ # The Tekton pipeline doesn't have this issue because the application and library are
+ # unpacked into a work directory in a container image in that pipeline, so there is no
+ # deploy to a running server.
+ mqsipackagebar -w $PWD -a tea-application-combined.bar -y TeaSharedLibrary -k TeaRESTApplication
+
+ # Optional compile for XMLNSC, DFDL, and map resources. Useful as long as the target
+ # broker is the same OS, CPU, and installation including ifixes as the build system.
+ # mqsibar --bar-file tea-application-combined.bar --compile
+ '''
+ }
+ }
+
+ stage('Next stage deploy') {
+ steps {
+ sh "bash -c \"mqsideploy -i ${params.integrationNodeHost} -p ${params.integrationNodePort} -e ${params.integrationServerName} -a tea-application-combined.bar\""
+ }
+ }
+
+ }
+ environment {
+ CT_JDBC = credentials('CT_JDBC')
+ }
+}
diff --git a/demo-infrastructure/README-aceaas-pipelines.md b/demo-infrastructure/README-aceaas-pipelines.md
new file mode 100644
index 0000000..047b319
--- /dev/null
+++ b/demo-infrastructure/README-aceaas-pipelines.md
@@ -0,0 +1,59 @@
+# ACEaaS pipelines and configurations
+
+ACE-as-a-Service is built on the Cloud Pak for Integration (CP4i) and uses the same
+set of artifacts to run ACE flows, so the pipelines will create the same set of
+configurations to run the Tea REST application. Details on the various configuration
+types can be found [in the ACEaaS docs](https://www.ibm.com/docs/en/app-connect/containers_cd?topic=dashboard-configuration-types-integration-servers-integration-runtimes)
+but the key artifacts built for this application are as follows:
+
+- A BAR file containing the Tea REST application, associated shared libraries, JDBC
+ driver, and any other code needed for successful operation. ACEaaS uses the same
+ BAR format as integration nodes; there is only one ACE BAR format.
+- A set of configurations for the various credentials and service locations:
+ - [policy project](https://www.ibm.com/docs/en/app-connect/containers_cd?topic=runtimes-policy-project-type)
+ for the JDBCProviders policy used for database access.
+ - [setdbparms.txt](https://www.ibm.com/docs/en/app-connect/containers_cd?topic=runtimes-setdbparmstxt-type)
+ configuration for the JDBC username/password.
+ - [server.conf.yaml](https://www.ibm.com/docs/en/app-connect/containers_cd?topic=runtimes-serverconfyaml-type)
+    configuration to specify the JDBC policy project as the default for the server (see the sketch below).
+- An [Integration Runtime](https://www.ibm.com/docs/en/app-connect/containers_cd?topic=resources-integration-runtime-reference)
+  that references the BAR file and various configurations; this artifact causes ACEaaS to
+ start an IntegrationServer in a container.
+
+![ACEaaS pipelines](/demo-infrastructure/images/aceaas-pipeline-overview.png)
+
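+These configurations are small text payloads sent to the ACEaaS API in base64 form. As an
+illustrative sketch (taken from the Jenkinsfile.aceaas steps in this repo), the server.conf.yaml
+configuration contains only the default policy project setting and is created as follows:
+
+```
+# The server.conf.yaml overrides only need to name the default policy project;
+# the content is base64-encoded before being PUT to the configurations API.
+(echo "Defaults:" && echo "  policyProject: 'JDBCPolicies'") | base64 -w 0 > /tmp/default-policy-project.base64
+```
+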
+## ACEaaS API
+
+The pipelines will call the [ACE-as-a-Service API](https://www.ibm.com/docs/en/app-connect/saas?topic=information-api-overview)
+to create and deploy the various artifacts, and require credentials to be able to do so.
+These can be created using the ACEaaS console and provided to the pipeline as credentials:
+
+- Endpoint URL (similar to `api.p-vir-c1.appconnect.automation.ibm.com`)
+- Instance identifier (similar to `2vkpa0udw`)
+- Client ID and client secret, created from the "Public API credentials" section of the ACEaaS dashboard
+ (see URL of the form https://2vkpa0udw-dashboard.p-vir-c1.appconnect.automation.ibm.com/settings?tab=credentialsTab)
+- API key created from the ACEaaS dashboard (see URL of the form https://2vkpa0udw-dashboard.p-vir-c1.appconnect.automation.ibm.com/management/apikeys)
+
+See [https://www.ibm.com/docs/en/app-connect/saas?topic=overview-accessing-api](https://www.ibm.com/docs/en/app-connect/saas?topic=overview-accessing-api) for
+further instructions on how to create the correct credentials.
+
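+As a sketch of how the pipelines use these credentials (see `Jenkinsfile.aceaas` in this directory
+for the full version), the first API call exchanges the API key for a bearer token that is then
+passed on all subsequent calls:
+
+```
+# Exchange the API key for a short-lived access token; the environment variables
+# here stand in for the credentials described above.
+curl --request POST \
+    --url https://$APPCON_ENDPOINT/api/v1/tokens \
+    --header "X-IBM-Client-Id: $APPCON_CLIENT_ID" \
+    --header "X-IBM-Client-Secret: $APPCON_CLIENT_SECRET" \
+    --header "x-ibm-instance-id: $APPCON_INSTANCE_ID" \
+    --header 'accept: application/json' \
+    --header 'content-type: application/json' \
+    --data "{\"apiKey\": \"$APPCON_API_KEY\"}"
+```
+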
+## ACEaaS API rate limits
+
+The API has a limit of 100 calls per hour (at the time of writing), which could be exhausted
+quickly if the configurations are recreated every time, and so the configuration creation is
+normally only run when needed (such as the first time the application is being deployed).
+
+Note that the API call to acquire a token counts as part of the limit, so each application
+deployment uses two calls (one for the token and the other to deploy the BAR file). It might
+be possible to securely store the token between pipeline runs in some scenarios, but care
+must be taken if this is attempted: the token should be kept secret, and ideally stored in a
+secure vault, but the tokens also expire every 12 hours and so need to be refreshed.
+
+The examples in this repo acquire a new token every time to avoid having to solve this issue,
+using pipeline-provided secure storage (Kubernetes secrets, Jenkins credentials, etc) to
+ensure the API keys are kept securely.
+
+The API rate limit also prevents polling for the application to (re)start successfully, so the
+pipelines complete after updating the BAR file (or after creating the initial configuration)
+rather than completing when the application is actually running with the built artifacts.
+
diff --git a/demo-infrastructure/README-jenkins.md b/demo-infrastructure/README-jenkins.md
index 48da7fb..0ed3e87 100644
--- a/demo-infrastructure/README-jenkins.md
+++ b/demo-infrastructure/README-jenkins.md
@@ -1,9 +1,10 @@
# Jenkins pipeline
-Used to run the pipeline stages via Jenkins. Relies on an existing integration node being available with the JDBC
-credentials having been set up, and will run on Windows or via docker on Unix platforms.
+Used to run the pipeline stages via Jenkins. Relies either on an existing integration node being available
+with the JDBC credentials already set up, or on an ACE-as-a-Service (ACEaaS) instance being
+available, and will run on Windows (integration node only) or via Docker on Unix platforms for either target.
-![Pipeline overview](ace-nodes-testing-jenkins.png)
+![Pipelines overview](/demo-infrastructure/images/jenkins-pipelines-overview.jpg)
## Running Jenkins
@@ -21,26 +22,50 @@ or Jenkinsfile.windows depending on which platform is used:
- databaseName
- serverName
- portNumber
-- integrationNodeHost
-- integrationNodePort
-- integrationServerName
+
+These values should be set from the DB2 on Cloud credentials (see [cloud-resources.md](cloud-resources.md))
+so that the component tests can run successfully. This is required regardless of the pipeline
+deploy target (integration node or ACEaaS).
-For Windows, the ACE_COMMAND environment variable may need to be changed to match a locally-installed
-version of ACE (currently set to 12.0.10). Container support is not required.
+For Windows, the ACE_COMMAND environment variable may need to be changed to match a locally-installed
+version of ACE (currently set to 12.0.11.0). Container support is not required.
-For Linux, the ACE build container image may need to be created first. The use of a container
-to run ACE commands ensures that the Jenkins environment (for example, Java level) does not
-affect ACE commands, and ensures a consistent environment for building ACE artifacts.
-See the [ace-minimal-build](/demo-infrastructure/docker/ace-minimal-build) directory for
-information on building the image.
+For Linux, the pipeline will use containers for the actual build steps, and this requires either
+the `ace` container image from cp.icr.io to be available or (for users without an IBM Entitlement Key)
+the `ace-minimal-build` container image to be built first. The use of a container to run ACE commands ensures
+that the Jenkins environment (for example, Java level) does not affect ACE commands, and ensures
+a consistent environment for building ACE artifacts. See the [ace-minimal-build](/demo-infrastructure/docker/ace-minimal-build)
+directory for information on building the image, or [Obtaining an IBM App Connect Enterprise
+server image](https://www.ibm.com/docs/en/app-connect/12.0?topic=cacerid-building-sample-app-connect-enterprise-image-using-docker#aceimages__title__1)
+to download the `ace` image. The Jenkinsfile will need to be updated to use the correct image.
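+
+For the entitled `ace` image, a typical sequence (assuming a valid IBM Entitlement Key; `cp` is the
+standard user name for the entitled registry) would be something like:
+
+```
+# Log in to the IBM entitled registry and pull the ACE image referenced by the Jenkinsfile
+docker login cp.icr.io --username cp --password <entitlement-key>
+docker pull cp.icr.io/cp/appc/ace:12.0.11.0-r1
+```
+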
-Once those values have been updated and containers built if needed, then the pipeline can be
-constructed, but it may be a good idea to change "GitHub API usage" under "Configure System"
-in the Jenkins settings as otherwise messages such as the following may appear regularly:
+The ACE version does not have to match the exact modification level (12.0.X) of the deploy target
+(integration node or ACEaaS) but keeping build containers up-to-date is a good idea in general in
+order to benefit from fixes and new capabilities.
+
+Once the Jenkinsfile values have been updated and containers built if needed, then the pipeline
+can be constructed, but it may be a good idea to change "GitHub API usage" under "System
+Configuration" -> "System" in the Jenkins settings as otherwise messages such as the
+following may appear regularly:
```
17:07:37 Jenkins-Imposed API Limiter: Current quota for Github API usage has 52 remaining (1 over budget). Next quota of 60 in 58 min. Sleeping for 4 min 9 sec.
17:07:37 Jenkins is attempting to evenly distribute GitHub API requests. To configure a different rate limiting strategy, such as having Jenkins restrict GitHub API requests only when near or above the GitHub rate limit, go to "GitHub API usage" under "Configure System" in the Jenkins settings.
```
+Adding GitHub credentials will also eliminate these messages, as doing so avoids the (heavily-restricted) anonymous access rate limits.
+
+The procedure beyond this point differs somewhat depending on the target, as the deployment
+configuration for local integration nodes and ACEaaS require different values.
+
+## Local integration node target
+
+![Pipeline overview](/demo-infrastructure/images/jenkins-pipeline.png)
+
+The following values should be changed in either Jenkinsfile or Jenkinsfile.windows
+depending on which platform is used:
+
+- integrationNodeHost
+- integrationNodePort
+- integrationServerName
To create the pipeline (and following the Jenkins pipeline tour instructions), a "multibranch
pipeline" should be created and pointed at the github repo. For Windows users, the pipeline
@@ -96,7 +121,7 @@ and the policy should look like
```
and be deployed to the default policy project for the integration server specified above.
-## Running the pipeline and validating the results
+### Running the pipeline and validating the results
Assuming the pipeline parameters have been modified in the Jenkinsfile, the pipeline can be run
using "Build with Parameters" on the desired branch. This should start the pipeline, which will
@@ -126,6 +151,63 @@ To add tea to the index, curl can be used:
```
curl -X POST --data '{"name": "Assam", "strength": 5}' http://localhost:7800/tea/index
```
+
+## ACE-as-a-Service target
+
+See [README-aceaas-pipelines.md](README-aceaas-pipelines.md) for a general overview. The
+Jenkins pipeline for ACEaaS looks as follows, with the (optional) "Create Configurations and IR" stage
+shown as running only for the initial build:
+
+![Pipeline overview](/demo-infrastructure/images/jenkins-aceaas-pipeline.png)
+
+Similar to the integration node pipeline, the following values should be changed in
+[Jenkinsfile.aceaas](/demo-infrastructure/Jenkinsfile.aceaas) or set when running the build:
+
+- deployPrefix, which is used as a prefix for the various configurations to avoid conflicts on shared services.
+- APPCON_ENDPOINT, which is the API endpoint.
+- DEPLOY_CONFIGURATION, which enables the "Create Configurations and IR" stage and defaults to `false` but should be set to `true` for the initial configuration creation.
+
+To create the pipeline (and following the Jenkins pipeline tour instructions), a "multibranch
+pipeline" should be created and pointed at the github repo. This pipeline must refer to
+`demo-infrastructure/Jenkinsfile.aceaas` and not the default `Jenkinsfile`.
+
+Once the pipeline has been created and branches configured, the `CT_JDBC` credentials should
+be created as described in the integration node section above, and additional credentials are
+needed for ACEaaS. See [https://www.ibm.com/docs/en/app-connect/saas?topic=overview-accessing-api](https://www.ibm.com/docs/en/app-connect/saas?topic=overview-accessing-api)
+for details on how to create the correct credentials, and then set the following as "secret text" values:
+
+- APPCON_INSTANCE_ID is the instance identifier (similar to `2vkpa0udw`)
+- APPCON_CLIENT_ID is the client ID created from the "Public API credentials" section of the ACEaaS dashboard
+- APPCON_CLIENT_SECRET is the client secret created from the "Public API credentials" section of the ACEaaS dashboard
+- APPCON_API_KEY is the API key created from the ACEaaS dashboard
+
+The pipeline could be changed to store APPCON_ENDPOINT as a credential as well (similar to the
+Tekton equivalent), but the URL is not secret so in the default case it is provided in the Jenkinsfile.
+
+The pipeline should create the required configurations based on the JDBC credentials
+and other values if DEPLOY_CONFIGURATION is set to `true`; this should only be done
+for the first pipeline run or after any change to the credentials (see the "ACEaaS API rate
+limits" section of [README-aceaas-pipelines.md](README-aceaas-pipelines.md) for more information).
+
+### Running the pipeline and validating the results
+
+Assuming the pipeline parameters have been modified in demo-infrastructure/Jenkinsfile.aceaas, the
+pipeline can be run using "Build with Parameters" on the desired branch. This should start the
+pipeline, which will then pull the source down, compile and test it, and then deploy it to ACEaaS.
+
+Once the pipeline has completed and the integration runtime has started, the application can be
+tested by using a browser or curl to access the application API endpoint. The endpoint can be
+found from the ACEaaS UI by examining the deployed REST API as shown:
+
+![ACEaaS REST API endpoint](aceaas-rest-api-endpoint.png)
+
+The endpoint should be of the form `https://tdolby-tea-jenkins-ir-https-ac2vkpa0udw.p-vir-d1.appconnect.ibmappdomain.cloud/tea`
+and (similar to the integration node example above) curl can be used to retrieve or add data.
+```
+C:\>curl https://tdolby-tea-jenkins-ir-https-ac2vkpa0udw.p-vir-d1.appconnect.ibmappdomain.cloud/tea/index/1
+{"name":"Assam","id":"1"}
+```
+
## Common errors
Incorrect credentials in Jenkins can cause component test failures while leaving the unit
diff --git a/demo-infrastructure/aceaas-rest-api-endpoint.png b/demo-infrastructure/aceaas-rest-api-endpoint.png
new file mode 100644
index 0000000..b035fcd
Binary files /dev/null and b/demo-infrastructure/aceaas-rest-api-endpoint.png differ
diff --git a/demo-infrastructure/cloud-resources.md b/demo-infrastructure/cloud-resources.md
index 49d5363..ecddfb8 100644
--- a/demo-infrastructure/cloud-resources.md
+++ b/demo-infrastructure/cloud-resources.md
@@ -1,51 +1,69 @@
-# Cloud resources for pipeline use (in progress)
+# Cloud resources for pipeline use (changed from previous iterations of the pipeline)
-Need an IBM ID and then cloud registration at https://cloud.ibm.com/registration
+The IBM Kubernetes Service no longer offers the "free tier" that was used in prior versions
+of this repo, and so the free options available are now [Minikube](https://minikube.sigs.k8s.io/docs/)
+(free to install locally) and [RedHat Single-Node OpenShift](https://www.redhat.com/en/blog/meet-single-node-openshift-our-smallest-openshift-footprint-edge-architectures)
+(45-day trial installation). Both of these have been tested (including CP4i on OpenShift) but
+the Tekton pipeline is generic enough that it should work with other Kubernetes providers with
+only minimal modifications.
-The IBM Cloud tools should be available; installing them locally is one way to achieve this, following
-the instructions at https://cloud.ibm.com/docs/cli?topic=cli-getting-started to install the ibmcloud
-command and plugins.
+## Tekton interactions
-Tekton can be run from a dashboard or from the command line; the command is available from https://github.com/tektoncd/cli and
-can be installed locally.
+For plain Kubernetes users, Tekton can be run from a dashboard or from the command line;
+the command is available from https://github.com/tektoncd/cli and can be installed locally.
+OpenShift users should install the Red Hat OpenShift Pipelines operator, as this includes
+Tekton and provides an integrated pipeline UI.
-## API keys for command-line and Tekton builds
+For Tekton dashboard users, the [Tekton dashboard docs](https://tekton.dev/docs/dashboard/install/#using-kubectl-port-forward)
+describe a port-forwarding way to access the dashboard from outside the cluster, which may
+be helpful.
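+
+As a sketch of that approach (matching the default dashboard service and namespace):
+```
+kubectl --namespace tekton-pipelines port-forward --address 0.0.0.0 svc/tekton-dashboard 9097:9097
+```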
-Need to create an API key: from the "Manage" menu at the top of the IBM Cloud dashboard, choose "Access (IAM)" and
-then "API keys" on the left. This key is used for login from local commands, Tekton builds, and container image
-registry access.
+## Docker registry
+
+The IBM Cloud container registry does still have a free tier, but pull bandwidth is limited to
+5GB a month per region so this may not be a good option for clusters running outside IBM Cloud.
-## Kubernetes
+Both Minikube and OpenShift can run container registries within the cluster, and this is
+likely to be the best way to run simple pipeline experiments. The `registry` addon for Minikube
+(plus some additional configuration to enable insecure access) can be enabled during cluster
+creation, and the OpenShift container registry can be enabled for single-node clusters by
+following [https://docs.openshift.com/container-platform/4.14/registry/configuring_registry_storage/configuring-registry-storage-baremetal.html](https://docs.openshift.com/container-platform/4.14/registry/configuring_registry_storage/configuring-registry-storage-baremetal.html)
+if it is not already enabled.
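+
+As a sketch for Minikube (the subnet is the usual Docker-driver default and is an assumption; adjust
+to match the actual cluster network):
+```
+minikube start --addons registry --insecure-registry "192.168.49.0/24"
+```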
-Create a cluster called aceCluster by using the IBM Cloud dashboard: select "Kubernetes" on the left-side navigation
-bar (accessible via the hamburger icon at the top left), and then select "Create cluster" on the resulting Kubernetes
-screen.
+External registries can also be used, including Docker Hub and Red Hat quay.io. Docker Hub
+has stricter rate limits at the time of writing, and repeated pipeline runs could hit those
+limits in some cases. Use of a local registry is advised if possible, and if the pipeline
+is run in a non-IBM cloud then using a registry associated with that cloud would be best.
-The cluster may take a few minutes to provision. After the cluster has been created, then it should be possible
-to login to ibmcloud and then access the cluster:
+Note that the minikube registry does not have security enabled by default, and so there is
+no username/password combination to put in a `docker-registry` secret; dummy values can be
+used instead to populate the (required) "regcred" secret:
```
-ibmcloud login -a cloud.ibm.com -r us-south --apikey
-ibmcloud ks cluster config --cluster aceCluster
+kubectl create secret docker-registry regcred --docker-server=us.icr.io --docker-username=dummy --docker-password=dummy
```
-## Docker registry
-
-Create a registry (under "Container Registry" on the IBM Cloud dashboard), then create a namespace
-with a unique name to store the images used in the demo. This demo has "us.icr.io/ace-containers" set as the
-default which means that "ace-containers" is already in use and another name must be chosen.
-
-The various pipeline-run files in the tekton directories (ace-pipeline-run.yaml,
-minimal-image-build/ace-minimal-image-pipeline-run.yaml, etc) need to be updated with the registry information,
-otherwise permissions-related errors will occur.
-
-To enable pipeline access to the registry, assign the API key (see above) as a "secret text" credential in
-Tekton called "regcred" for use in pushing and pulling container images:
+For other registries, the credentials should be the same ones that would be used when running
+`docker login`. For single-node OpenShift out-of-the-box, this can mean using the temporary
+admin credentials via `oc whoami -t`:
```
-kubectl create secret docker-registry regcred --docker-server=us.icr.io --docker-username=iamapikey --docker-password=
+kubectl create secret docker-registry regcred --docker-server=image-registry.openshift-image-registry.svc.cluster.local:5000 --docker-username=kubeadmin --docker-password=$(oc whoami -t)
```
+For CP4i users, or plain Kubernetes users running the plain `ace` image
+(see [ACE containers: choosing a base image](https://community.ibm.com/community/user/integration/blogs/trevor-dolby/2024/02/05/ace-containers-choosing-a-base-image)
+for more information on the difference), an IBM Entitlement Key will be needed in order to access
+images from cp.icr.io; see [https://myibm.ibm.com/products-services/containerlibrary](https://myibm.ibm.com/products-services/containerlibrary)
+to access the key.
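+
+As a sketch (the secret name is an assumption and should match whatever the deployment or service
+account actually references), the entitlement key can be stored as a pull secret:
+```
+kubectl create secret docker-registry ibm-entitlement-key --docker-server=cp.icr.io --docker-username=cp --docker-password=myEntitlementKey
+```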
+
+Note that the `ace-minimal` image can be built from the (free) ACE installation package without
+requiring an IBM Entitlement Key; see [https://github.com/ot4i/ace-docker/tree/main/experimental](https://github.com/ot4i/ace-docker/tree/main/experimental)
+for the various images that can be built using Docker commands, and the [minimal-image-build README](/tekton/minimal-image-build/README.md)
+for details on how to build it using Tekton.
+
## DB2 on Cloud
+To access IBM Cloud, an IBM ID is required, along with cloud registration at https://cloud.ibm.com/registration
+
Create a DB2 instance via "Create resource" on the IBM Cloud dashboard; create credentials and add them to the Kubernetes cluster as "jdbc-secret" like this:
```
kubectl create secret generic jdbc-secret --from-literal=USERID='blah' --from-literal=PASSWORD='blah' --from-literal=databaseName='BLUDB' --from-literal=serverName='19af6446-6171-4641-8aba-9dcff8e1b6ff.c1ogj3sd0tgtu0lqde00.databases.appdomain.cloud' --from-literal=portNumber='30699'
diff --git a/demo-infrastructure/docker/README.md b/demo-infrastructure/docker/README.md
index 34293f1..e3f0a97 100644
--- a/demo-infrastructure/docker/README.md
+++ b/demo-infrastructure/docker/README.md
@@ -1,6 +1,6 @@
# Docker images for build pipelines to use
-Building on top of ace-minimal (see https://github.com/trevor-dolby-at-ibm-com/ace-docker/tree/master/experimental/ace-minimal
+Building on top of ace-minimal (see https://github.com/ot4i/ace-docker/tree/master/experimental/ace-minimal
for more information) to create a builder image that contains build-time code that should not be pushed into the final application.
- ace-minimal-build is used for running the Tekton pipeline for the demo application build and also for running Jenkins
diff --git a/demo-infrastructure/docker/ace-minimal-build/Dockerfile b/demo-infrastructure/docker/ace-minimal-build/Dockerfile
index 8f06d75..ad8e6e1 100644
--- a/demo-infrastructure/docker/ace-minimal-build/Dockerfile
+++ b/demo-infrastructure/docker/ace-minimal-build/Dockerfile
@@ -25,6 +25,8 @@ RUN cd /tmp && \
rm /tmp/apache-maven-3.8.4-bin.tar.gz && \
ln -s /opt/apache-maven-3.8.4/bin/mvn /usr/local/bin/mvn
+RUN echo "Adding curl for Alpine - may fail if the base is Ubuntu" ; apk add curl || /bin/true
+
ENV TZ=Europe/London
WORKDIR /
diff --git a/demo-infrastructure/docker/ace-minimal-build/Dockerfile.ace b/demo-infrastructure/docker/ace-minimal-build/Dockerfile.ace
new file mode 100644
index 0000000..ed60a11
--- /dev/null
+++ b/demo-infrastructure/docker/ace-minimal-build/Dockerfile.ace
@@ -0,0 +1,25 @@
+# Copyright (c) 2022 Open Technologies for Integration
+# Licensed under the MIT license (see LICENSE for details)
+ARG BASE_IMAGE=cp.icr.io/cp/appc/ace:12.0.11.0-r1
+FROM $BASE_IMAGE
+
+#
+# This image is run by Tekton in a build container to build the application and tests.
+#
+# Starting from ace-minimal (which has the ACE install plus the aceuser created),
+# this image contains only the parts needed for the build and unit test phase.
+#
+
+LABEL "maintainer"="trevor.dolby@ibm.com"
+USER root
+
+RUN microdnf -y update && microdnf -y install procps
+
+# Patch Jenkins issues
+RUN chmod -R ugo+rwx /var/mqsi
+
+USER 1001
+
+# Default command to run
+ENTRYPOINT []
+CMD ["/bin/bash"]
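+
+# Example build command (a sketch; the output tag is arbitrary and BASE_IMAGE defaults to the value above):
+#   docker build -f Dockerfile.ace --build-arg BASE_IMAGE=cp.icr.io/cp/appc/ace:12.0.11.0-r1 -t ace-build:12.0.11.0 .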
diff --git a/demo-infrastructure/docker/ace-minimal-build/README.md b/demo-infrastructure/docker/ace-minimal-build/README.md
index e1ae2b2..f6effd8 100644
--- a/demo-infrastructure/docker/ace-minimal-build/README.md
+++ b/demo-infrastructure/docker/ace-minimal-build/README.md
@@ -3,7 +3,7 @@
Used by the pipeline in this repo to run the ACE commands within a CI or other pipeline build.
Built on top of ace-minimal:12.0.10.0-alpine (in a registry of your choice and built from
-https://github.com/trevor-dolby-at-ibm-com/ace-docker/tree/master/experimental/ace-minimal)
+https://github.com/ot4i/ace-docker/tree/master/experimental/ace-minimal)
but will be pushed to the same registry via the Tekton pipelines in tekton/minimal-image-build
in this repo if using Tekton.
diff --git a/demo-infrastructure/images/aceaas-pipeline-overview.drawio b/demo-infrastructure/images/aceaas-pipeline-overview.drawio
new file mode 100644
index 0000000..89443d5
--- /dev/null
+++ b/demo-infrastructure/images/aceaas-pipeline-overview.drawio
@@ -0,0 +1,137 @@
diff --git a/demo-infrastructure/images/aceaas-pipeline-overview.jpg b/demo-infrastructure/images/aceaas-pipeline-overview.jpg
new file mode 100644
index 0000000..d20afcb
Binary files /dev/null and b/demo-infrastructure/images/aceaas-pipeline-overview.jpg differ
diff --git a/demo-infrastructure/images/aceaas-pipeline-overview.png b/demo-infrastructure/images/aceaas-pipeline-overview.png
new file mode 100644
index 0000000..d948ee7
Binary files /dev/null and b/demo-infrastructure/images/aceaas-pipeline-overview.png differ
diff --git a/demo-infrastructure/images/jenkins-aceaas-pipeline.drawio b/demo-infrastructure/images/jenkins-aceaas-pipeline.drawio
new file mode 100644
index 0000000..bd259fc
--- /dev/null
+++ b/demo-infrastructure/images/jenkins-aceaas-pipeline.drawio
@@ -0,0 +1,37 @@
diff --git a/demo-infrastructure/images/jenkins-aceaas-pipeline.png b/demo-infrastructure/images/jenkins-aceaas-pipeline.png
new file mode 100644
index 0000000..df7c832
Binary files /dev/null and b/demo-infrastructure/images/jenkins-aceaas-pipeline.png differ
diff --git a/demo-infrastructure/images/jenkins-pipeline.drawio b/demo-infrastructure/images/jenkins-pipeline.drawio
new file mode 100644
index 0000000..9ca37b6
--- /dev/null
+++ b/demo-infrastructure/images/jenkins-pipeline.drawio
@@ -0,0 +1,37 @@
diff --git a/demo-infrastructure/images/jenkins-pipeline.png b/demo-infrastructure/images/jenkins-pipeline.png
new file mode 100644
index 0000000..0e39c7c
Binary files /dev/null and b/demo-infrastructure/images/jenkins-pipeline.png differ
diff --git a/demo-infrastructure/images/jenkins-pipelines-overview.drawio b/demo-infrastructure/images/jenkins-pipelines-overview.drawio
new file mode 100644
index 0000000..a078113
--- /dev/null
+++ b/demo-infrastructure/images/jenkins-pipelines-overview.drawio
@@ -0,0 +1,67 @@
diff --git a/demo-infrastructure/images/jenkins-pipelines-overview.jpg b/demo-infrastructure/images/jenkins-pipelines-overview.jpg
new file mode 100644
index 0000000..0baaf07
Binary files /dev/null and b/demo-infrastructure/images/jenkins-pipelines-overview.jpg differ
diff --git a/demo-infrastructure/images/pipeline-high-level.drawio b/demo-infrastructure/images/pipeline-high-level.drawio
new file mode 100644
index 0000000..db29294
--- /dev/null
+++ b/demo-infrastructure/images/pipeline-high-level.drawio
@@ -0,0 +1,52 @@
diff --git a/demo-infrastructure/images/pipeline-high-level.jpg b/demo-infrastructure/images/pipeline-high-level.jpg
new file mode 100644
index 0000000..95ae2b2
Binary files /dev/null and b/demo-infrastructure/images/pipeline-high-level.jpg differ
diff --git a/demo-infrastructure/images/pipeline-high-level.png b/demo-infrastructure/images/pipeline-high-level.png
new file mode 100644
index 0000000..baed3d4
Binary files /dev/null and b/demo-infrastructure/images/pipeline-high-level.png differ
diff --git a/demo-infrastructure/images/pipelines-overview.drawio b/demo-infrastructure/images/pipelines-overview.drawio
new file mode 100644
index 0000000..3560c83
--- /dev/null
+++ b/demo-infrastructure/images/pipelines-overview.drawio
@@ -0,0 +1,159 @@
diff --git a/demo-infrastructure/images/pipelines-overview.jpg b/demo-infrastructure/images/pipelines-overview.jpg
new file mode 100644
index 0000000..25e1a0d
Binary files /dev/null and b/demo-infrastructure/images/pipelines-overview.jpg differ
diff --git a/demo-infrastructure/images/pipelines-overview.png b/demo-infrastructure/images/pipelines-overview.png
new file mode 100644
index 0000000..575239e
Binary files /dev/null and b/demo-infrastructure/images/pipelines-overview.png differ
diff --git a/demo-infrastructure/images/tekton-aceaas-pipeline.drawio b/demo-infrastructure/images/tekton-aceaas-pipeline.drawio
new file mode 100644
index 0000000..b4e640b
--- /dev/null
+++ b/demo-infrastructure/images/tekton-aceaas-pipeline.drawio
@@ -0,0 +1,126 @@
diff --git a/demo-infrastructure/images/tekton-aceaas-pipeline.jpg b/demo-infrastructure/images/tekton-aceaas-pipeline.jpg
new file mode 100644
index 0000000..109f371
Binary files /dev/null and b/demo-infrastructure/images/tekton-aceaas-pipeline.jpg differ
diff --git a/demo-infrastructure/images/tekton-aceaas-pipeline.png b/demo-infrastructure/images/tekton-aceaas-pipeline.png
new file mode 100644
index 0000000..aebd1b3
Binary files /dev/null and b/demo-infrastructure/images/tekton-aceaas-pipeline.png differ
diff --git a/demo-infrastructure/images/tekton-cp4i-pipeline.drawio b/demo-infrastructure/images/tekton-cp4i-pipeline.drawio
new file mode 100644
index 0000000..d97f598
--- /dev/null
+++ b/demo-infrastructure/images/tekton-cp4i-pipeline.drawio
@@ -0,0 +1,211 @@
diff --git a/demo-infrastructure/images/tekton-cp4i-pipeline.jpg b/demo-infrastructure/images/tekton-cp4i-pipeline.jpg
new file mode 100644
index 0000000..b16af2a
Binary files /dev/null and b/demo-infrastructure/images/tekton-cp4i-pipeline.jpg differ
diff --git a/demo-infrastructure/images/tekton-cp4i-pipeline.png b/demo-infrastructure/images/tekton-cp4i-pipeline.png
new file mode 100644
index 0000000..ad7770e
Binary files /dev/null and b/demo-infrastructure/images/tekton-cp4i-pipeline.png differ
diff --git a/demo-infrastructure/images/tekton-dashboard.png b/demo-infrastructure/images/tekton-dashboard.png
new file mode 100644
index 0000000..dd6b1de
Binary files /dev/null and b/demo-infrastructure/images/tekton-dashboard.png differ
diff --git a/demo-infrastructure/images/tekton-pipeline.drawio b/demo-infrastructure/images/tekton-pipeline.drawio
new file mode 100644
index 0000000..ae3b9dc
--- /dev/null
+++ b/demo-infrastructure/images/tekton-pipeline.drawio
@@ -0,0 +1,153 @@
diff --git a/demo-infrastructure/images/tekton-pipeline.png b/demo-infrastructure/images/tekton-pipeline.png
new file mode 100644
index 0000000..55ec4cd
Binary files /dev/null and b/demo-infrastructure/images/tekton-pipeline.png differ
diff --git a/demo-infrastructure/images/tekton-pipelines-overview.drawio b/demo-infrastructure/images/tekton-pipelines-overview.drawio
new file mode 100644
index 0000000..b271a12
--- /dev/null
+++ b/demo-infrastructure/images/tekton-pipelines-overview.drawio
@@ -0,0 +1,84 @@
diff --git a/demo-infrastructure/images/tekton-pipelines-overview.jpg b/demo-infrastructure/images/tekton-pipelines-overview.jpg
new file mode 100644
index 0000000..bc098aa
Binary files /dev/null and b/demo-infrastructure/images/tekton-pipelines-overview.jpg differ
diff --git a/demo-infrastructure/images/tekton-pipelines-overview.png b/demo-infrastructure/images/tekton-pipelines-overview.png
new file mode 100644
index 0000000..1f78942
Binary files /dev/null and b/demo-infrastructure/images/tekton-pipelines-overview.png differ
diff --git a/demo-infrastructure/init-creds.sh b/demo-infrastructure/init-creds.sh
index f7ad240..63dfd62 100755
--- a/demo-infrastructure/init-creds.sh
+++ b/demo-infrastructure/init-creds.sh
@@ -8,8 +8,8 @@ echo "Pulling in secrets"
set +x
-mkdir /home/aceuser/ace-server/run/PreProdPolicies
-echo '' > /home/aceuser/ace-server/run/PreProdPolicies/policy.descriptor
+mkdir /home/aceuser/ace-server/run/JDBCPolicies
+echo '' > /home/aceuser/ace-server/run/JDBCPolicies/policy.descriptor
export TEMPLATE_POLICYXML=/tmp/TEAJDBC.policyxml
@@ -31,7 +31,7 @@ then
echo "policy ${TEMPLATE_POLICYXML} after"
cat ${TEMPLATE_POLICYXML}
- cp ${TEMPLATE_POLICYXML} /home/aceuser/ace-server/run/PreProdPolicies/
+ cp ${TEMPLATE_POLICYXML} /home/aceuser/ace-server/run/JDBCPolicies/
echo "---" >> /home/aceuser/ace-server/overrides/server.conf.yaml
echo "Credentials:" >> /home/aceuser/ace-server/overrides/server.conf.yaml
@@ -54,7 +54,7 @@ then
echo "policy ${TEMPLATE_POLICYXML} after"
cat ${TEMPLATE_POLICYXML}
- cp ${TEMPLATE_POLICYXML} /home/aceuser/ace-server/run/PreProdPolicies/
+ cp ${TEMPLATE_POLICYXML} /home/aceuser/ace-server/run/JDBCPolicies/
echo "---" >> /home/aceuser/ace-server/overrides/server.conf.yaml
echo "Credentials:" >> /home/aceuser/ace-server/overrides/server.conf.yaml
@@ -78,12 +78,12 @@ then
echo "policy ${TEMPLATE_POLICYXML} after"
cat ${TEMPLATE_POLICYXML}
- cp ${TEMPLATE_POLICYXML} /home/aceuser/ace-server/run/PreProdPolicies/
+ cp ${TEMPLATE_POLICYXML} /home/aceuser/ace-server/run/JDBCPolicies/
mqsisetdbparms -w /home/aceuser/ace-server -n jdbc::tea -u `cat /work/jdbc/USERID` -p `cat /work/jdbc/PASSWORD`
fi
-if [[ -e "/home/aceuser/ace-server/run/PreProdPolicies/TEAJDBC.policyxml" ]]
+if [[ -e "/home/aceuser/ace-server/run/JDBCPolicies/TEAJDBC.policyxml" ]]
then
# Already completed
echo "Not reading kube secrets"
@@ -97,10 +97,10 @@ else
echo "policy ${TEMPLATE_POLICYXML} after"
cat ${TEMPLATE_POLICYXML}
- cp ${TEMPLATE_POLICYXML} /home/aceuser/ace-server/run/PreProdPolicies/
+ cp ${TEMPLATE_POLICYXML} /home/aceuser/ace-server/run/JDBCPolicies/
mqsisetdbparms -w /home/aceuser/ace-server -n jdbc::tea -u `cat /run/secrets/jdbc/USERID` -p `cat /run/secrets/jdbc/PASSWORD`
fi
-sed -i "s/#policyProject: 'DefaultPolicies'/policyProject: 'PreProdPolicies'/g" /home/aceuser/ace-server/server.conf.yaml
+sed -i "s/#policyProject: 'DefaultPolicies'/policyProject: 'JDBCPolicies'/g" /home/aceuser/ace-server/server.conf.yaml
diff --git a/demo-infrastructure/windows-containers/README.md b/demo-infrastructure/windows-containers/README.md
index 6753da1..96b0113 100644
--- a/demo-infrastructure/windows-containers/README.md
+++ b/demo-infrastructure/windows-containers/README.md
@@ -13,13 +13,13 @@ mount the Jenkins build workspace from the host:
## Building the ACE Jenkins container
-The [ace-docker](https://github.com/trevor-dolby-at-ibm-com/ace-docker/tree/main/experimental/windows)
-repo contains an [ace-basic](https://github.com/trevor-dolby-at-ibm-com/ace-docker/tree/main/experimental/windows/ace-basic)
+The [ace-docker](https://github.com/ot4i/ace-docker/tree/main/experimental/windows)
+repo contains an [ace-basic](https://github.com/ot4i/ace-docker/tree/main/experimental/windows/ace-basic)
directory that can be used to build a Jenkins agent container. See the ace-basic link for details
on the image itself, and how it is constructed.
The repo must be cloned locally, possibly configured with a download URL (see
-[ace-basic](https://github.com/trevor-dolby-at-ibm-com/ace-docker/tree/main/experimental/windows/ace-basic)),
+[ace-basic](https://github.com/ot4i/ace-docker/tree/main/experimental/windows/ace-basic)),
and then the following command should be run in the experimental/windows/ace-basic directory
```
docker build --build-arg FROMIMAGE=jenkins/agent:windowsservercore-ltsc2019 -t ace-jenkins:12.0.10.0-windows .
diff --git a/serverless/tea-tekton-knative-service.yaml b/serverless/tea-tekton-knative-service.yaml
index 5f1b5e4..9540617 100644
--- a/serverless/tea-tekton-knative-service.yaml
+++ b/serverless/tea-tekton-knative-service.yaml
@@ -13,7 +13,7 @@ spec:
- name: regcred
containers:
- name: tea-tekton-knative
- image: DOCKER_REGISTRY/tea-tekton:latest
+ image: DOCKER_REGISTRY/tea-tekton:IMAGE_TAG
ports:
- containerPort: 7800
volumeMounts:
diff --git a/tekton/10-ibmint-ace-build-task.yaml b/tekton/10-ibmint-ace-build-task.yaml
new file mode 100644
index 0000000..5191368
--- /dev/null
+++ b/tekton/10-ibmint-ace-build-task.yaml
@@ -0,0 +1,223 @@
+apiVersion: tekton.dev/v1beta1
+kind: Task
+metadata:
+ name: ace-build
+spec:
+ # The security and environment settings are needed for OpenShift in a non-default
+  # namespace such as cp4i. Buildah, used for the image build and push step, expects to run as root in the container.
+ stepTemplate:
+ securityContext:
+ runAsUser: 0
+ env:
+ - name: "HOME"
+ value: "/tekton/home"
+ - name: "LICENSE"
+ value: "accept"
+ params:
+ - name: outputRegistry
+ type: string
+ - name: url
+ type: string
+ - name: revision
+ type: string
+ - name: buildImage
+ type: string
+ - name: runtimeBaseImage
+ type: string
+ results:
+ - name: tag
+ description: image tag of the form 20240220135127-6fe9106
+ steps:
+ - name: clone
+ image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
+ script: |
+ #!/bin/sh
+ set -e # Fail on error
+ cd /work
+ git clone -b $(params.revision) $(params.url)
+ cd ace-demo-pipeline
+ export DATE=$(date '+%Y%m%d%H%M%S')
+ export COMMIT=$(git log -1 --pretty=%h)
+ export TAG="$DATE"-"$COMMIT"
+ echo Setting container tag to "$TAG"
+ echo -n "$TAG" > $(results.tag.path)
+
+ # Slightly hacky but works . . .
+ chmod -R 777 /work/ace-demo-pipeline
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: ibmint-build
+ image: $(params.buildImage)
+ #image: cp.icr.io/cp/appc/ace:12.0.11.0-r1
+ #
+ # Runs the build and unit test phases, leaving the results in the work directory
+ # for later steps.
+ #
+ script: |
+ #!/bin/bash
+
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
+ cd /work/ace-demo-pipeline
+ mkdir /work/ibmint-output
+ mqsicreateworkdir /work/ibmint-output/ace-server
+ # Using --compile-maps-and-schemas for 12.0.11 and later . . .
+ ibmint deploy --input-path . --output-work-directory /work/ibmint-output/ace-server --project TeaSharedLibraryJava --project TeaSharedLibrary --project TeaRESTApplication --compile-maps-and-schemas
+ ibmint optimize server --work-dir /work/ibmint-output/ace-server --disable NodeJS
+
+ # Copy the contents of the work directory into a new unit-test-specific work directory
+ # This avoids the risk of unit tests files being deployed in the real containers, and
+ # is quicker than building the application again
+ mqsicreateworkdir /work/ut-work-dir
+ (cd /work/ibmint-output/ace-server && tar -cf - * ) | (cd /work/ut-work-dir && tar -xf - )
+ # Build just the unit tests
+ ibmint deploy --input-path . --output-work-directory /work/ut-work-dir --project TeaRESTApplication_UnitTest
+
+ # Run the unit tests
+ IntegrationServer -w /work/ut-work-dir --no-nodejs --start-msgflows false --test-project TeaRESTApplication_UnitTest
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: component-test
+ image: $(params.buildImage)
+ #image: cp.icr.io/cp/appc/ace:12.0.11.0-r1
+ #
+ # Builds and runs the component tests using the JDBC credentials provided from
+ # the secret. Also uses the same init-creds.sh script used by the non-CP4i image
+ # to load credentials at startup.
+ #
+ # Leaves the resulting component test project in the work directory to be picked
+ # up by the second Kaniko build in the next step.
+ #
+ script: |
+ #!/bin/bash
+
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error
+
+ export PATH=/opt/ibm/ace-12/common/jdk/bin:$PATH
+ # Slightly hacky, but quicker than building everything again!
+ (cd /work/ibmint-output/ace-server && tar -cf - * ) | (cd /home/aceuser/ace-server && tar -xf - )
+ ls -l /home/aceuser/ace-server
+ # Set up credentials for the component tests; init-creds.sh looks in /tmp for policy
+ cp /work/ace-demo-pipeline/demo-infrastructure/TEAJDBC.policyxml /tmp/
+ bash /work/ace-demo-pipeline/demo-infrastructure/init-creds.sh
+ # Build and run the tests
+ cd /work/ace-demo-pipeline
+
+ # Build just the component tests
+ ibmint deploy --input-path . --output-work-directory /home/aceuser/ace-server --project TeaRESTApplication_ComponentTest
+
+ # Run the component tests
+ IntegrationServer -w /home/aceuser/ace-server --no-nodejs --start-msgflows false --test-project TeaRESTApplication_ComponentTest
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: secret-volume-2
+ mountPath: /var/run/secrets/jdbc
+ - name: next-stage-container-setup
+ image: $(params.buildImage)
+ script: |
+ #!/bin/bash
+
+ set -e # Fail on error
+
+ cd /work/ibmint-output
+ cp /work/ace-demo-pipeline/tekton/Dockerfile Dockerfile
+
+ # Copy in various startup files
+ cp /work/ace-demo-pipeline/demo-infrastructure/TEAJDBC.policyxml ace-server/
+ cp /work/ace-demo-pipeline/demo-infrastructure/application-overrides.txt ace-server/
+ cp /work/ace-demo-pipeline/demo-infrastructure/init-creds.sh ace-server/ace-startup-script.sh
+ cp /work/ace-demo-pipeline/demo-infrastructure/read-hashicorp-creds.sh ace-server/
+ cp /work/ace-demo-pipeline/demo-infrastructure/read-xml-creds.sh ace-server/
+
+ echo Contents of /work/ibmint-output/ace-server/server.components.yaml
+ cat /work/ibmint-output/ace-server/server.components.yaml || /bin/true
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: docker-build-and-push
+ image: quay.io/buildah/stable:v1
+ #image: registry.redhat.io/rhel8/buildah:8.9-5
+ securityContext:
+ runAsUser: 0
+ # Needed for hostPath volumes on OpenShift
+ #privileged: true
+ capabilities:
+ add: ["CHOWN", "DAC_OVERRIDE","FOWNER","SETFCAP","SETGID","SETUID"]
+ # specifying DOCKER_CONFIG is required to allow buildah to detect docker credential
+ env:
+ - name: "DOCKER_CONFIG"
+ value: "/tekton/home/.docker/"
+ script: |
+ date
+ export TAG=`cat $(results.tag.path)`
+ echo Using $TAG as image tag
+ buildah --storage-driver=overlay bud --format=oci --tls-verify=false --no-cache \
+ --build-arg BASE_IMAGE=$(params.runtimeBaseImage) \
+ -f /work/ibmint-output/Dockerfile -t $(params.outputRegistry)/tea-tekton:$TAG /work/ibmint-output
+ date
+ buildah --storage-driver=overlay push --tls-verify=false --digestfile /tmp/image-digest \
+ $(params.outputRegistry)/tea-tekton:$TAG "docker://$(params.outputRegistry)/tea-tekton:$TAG"
+ date
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: varlibcontainers
+ mountPath: /var/lib/containers
+ volumes:
+ - name: work
+ emptyDir: {}
+ - name: secret-volume-2
+ secret:
+ secretName: jdbc-secret
+ #
+ # Default buildah approach using emptyDir; takes about 2 minutes on a test SNO cluster
+ #
+ - name: varlibcontainers
+ emptyDir: {}
+ #
+ # Local directory for this pipeline; takes a few seconds after initial pull
+ #
+ #- name: varlibcontainers
+ # hostPath:
+ # path: '/tmp'
+ # type: Directory
+ #
+ # Sharing the host containers; takes a few seconds
+ #
+ #- name: varlibcontainers
+ # hostPath:
+ # path: '/var/lib/containers'
+ # type: Directory
+ #
+ # Local disk using LVM operator on SNO; same speed as hostPath
+ #
+ #- name: varlibcontainers
+ # persistentVolumeClaim:
+ # claimName: buildah-cache
+ #
+ # NFS mount from same subnet; initial pull took 35 minutes, and
+ # subsequent builds took around 9 minutes.
+ #
+ # May also see messages like
+ #
+ # time="2024-02-15T00:55:11Z" level=error msg="'overlay' is not supported over nfs at \"/var/lib/containers/storage/overlay\""
+ #
+ # or possibly failing with
+ #
+ # time="2024-02-15T20:05:58Z" level=warning msg="Network file system detected as backing store. Enforcing overlay option `force_mask=\"700\"`. Add it to storage.conf to silence this warning"
+ # Error: mount /var/lib/containers/storage/overlay:/var/lib/containers/storage/overlay, flags: 0x1000: permission denied
+ # time="2024-02-15T20:05:58Z" level=warning msg="Network file system detected as backing store. Enforcing overlay option `force_mask=\"700\"`. Add it to storage.conf to silence this warning"
+ # time="2024-02-15T20:05:58Z" level=warning msg="failed to shutdown storage: \"mount /var/lib/containers/storage/overlay:/var/lib/containers/storage/overlay, flags: 0x1000: permission denied\""
+ #
+ # if not running privileged
+ #- name: varlibcontainers
+ # persistentVolumeClaim:
+ # claimName: buildah-cache-nfs
diff --git a/tekton/10-maven-ace-build-task.yaml b/tekton/10-maven-ace-build-task.yaml
index 0d1ba6c..5242e1e 100644
--- a/tekton/10-maven-ace-build-task.yaml
+++ b/tekton/10-maven-ace-build-task.yaml
@@ -1,7 +1,7 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
- name: maven-ace-build
+ name: ace-build
spec:
# The security and environment settings are needed for OpenShift in a non-default
# namespace such as cp4i. Kaniko is expecting to be root in the container.
@@ -11,8 +11,10 @@ spec:
env:
- name: "HOME"
value: "/tekton/home"
+ - name: "LICENSE"
+ value: "accept"
params:
- - name: dockerRegistry
+ - name: outputRegistry
type: string
- name: url
type: string
@@ -20,22 +22,34 @@ spec:
type: string
- name: buildImage
type: string
- - name: runtimeImage
+ - name: runtimeBaseImage
type: string
+ results:
+ - name: tag
+ description: image tag of the form 20240220135127-6fe9106
steps:
- name: clone
image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
script: |
#!/bin/sh
+
+ set -e # Fail on error
cd /work
git clone -b $(params.revision) $(params.url)
- # Slightly hacky but works
+ cd ace-demo-pipeline
+ export DATE=$(date '+%Y%m%d%H%M%S')
+ export COMMIT=$(git log -1 --pretty=%h)
+ export TAG="$DATE"-"$COMMIT"
+ echo Setting container tag to "$TAG"
+ echo -n "$TAG" > $(results.tag.path)
+
+ # Slightly hacky but works . . .
chmod -R 777 /work/ace-demo-pipeline
volumeMounts:
- mountPath: /work
name: work
- name: maven-build
- image: $(params.dockerRegistry)/$(params.buildImage)
+ image: $(params.buildImage)
script: |
#!/bin/bash
export LICENSE=accept
@@ -51,7 +65,7 @@ spec:
- mountPath: /work
name: work
- name: component-test
- image: $(params.dockerRegistry)/$(params.buildImage)
+ image: $(params.buildImage)
script: |
#!/bin/bash
export LICENSE=accept
@@ -71,7 +85,7 @@ spec:
- name: secret-volume-2
mountPath: /var/run/secrets/jdbc
- name: next-stage-container-setup
- image: $(params.dockerRegistry)/$(params.buildImage)
+ image: $(params.buildImage)
script: |
#!/bin/bash
cd /work/maven-output
@@ -82,25 +96,81 @@ spec:
- mountPath: /work
name: work
- name: docker-build-and-push
- image: gcr.io/kaniko-project/executor:latest
- # specifying DOCKER_CONFIG is required to allow kaniko to detect docker credential
+ image: quay.io/buildah/stable:v1
+ #image: registry.redhat.io/rhel8/buildah:8.9-5
+ securityContext:
+ runAsUser: 0
+ # Needed for hostPath volumes on OpenShift
+ #privileged: true
+ capabilities:
+ add: ["CHOWN", "DAC_OVERRIDE","FOWNER","SETFCAP","SETGID","SETUID"]
+ # specifying DOCKER_CONFIG is required to allow buildah to detect docker credential
env:
- name: "DOCKER_CONFIG"
value: "/tekton/home/.docker/"
- command:
- - /kaniko/executor
- args:
- - --dockerfile=/work/maven-output/Dockerfile
- - --destination=$(params.dockerRegistry)/tea-tekton
- - --context=/work/maven-output
- - --build-arg=BASE_IMAGE=$(params.dockerRegistry)/$(params.runtimeImage)
- - --skip-tls-verify
+ script: |
+ date
+ export TAG=`cat $(results.tag.path)`
+ echo Using $TAG as image tag
+ buildah --storage-driver=overlay bud --format=oci --tls-verify=false --no-cache \
+ --build-arg BASE_IMAGE=$(params.runtimeBaseImage) \
+ -f /work/maven-output/Dockerfile -t $(params.outputRegistry)/tea-tekton:$TAG /work/maven-output
+ date
+ buildah --storage-driver=overlay push --tls-verify=false --digestfile /tmp/image-digest \
+ $(params.outputRegistry)/tea-tekton:$TAG "docker://$(params.outputRegistry)/tea-tekton:$TAG"
+ date
volumeMounts:
- mountPath: /work
name: work
+ - name: varlibcontainers
+ mountPath: /var/lib/containers
volumes:
- name: work
emptyDir: {}
- name: secret-volume-2
secret:
secretName: jdbc-secret
+ #
+ # Default buildah approach using emptyDir; takes about 2 minutes on a test SNO cluster
+ #
+ - name: varlibcontainers
+ emptyDir: {}
+ #
+ # Local directory for this pipeline; takes a few seconds after initial pull
+ #
+ #- name: varlibcontainers
+ # hostPath:
+ # path: '/var/hostPath/buildah-cache'
+ # type: Directory
+ #
+ # Sharing the host containers; takes a few seconds
+ #
+ #- name: varlibcontainers
+ # hostPath:
+ # path: '/var/lib/containers'
+ # type: Directory
+ #
+ # Local disk using LVM operator on SNO; same speed as hostPath
+ #
+ #- name: varlibcontainers
+ # persistentVolumeClaim:
+ # claimName: buildah-cache
+ #
+ # NFS mount from same subnet; initial pull took 35 minutes, and
+ # subsequent builds took around 9 minutes.
+ #
+ # May also see messages like
+ #
+ # time="2024-02-15T00:55:11Z" level=error msg="'overlay' is not supported over nfs at \"/var/lib/containers/storage/overlay\""
+ #
+ # or possibly failing with
+ #
+ # time="2024-02-15T20:05:58Z" level=warning msg="Network file system detected as backing store. Enforcing overlay option `force_mask=\"700\"`. Add it to storage.conf to silence this warning"
+ # Error: mount /var/lib/containers/storage/overlay:/var/lib/containers/storage/overlay, flags: 0x1000: permission denied
+ # time="2024-02-15T20:05:58Z" level=warning msg="Network file system detected as backing store. Enforcing overlay option `force_mask=\"700\"`. Add it to storage.conf to silence this warning"
+ # time="2024-02-15T20:05:58Z" level=warning msg="failed to shutdown storage: \"mount /var/lib/containers/storage/overlay:/var/lib/containers/storage/overlay, flags: 0x1000: permission denied\""
+ #
+ # if not running privileged
+ #- name: varlibcontainers
+ # persistentVolumeClaim:
+ # claimName: buildah-cache-nfs
diff --git a/tekton/20-deploy-to-cluster-task.yaml b/tekton/20-deploy-to-cluster-task.yaml
index 956608c..6df451b 100644
--- a/tekton/20-deploy-to-cluster-task.yaml
+++ b/tekton/20-deploy-to-cluster-task.yaml
@@ -10,6 +10,8 @@ spec:
type: string
- name: revision
type: string
+ - name: tag
+ type: string
steps:
- name: clone
image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
@@ -21,6 +23,9 @@ spec:
export REG_WITH_ESCAPED_SLASH=`echo $(params.dockerRegistry) | sed 's/\//\\\\\\//g'`
echo $REG_WITH_ESCAPED_SLASH
sed -i "s/DOCKER_REGISTRY/$REG_WITH_ESCAPED_SLASH/g" /work/ace-demo-pipeline/tekton/tea-tekton-deployment.yaml
+ export TAG=$(params.tag)
+ echo Using $TAG as image tag
+ sed -i "s/IMAGE_TAG/$TAG/g" /work/ace-demo-pipeline/tekton/*.yaml
cat /work/ace-demo-pipeline/tekton/tea-tekton-deployment.yaml
volumeMounts:
- mountPath: /work
diff --git a/tekton/21-knative-deploy-task.yaml b/tekton/21-knative-deploy-task.yaml
index d28a23b..1bbbc0a 100644
--- a/tekton/21-knative-deploy-task.yaml
+++ b/tekton/21-knative-deploy-task.yaml
@@ -12,6 +12,8 @@ spec:
- name: revision
type: string
default: "main"
+ - name: tag
+ type: string
steps:
- name: clone
image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
@@ -23,6 +25,9 @@ spec:
export REG_WITH_ESCAPED_SLASH=`echo $(params.dockerRegistry) | sed 's/\//\\\\\\//g'`
echo $REG_WITH_ESCAPED_SLASH
sed -i "s/DOCKER_REGISTRY/$REG_WITH_ESCAPED_SLASH/g" /work/ace-demo-pipeline/serverless/tea-tekton-knative-service.yaml
+ export TAG=$(params.tag)
+ echo Using $TAG as image tag
+ sed -i "s/IMAGE_TAG/$TAG/g" /work/ace-demo-pipeline/serverless/*.yaml
cat /work/ace-demo-pipeline/serverless/tea-tekton-knative-service.yaml
volumeMounts:
- mountPath: /work
diff --git a/tekton/Dockerfile b/tekton/Dockerfile
index af2092c..a355181 100644
--- a/tekton/Dockerfile
+++ b/tekton/Dockerfile
@@ -33,13 +33,19 @@ RUN chown -R aceuser:mqbrkrs /tmp/* && \
# Kaniko seems to chmod this directory 755 by mistake sometimes, which causes trouble later
RUN chmod 1777 /tmp
-# Needed for OpenShift support due to random userids at runtime
-RUN chmod -R 777 /home/aceuser/ace-server /var/mqsi || /bin/true
+
+# This seems to be needed for OpenShift support due to random userids at runtime
+RUN chmod -R 777 /home/aceuser /var/mqsi || /bin/true
USER aceuser
# We're in an internal pipeline
ENV LICENSE=accept
+# Set BASH_ENV to source mqsiprofile when using docker exec bash -c
+ENV BASH_ENV=/opt/ibm/ace-12/server/bin/mqsiprofile
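+# Example of the effect (a sketch; the container name is hypothetical):
+#   docker exec tea-tekton bash -c "env | grep MQSI"
+# shows the mqsiprofile environment variables because BASH_ENV is sourced for non-interactive shells.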
+
# Set entrypoint to run the server; should move the apply overrides to the startup script at some point
-ENTRYPOINT ["bash", "-c", "/home/aceuser/ace-server/ace-startup-script.sh && ibmint apply overrides /home/aceuser/ace-server/application-overrides.txt --work-directory /home/aceuser/ace-server && IntegrationServer -w /home/aceuser/ace-server --admin-rest-api -1 --no-nodejs"]
+ENTRYPOINT ["bash", "-c", "/home/aceuser/ace-server/ace-startup-script.sh && \
+ ibmint apply overrides /home/aceuser/ace-server/application-overrides.txt --work-directory /home/aceuser/ace-server && \
+ IntegrationServer -w /home/aceuser/ace-server --admin-rest-api -1 --no-nodejs"]
diff --git a/tekton/README.md b/tekton/README.md
index a771afd..37f16e6 100644
--- a/tekton/README.md
+++ b/tekton/README.md
@@ -1,44 +1,115 @@
# Tekton pipeline
-Used to run the pipeline stages via Tekton. Relies on the same IBM Cloud kubernetes cluster as before, with the JDBC
-credentials having been set up, and can also be run using OpenShift Code-Ready Containers (tested on 1.27).
+Used to run the pipeline stages via Tekton, and can deploy to either ACE containers
+or to ACE-as-a-Service:
-![Pipeline overview](ace-demo-pipeline-tekton-1.png)
+![Pipelines overview](/demo-infrastructure/images/tekton-pipelines-overview.png)
-The tasks rely on several different containers:
+The tasks rely on several different containers for all use cases:
- The Tekton git-init image to run the initial git clones.
-- Kaniko for building the container images.
-- The ace-minimal image for a small Alpine-based runtime container (~420MB, which fits into the IBM Cloud container registry
-free tier limit of 512MB), and builder variant with Maven added in. See https://github.com/trevor-dolby-at-ibm-com/ace-docker/tree/master/experimental/ace-minimal
-for more details on the minimal image, and [minimal image build instructions](minimal-image-build/README.md) on how to build the various pre-req images.
+- A build container, which would normally be one of the following:
+ - The `ace-minimal` image (see [minimal image build instructions](minimal-image-build/README.md) for details).
+ This image can be built from the ACE developer edition package (no purchase necessary) and is much
+ smaller than most other ACE images.
+ - The `ace` image from cp.icr.io (see [Obtaining an IBM App Connect Enterprise server image](https://www.ibm.com/docs/en/app-connect/12.0?topic=cacerid-building-sample-app-connect-enterprise-image-using-docker#aceimages__title__1) for versions and necessary credentials).
+ This image is created by IBM and requires an IBM Entitlement Key for access.
-For the initial testing, variants of ace-minimal:12.0.10.0-alpine have been pushed to tdolby/experimental on DockerHub, but this is not a
-stable location, and the images should be rebuilt by anyone attempting to use this repo.
+For container deployments, more containers are used:
+
+- Buildah for building the application runtime images.
+- lachlanevenson/k8s-kubectl for managing Kubernetes artifacts
+- A runtime base image:
+ - The `ace-minimal` image, which is the smallest image and therefore results in quicker builds in some cases.
+ See [minimal image build instructions](minimal-image-build/README.md) for details on building the image.
+ - The `ace` image, which should be shadowed to the local registry to avoid pulling from cp.icr.io too often.
+ - For CP4i use cases, the `ace-server-prod` image (see [os/cp4i/README.md](os/cp4i/README.md) for CP4i details)
+ which should also be shadowed to the local registry.
+
+For ACEaaS, the target does not present as a container system (though it runs containers in the cloud):
+
+- An ACE-as-a-Service (ACEaaS) instance needs to be available.
+
+In general, using the default namespace for Kubernetes artifacts is discouraged, so a namespace
+(such as `ace-demo`) should be created for the pipeline and runtime containers. The YAML files
+in this repo generally do not have namespaces specified (other than some CP4i files), so using
+`oc project ace-demo` to set the default namespace should provide the correct results.
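+
+For example (the namespace name is only a suggestion):
+```
+kubectl create namespace ace-demo
+kubectl config set-context --current --namespace ace-demo   # or "oc project ace-demo" on OpenShift
+```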
## Getting started
- Most of the specific registry names need to be customised: us.icr.io may not be the right region, for example, and us.icr.io/ace-containers
-is unlikely to be writable. Creating registries and so on (though essential) is beyond the scope of this document, but customisation of
-the artifacts in this repo (such as ace-pipeline-run.yaml) will almost certainly be necessary.
+A Kubernetes cluster will be needed, with Minikube (see [minikube/README.md](/tekton/minikube/README.md)) and
+OpenShift 4.14 being the two most-tested. Other clusters should also work with appropriate adjustments to
+ingress routing and container registry settings. Note that the Cloud Pak for Integration (CP4i) has a separate
+pipeline that creates IntegrationRuntime CRs with custom images; see [os/cp4i/README.md](/tekton/os/cp4i/README.md)
+for more details.
- The Tekton pipeline relies on docker credentials being provided for Kaniko to use when pushing the built image, and these credentials
-must be associated with the service account for the pipeline. If this has not already been done elsewhere, then create as follows, with
-appropriate changes for a fork of this repo:
+Many of the artifacts in this repo (such as ace-pipeline-run.yaml) will need to be customized depending on
+the exact cluster layout. The defaults are set up for Minikube running with Docker on Ubuntu, and may need
+to be modified depending on network addresses, etc. The most-commonly-modified files have options in the
+comments, with [ace-pipeline-run.yaml](ace-pipeline-run.yaml) being one example:
+```
+ - name: buildImage
+ # Requires an IBM Entitlement Key
+ #value: "cp.icr.io/cp/appc/ace:12.0.11.0-r1"
+ # ace-minimal can be built from the ACE package without needing a key
+ #value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-minimal:12.0.11.0-alpine"
+ # Need to use the -build image for Maven
+ #value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-minimal-build:12.0.11.0-alpine"
+ value: "192.168.49.2:5000/default/ace-minimal-build:12.0.11.0-alpine"
+```
+
+The Tekton pipeline expects docker credentials to be provided for Buildah to use when pushing the built image, and
+these credentials must be associated with the service account for the pipeline. If this has not already been done
+elsewhere, then create them with the following format for OpenShift
+```
+kubectl create secret docker-registry regcred --docker-server=image-registry.openshift-image-registry.svc.cluster.local:5000 --docker-username=kubeadmin --docker-password=$(oc whoami -t)
+kubectl apply -f tekton/service-account.yaml
```
-kubectl create secret docker-registry regcred --docker-server=us.icr.io --docker-username=iamapikey --docker-password=
+or a dummy variant for Minikube or ACEaaS without registry authentication enabled:
+```
+kubectl create secret docker-registry regcred --docker-server=us.icr.io --docker-username=dummy --docker-password=dummy
kubectl apply -f tekton/service-account.yaml
```
The service account also has the ability to create services, deployments, etc, which are necessary for running the service.
+As well as the registry credentials, the pipeline needs JDBC credentials to run the component tests.
+See [cloud-resources.md](cloud-resources.md) for DB2 on Cloud instructions, with the credentials being created as follows
+```
+kubectl create secret generic jdbc-secret --from-literal=USERID='blah' --from-literal=PASSWORD='blah' --from-literal=databaseName='BLUDB' --from-literal=serverName='19af6446-6171-4641-8aba-9dcff8e1b6ff.c1ogj3sd0tgtu0lqde00.databases.appdomain.cloud' --from-literal=portNumber='30699'
+```
+with the obvious replacements.
+
+## Tekton dashboard
+
+The Tekton dashboard (for non-OpenShift users) can be installed as follows:
+```
+kubectl apply --filename https://storage.googleapis.com/tekton-releases/dashboard/latest/release.yaml
+```
+and shows pipeline runs in a UI:
+
+![/demo-infrastructure/images/tekton-dashboard.png](/demo-infrastructure/images/tekton-dashboard.png)
+
+By default, the Tekton dashboard is not accessible outside the cluster; assuming a secure host somewhere, the
+dashboard HTTP port can be made available locally as follows:
+```
+kubectl --namespace tekton-pipelines port-forward --address 0.0.0.0 svc/tekton-dashboard 9097:9097
+```
+
+## Container deploy target
+
+![Pipeline overview](/demo-infrastructure/images/tekton-pipeline.png)
+
Setting up the pipeline requires Tekton to be installed (which may already have happened via OpenShift operators, in which case
skip the first line), tasks to be created, and the pipeline itself to be configured:
```
kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
-kubectl apply -f tekton/10-maven-ace-build-task.yaml
+kubectl apply -f tekton/10-ibmint-ace-build-task.yaml
kubectl apply -f tekton/20-deploy-to-cluster-task.yaml
+kubectl apply -f tekton/21-knative-deploy-task.yaml
kubectl apply -f tekton/ace-pipeline.yaml
```
+(note that the pipeline will run without the cluster being enabled for Knative serverless; the
+21-knative-deploy task is only run if `knativeDeploy` is set to `true` when the pipeline is run).
Once that has been accomplished, the simplest way to run the pipeline is
```
@@ -48,20 +119,19 @@ tkn pipelinerun logs ace-pipeline-run-1 -f
and this should build the projects, run the unit tests, create a docker image, and then create a deployment that runs the application.
-## How to know if the pipeline has succeeded
+### How to know if the container deploy pipeline has succeeded
The end result should be a running container with the tea application deployed, listening for requests on /tea/index at the
appropriate host and port. An HTTP GET on http://containerHost:containerPort/tea/index/1 should return some JSON, though the
name may be null if the database has no entry for id 1.
-For the IBM Kubernetes Service, the public IP address of the worker node is the easiest way to access the service, but the host
-is not published in the usual external IP field. To find the external IP, use IBM Cloud dashboard to view the "Worker nodes"
-tab information for the Kube cluster, where the "Public IP" contains the correct address. The port number can be found by querying
-the Kubernetes tea-tekton-service either by using
+For Minikube, the easiest way to access the container from the local machine is to run
```
-kubectl get service tea-tekton-service
+kubectl --namespace default port-forward --address 0.0.0.0 svc/tea-tekton-service 7800:7800
```
-or by using the Kubernetes dashboard to view the service. These values can then be used to access the application.
+and then access http://localhost:7800/tea/index/1. If Minikube ingress is enabled, then
+deploying `tekton/minikube/tea-tekton-minikube-ingress.yaml` will add the definitions needed
+to make the service available that way.
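+For example, with the port-forward above active, a quick check of the deployed application might
+look like this (the name value depends on what is in the database):
+```
+curl http://localhost:7800/tea/index/1
+{"name":"Assam","id":"1"}
+```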
Note that if errors of the form
@@ -79,55 +149,99 @@ occur, then it is likely that the TEAJDBC policy is not configured to use SSL. S
```
in the policyxml should eliminate this error.
-## Tekton dashboard
+### OpenShift
-The Tekton dashboard (for non-OpenShift users) can be installed as follows:
+The majority of steps are the same, but the registry authentication is a little different; assuming a session
+logged in as kubeadmin, it would look as follows:
```
-kubectl apply --filename https://storage.googleapis.com/tekton-releases/dashboard/latest/release.yaml
+kubectl create secret docker-registry regcred --docker-server=image-registry.openshift-image-registry.svc.cluster.local:5000 --docker-username=kubeadmin --docker-password=$(oc whoami -t)
```
+Note that the actual password itself (as opposed to the token provided by "oc whoami -t") does not work for
+registry authentication for some reason when using single-node OpenShift with a temporary admin user.
-By default, the Tekton dashboard is not accessible outside the cluster; assuming a secure host somewhere, the
-dashboard HTTP port can be made available locally as follows:
+After that, the pipeline run YAML should be changed to point to the OpenShift registry, and the
+pipeline run as normal:
```
-kubectl --namespace tekton-pipelines port-forward --address 0.0.0.0 svc/tekton-dashboard 9097:9097
+kubectl apply -f tekton/ace-pipeline-run.yaml
+tkn pipelinerun logs ace-pipeline-run-1 -f
```
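+The registry change itself is the `outputRegistry` parameter in [tekton/ace-pipeline-run.yaml](tekton/ace-pipeline-run.yaml),
+which already contains the OpenShift value as a comment:
+```
+  - name: outputRegistry
+    value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
+```
+(the `buildImage` and `runtimeBaseImage` parameters have similar commented-out OpenShift registry values).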
+The OpenShift Pipelines operator also provides a web interface for the pipeline runs, which may be
+an easier way to view progress.
+
+To enable external connectivity to the application in OpenShift for testing, edit the `host`
+setting in [tekton/os/tea-tekton-route.yaml](tekton/os/tea-tekton-route.yaml) to reflect
+the correct namespace and domain name, then run
+```
+kubectl apply -f tekton/os/tea-tekton-route.yaml
+```
+to create a route. The resulting URL of the form http://tea-route-namespace.apps.mycompany.com/tea/index/1
+should then access the Tea REST application in the container and show JSON result data.
+
+### CP4i
+
+See [os/cp4i/README.md](os/cp4i/README.md) for details on how to create IntegrationRuntime CRs for CP4i, along
+with a pipeline that includes running component tests in a CP4i container during the build to ensure that the
+configurations are valid.
-## OpenShift
+## ACE-as-a-Service target
-The majority of steps are the same, but the registry authentication is a little different; assuming a session logged in as kubeadmin, it would look as follows:
+See [README-aceaas-pipelines.md](README-aceaas-pipelines.md) for a general overview. The
+Tekton pipeline for ACEaaS looks as follows, with the (optional) "Create configuration" steps
+shown as a separate task that only runs when requested:
+
+![Pipeline overview](/demo-infrastructure/images/tekton-aceaas-pipeline.png)
+
+As there is no runtime container, this pipeline can run using the `ace` image as the
+build image without any performance concerns because there are no buildah or Kaniko steps
+that would need to unpack the image; this requires an IBM Entitlement Key and the
+appropriate credentials:
```
-kubectl create secret docker-registry regcred --docker-server=image-registry.openshift-image-registry.svc.cluster.local:5000 --docker-username=kubeadmin --docker-password=$(oc whoami -t)
+kubectl create secret docker-registry ibm-entitlement-key --docker-username=cp --docker-password=myEntitlementKey --docker-server=cp.icr.io
```
-Note that the actual password itself (as opposed to the hash provided by "oc whoami -t") does not work for registry authentication for some reason.
+Ensure that the ace-tekton-service-account includes the `ibm-entitlement-key` secret in both the
+secrets and imagePullSecrets lists. For those without an IBM Entitlement Key, the `ace-minimal-build`
+image will also work (plain `ace-minimal` does not include curl, which the deploy steps need).
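+A sketch of the relevant ServiceAccount fields (the actual contents of
+[tekton/service-account.yaml](tekton/service-account.yaml) may differ; `regcred` is assumed to be
+listed already from the earlier setup):
+```
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: ace-tekton-service-account
+secrets:
+  - name: regcred
+  - name: ibm-entitlement-key
+imagePullSecrets:
+  - name: regcred
+  - name: ibm-entitlement-key
+```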
-After that, the pipeline run would be
+Setting up the pipeline requires Tekton to be installed (which may already have happened via OpenShift operators, in which case
+skip the first line), tasks to be created, and the pipeline itself to be configured:
```
-kubectl apply -f tekton/os/ace-pipeline-run.yaml
-tkn pipelinerun logs ace-pipeline-run-1 -f
+kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
+kubectl apply -f tekton/aceaas/40-ibmint-aceaas-deploy-task.yaml
+kubectl apply -f tekton/aceaas/41-ibmint-aceaas-config-task.yaml
+kubectl apply -f tekton/aceaas/aceaas-pipeline.yaml
```
-to pick up the correct registry default. The OpenShift Pipeline operator provides a web interface for the pipeline runs
-also, which may be an easier way to view progress.
-To enable external connectivity from within OpenShift to enable testing, run the following
+The pipeline requires additional credentials to deploy to ACEaaS, and `aceaas-credentials`
+should be created using values acquired from the ACEaaS console. See
+[https://www.ibm.com/docs/en/app-connect/saas?topic=overview-accessing-api](https://www.ibm.com/docs/en/app-connect/saas?topic=overview-accessing-api)
+for details on how to find or create the correct credentials, then create the secret as follows
```
-kubectl apply -f tekton/os/tea-tekton-route.yaml
+kubectl create secret generic aceaas-credentials --from-literal=appConEndpoint=MYENDPOINT --from-literal=appConInstanceID=MYINSTANCEID --from-literal=appConClientID=HEXNUMBERSTRING --from-literal=appConApiKey=BASE64APIKEY --from-literal=appConClientSecret=HEXNUMBERCLIENTSECRET
```
-which will create a route at http://tea-route-default.apps-crc.testing (which can be changed in the yaml file to
-match the correct domain name for the cluster).
+The pipeline should create the required configurations based on the JDBC credentials
+and other values if the `createConfiguration` parameter is set to `true`; this should only be used
+for the first pipeline run or after any change to the credentials (see the "ACEaaS API rate
+limits" section of [README-aceaas-pipelines.md](/demo-infrastructure/README-aceaas-pipelines.md)
+for more information).
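+The parameter is set in the pipeline run YAML, for example in
+[tekton/aceaas/aceaas-pipeline-run.yaml](tekton/aceaas/aceaas-pipeline-run.yaml):
+```
+  - name: createConfiguration
+    value: "true"
+```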
-Accessing http://tea-route-default.apps-crc.testing/tea/index/1 should result in the application running and showing
-JSON result data.
+Once that has been accomplished, the simplest way to run the pipeline is
+```
+kubectl apply -f tekton/aceaas/aceaas-pipeline-run.yaml
+tkn pipelinerun logs aceaas-pipeline-run-1 -f
+```
-## CP4i
+and this should build the projects, run the tests, and then deploy to ACEaaS.
-See [os/cp4i/README.md](os/cp4i/README.md) for details on how to create IntegrationServer CRs for CP4i, along
-with a pipeline that included running component tests in a CP4i container during the build to ensure that the
-configurations are valid.
+### How to know if the ACEaaS pipeline has succeeded
-## Possible enhancements
+Once the pipeline has completed and the integration runtime has started, the application can be
+tested by using a browser or curl to access the application API endpoint. The endpoint can be
+found from the ACEaaS UI by examining the deployed REST API as shown:
-The pipeline should use a single git commit to ensure the two tasks are actually using the same source. Alternatively, PVCs could
-be used to share a workspace between the tasks, which at the moment use transient volumes to maintain state between the task steps
-but not between the tasks themselves.
+![aceaas-tekton-rest-api-endpoint.png](aceaas/aceaas-tekton-rest-api-endpoint.png)
-The remaining docker images, git repo references, etc could be turned into parameters.
+The endpoint should be of the form `https://tdolby-tea-tekton-ir-https-ac2vkpa0udw.p-vir-d1.appconnect.ibmappdomain.cloud/tea`
+and (similar to the integration node example above) curl can be used to retrieve or add data.
+```
+C:\>curl https://tdolby-tea-tekton-ir-https-ac2vkpa0udw.p-vir-d1.appconnect.ibmappdomain.cloud/tea/index/1
+{"name":"Assam","id":"1"}
+```
diff --git a/tekton/ace-pipeline-run.yaml b/tekton/ace-pipeline-run.yaml
index 4ed5a5d..8f708d5 100644
--- a/tekton/ace-pipeline-run.yaml
+++ b/tekton/ace-pipeline-run.yaml
@@ -1,15 +1,46 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
+ # Can use generated names with kubectl create; follow logs with "tkn pipeline logs ace-pipeline -f" to
+ # avoid needing a fixed name.
+ #generateName: ace-pipeline-run-
+
+ # Fixed name allows "tkn pr delete ace-pipeline-run-1 -f ; kubectl apply -f tekton/ace-pipeline-run.yaml ; tkn pr logs ace-pipeline-run-1 -f"
+ # which has a slightly nicer log format.
name: ace-pipeline-run-1
spec:
serviceAccountName: ace-tekton-service-account
pipelineRef:
name: ace-pipeline
params:
- - name: dockerRegistry
- value: "us.icr.io/ace-containers"
-# - name: buildImage
-# value: "ace-minimal-build:12.0.10.0-alpine-java11"
-# - name: runtimeImage
-# value: "ace-minimal:12.0.10.0-alpine-java11"
+ - name: outputRegistry
+ # OpenShift
+ #value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
+ #value: "quay.io/trevor_dolby"
+ #value: "us.icr.io/ace-containers"
+ #value: "aceDemoRegistry.azurecr.io"
+ # Minikube
+ value: "192.168.49.2:5000/default"
+ - name: buildImage
+ # Requires an IBM Entitlement Key
+ #value: "cp.icr.io/cp/appc/ace:12.0.11.0-r1"
+ # ace-minimal can be built from the ACE package without needing a key
+ #value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-minimal:12.0.11.0-alpine"
+ # Need to use the -build image for Maven
+ #value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-minimal-build:12.0.11.0-alpine"
+ value: "192.168.49.2:5000/default/ace-minimal-build:12.0.11.0-alpine"
+ - name: runtimeBaseImage
+ # Requires an IBM Entitlement Key
+ #value: "cp.icr.io/cp/appc/ace:12.0.11.0-r1"
+ # ace-minimal can be built from the ACE package without needing a key
+ # value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-minimal:12.0.11.0-alpine"
+ # Minikube
+ value: "192.168.49.2:5000/default/ace-minimal:12.0.11.0-alpine"
+ # Local copy of ace container
+ #value: "192.168.49.2:5000/default/ace:12.0.11.0-r1"
+ - name: url
+ value: "https://github.com/ot4i/ace-demo-pipeline"
+ - name: revision
+ value: "main"
+ #- name: knativeDeploy
+ # value: "true"
diff --git a/tekton/ace-pipeline-taskrun.yaml b/tekton/ace-pipeline-taskrun.yaml
deleted file mode 100644
index 0d881fe..0000000
--- a/tekton/ace-pipeline-taskrun.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: TaskRun
-metadata:
- name: ace-pipeline-taskrun-1
-spec:
- serviceAccountName: ace-tekton-service-account
- taskRef:
- name: maven-ace-build
- params:
- - name: dockerRegistry
- value: "us.icr.io/ace-containers"
- - name: url
- type: string
- default: "https://github.com/ot4i/ace-demo-pipeline"
- - name: revision
- type: string
- default: "main"
- - name: buildImage
- type: string
- default: "ace-minimal-build:12.0.10.0-alpine"
- - name: runtimeImage
- type: string
- default: "ace-minimal:12.0.10.0-alpine"
diff --git a/tekton/ace-pipeline.yaml b/tekton/ace-pipeline.yaml
index e1529c4..2ebbc31 100644
--- a/tekton/ace-pipeline.yaml
+++ b/tekton/ace-pipeline.yaml
@@ -4,7 +4,7 @@ metadata:
name: ace-pipeline
spec:
params:
- - name: dockerRegistry
+ - name: outputRegistry
type: string
- name: url
type: string
@@ -14,34 +14,59 @@ spec:
default: "main"
- name: buildImage
type: string
- default: "ace-minimal-build:12.0.10.0-alpine"
- - name: runtimeImage
+ - name: runtimeBaseImage
type: string
- default: "ace-minimal:12.0.10.0-alpine"
+ - name: knativeDeploy
+ type: string
+ default: "false"
tasks:
- name: build-from-source
taskRef:
- name: maven-ace-build
+ name: ace-build
params:
- - name: dockerRegistry
- value: $(params.dockerRegistry)
+ - name: outputRegistry
+ value: $(params.outputRegistry)
- name: url
value: $(params.url)
- name: revision
value: $(params.revision)
- name: buildImage
value: $(params.buildImage)
- - name: runtimeImage
- value: $(params.runtimeImage)
+ - name: runtimeBaseImage
+ value: $(params.runtimeBaseImage)
- name: deploy-to-cluster
taskRef:
name: deploy-to-cluster
params:
- name: dockerRegistry
- value: $(params.dockerRegistry)
+ value: $(params.outputRegistry)
+ - name: url
+ value: $(params.url)
+ - name: revision
+ value: $(params.revision)
+ - name: tag
+ value: "$(tasks.build-from-source.results.tag)"
+ runAfter:
+ - build-from-source
+ when:
+ - input: "$(params.knativeDeploy)"
+ operator: in
+ values: ["false"]
+ - name: deploy-knative-to-cluster
+ taskRef:
+ name: knative-deploy
+ params:
+ - name: dockerRegistry
+ value: $(params.outputRegistry)
- name: url
value: $(params.url)
- name: revision
value: $(params.revision)
+ - name: tag
+ value: "$(tasks.build-from-source.results.tag)"
runAfter:
- build-from-source
+ when:
+ - input: "$(params.knativeDeploy)"
+ operator: in
+ values: ["true"]
\ No newline at end of file
diff --git a/tekton/aceaas/40-ibmint-aceaas-deploy-task.yaml b/tekton/aceaas/40-ibmint-aceaas-deploy-task.yaml
new file mode 100644
index 0000000..9044313
--- /dev/null
+++ b/tekton/aceaas/40-ibmint-aceaas-deploy-task.yaml
@@ -0,0 +1,184 @@
+apiVersion: tekton.dev/v1beta1
+kind: Task
+metadata:
+ name: aceaas-build-and-deploy-bar
+spec:
+ stepTemplate:
+ env:
+ - name: "LICENSE"
+ value: "accept"
+ params:
+ - name: url
+ type: string
+ - name: revision
+ type: string
+ - name: buildImage
+ type: string
+ - name: deployPrefix
+ type: string
+ results:
+ - name: barURL
+ description: BAR URL on ACEaaS of the form https://dataplane-api-dash.appconnect:3443/v1/ac2vkpa0udw/directories/tdolby-tea-tekton?
+ steps:
+ - name: clone
+ image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
+ script: |
+ #!/bin/sh
+ set -e # Fail on error
+ cd /work
+ git clone -b $(params.revision) $(params.url)
+
+ # Slightly hacky but works . . .
+ chmod -R 777 /work/ace-demo-pipeline
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: ibmint-build
+ image: $(params.buildImage)
+ #image: cp.icr.io/cp/appc/ace:12.0.11.0-r1
+ #
+ # Runs the build and unit test phases, leaving the application BAR file in the
+ # /work/ibmint-output directory for later steps to use.
+ #
+ script: |
+ #!/bin/bash
+
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
+ cd /work/ace-demo-pipeline
+ mkdir /work/ibmint-output
+ echo ========================================================================
+ echo Building application
+ echo ========================================================================
+ # Using --compile-maps-and-schemas for 12.0.11 and later . . .
+ ibmint package --input-path . --output-bar-file /work/ibmint-output/tea-tekton.bar --project TeaSharedLibraryJava --project TeaSharedLibrary --project TeaRESTApplication --compile-maps-and-schemas
+
+ echo ========================================================================
+ echo Building unit tests
+ echo ========================================================================
+ # Create the unit test work directory
+ mqsicreateworkdir /work/ut-work-dir
+ mqsibar -w /work/ut-work-dir -a /work/ibmint-output/tea-tekton.bar
+ # Build just the unit tests
+ ibmint deploy --input-path . --output-work-directory /work/ut-work-dir --project TeaRESTApplication_UnitTest
+
+ echo ========================================================================
+ echo Running unit tests
+ echo ========================================================================
+ IntegrationServer -w /work/ut-work-dir --no-nodejs --start-msgflows false --test-project TeaRESTApplication_UnitTest
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: component-test
+ image: $(params.buildImage)
+ #image: cp.icr.io/cp/appc/ace:12.0.11.0-r1
+ #
+ # Builds and runs the component tests using the JDBC credentials provided from
+ # the secret. Also uses the same init-creds.sh script used by the non-CP4i image
+ # to load credentials at startup.
+ #
+ script: |
+ #!/bin/bash
+
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error
+
+ echo ========================================================================
+ echo Setting up credentials and deploying main application
+ echo ========================================================================
+ # Set up credentials for the component tests; init-creds.sh looks in /tmp for policy
+ cp /work/ace-demo-pipeline/demo-infrastructure/TEAJDBC.policyxml /tmp/
+ bash /work/ace-demo-pipeline/demo-infrastructure/init-creds.sh
+ mqsibar -w /home/aceuser/ace-server -a /work/ibmint-output/tea-tekton.bar
+
+ echo ========================================================================
+ echo Building component tests
+ echo ========================================================================
+ cd /work/ace-demo-pipeline
+ # Build just the component tests
+ ibmint deploy --input-path . --output-work-directory /home/aceuser/ace-server --project TeaRESTApplication_ComponentTest
+
+ echo ========================================================================
+ echo Running component tests
+ echo ========================================================================
+ IntegrationServer -w /home/aceuser/ace-server --no-nodejs --start-msgflows false --test-project TeaRESTApplication_ComponentTest
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: secret-volume-2
+ mountPath: /var/run/secrets/jdbc
+ - name: push-bar-to-aceaas
+ image: $(params.buildImage)
+ #
+ # Acquires an ACEaaS access token using the provided API key and then PUTs the BAR
+ # file built by the earlier steps, saving the returned BAR URL as the task result.
+ #
+ script: |
+ #!/bin/bash
+
+ # Avoid errors using curl with ace-minimal-build
+ unset LD_LIBRARY_PATH
+
+ set -e # Fail on error
+
+ echo ========================================================================
+ echo Deploying BAR file
+ echo ========================================================================
+
+ export appConEndpoint=$(cat /run/secrets/aceaas/appConEndpoint)
+ export appConInstanceID=$(cat /run/secrets/aceaas/appConInstanceID)
+ export appConClientID=$(cat /run/secrets/aceaas/appConClientID)
+ export appConApiKey=$(cat /run/secrets/aceaas/appConApiKey)
+ export appConClientSecret=$(cat /run/secrets/aceaas/appConClientSecret)
+
+ echo ========================================================================
+ echo Acquiring token using API key
+ echo ========================================================================
+
+ curl --request POST \
+ --url https://${appConEndpoint}/api/v1/tokens \
+ --header "X-IBM-Client-Id: ${appConClientID}" \
+ --header "X-IBM-Client-Secret: ${appConClientSecret}" \
+ --header 'accept: application/json' \
+ --header 'content-type: application/json' \
+ --header "x-ibm-instance-id: ${appConInstanceID}" \
+ --data "{\"apiKey\": \"${appConApiKey}\"}" --output /tmp/token-output.txt
+ cat /tmp/token-output.txt | tr -d '{}"' | tr ',' '\n' | grep access_token | sed 's/access_token://g' > /work/APPCON_TOKEN
+ export appConToken=$(cat /work/APPCON_TOKEN)
+
+ echo ========================================================================
+ echo PUTting BAR file
+ echo ========================================================================
+
+ curl -X PUT https://${appConEndpoint}/api/v1/bar-files/$(params.deployPrefix)-tea-tekton \
+ -H "x-ibm-instance-id: ${appConInstanceID}" -H "Content-Type: application/octet-stream" \
+ -H "Accept: application/json" -H "X-IBM-Client-Id: ${appConClientID}" -H "authorization: Bearer ${appConToken}" \
+ --data-binary @/work/ibmint-output/tea-tekton.bar --output /tmp/curl-output.txt
+
+ # We will have exited if curl returned non-zero so the output should contain the BAR file name
+ cat /tmp/curl-output.txt ; echo
+ # This would be easier with jq but that's not available in most ACE images
+ export BARURL=$(cat /tmp/curl-output.txt | tr -d '{}"' | tr ',' '\n' | grep url | sed 's/url://g')
+ echo BARURL: $BARURL
+ echo -n "$BARURL" > $(results.barURL.path)
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: secret-volume-2
+ mountPath: /var/run/secrets/jdbc
+ - name: secret-volume-3
+ mountPath: /var/run/secrets/aceaas
+ volumes:
+ - name: work
+ emptyDir: {}
+ - name: secret-volume-2
+ # Used to check database connectivity
+ secret:
+ secretName: jdbc-secret
+ - name: secret-volume-3
+ # Used to push to ACEaaS
+ secret:
+ secretName: aceaas-credentials
diff --git a/tekton/aceaas/41-ibmint-aceaas-config-task.yaml b/tekton/aceaas/41-ibmint-aceaas-config-task.yaml
new file mode 100644
index 0000000..005927e
--- /dev/null
+++ b/tekton/aceaas/41-ibmint-aceaas-config-task.yaml
@@ -0,0 +1,198 @@
+apiVersion: tekton.dev/v1beta1
+kind: Task
+metadata:
+ name: aceaas-create-config-and-runtime
+spec:
+ stepTemplate:
+ env:
+ - name: "LICENSE"
+ value: "accept"
+ params:
+ - name: url
+ type: string
+ - name: revision
+ type: string
+ - name: buildImage
+ type: string
+ - name: deployPrefix
+ type: string
+ - name: barURL
+ type: string
+ steps:
+ - name: clone
+ image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
+ script: |
+ #!/bin/sh
+ set -e # Fail on error
+ cd /work
+ git clone -b $(params.revision) $(params.url)
+ echo barURL:
+ echo $(params.barURL)
+
+ # Slightly hacky but works . . .
+ chmod -R 777 /work/ace-demo-pipeline
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: create-config
+ image: $(params.buildImage)
+ #
+ # Creates the ACEaaS configurations needed by the application: the JDBC policy
+ # project, the jdbc::tea credentials (setdbparms), and the default policy project
+ # server.conf.yaml setting.
+ #
+ script: |
+ #!/bin/bash
+
+ # Avoid errors using curl with ace-minimal-build
+ unset LD_LIBRARY_PATH
+
+ set -e # Fail on error
+ export appConEndpoint=$(cat /run/secrets/aceaas/appConEndpoint)
+ export appConInstanceID=$(cat /run/secrets/aceaas/appConInstanceID)
+ export appConClientID=$(cat /run/secrets/aceaas/appConClientID)
+ export appConApiKey=$(cat /run/secrets/aceaas/appConApiKey)
+ export appConClientSecret=$(cat /run/secrets/aceaas/appConClientSecret)
+
+ echo ========================================================================
+ echo Acquiring token using API key
+ echo ========================================================================
+
+ curl --request POST \
+ --url https://${appConEndpoint}/api/v1/tokens \
+ --header "X-IBM-Client-Id: ${appConClientID}" \
+ --header "X-IBM-Client-Secret: ${appConClientSecret}" \
+ --header 'accept: application/json' \
+ --header 'content-type: application/json' \
+ --header "x-ibm-instance-id: ${appConInstanceID}" \
+ --data "{\"apiKey\": \"${appConApiKey}\"}" --output /tmp/token-output.txt
+ cat /tmp/token-output.txt | tr -d '{}"' | tr ',' '\n' | grep access_token | sed 's/access_token://g' > /work/APPCON_TOKEN
+ export appConToken=$(cat /work/APPCON_TOKEN)
+
+ echo ========================================================================
+ echo Creating JDBCPolicies project
+ echo ========================================================================
+ mkdir /tmp/JDBCPolicies
+ echo '' > /tmp/JDBCPolicies/policy.descriptor
+ cp /work/ace-demo-pipeline/demo-infrastructure/TEAJDBC.policyxml /tmp/JDBCPolicies/
+ sed -i "s/DATABASE_NAME/`cat /var/run/secrets/jdbc/databaseName`/g" /tmp/JDBCPolicies/TEAJDBC.policyxml
+ sed -i "s/SERVER_NAME/`cat /var/run/secrets/jdbc/serverName`/g" /tmp/JDBCPolicies/TEAJDBC.policyxml
+ sed -i "s/PORT_NUMBER/`cat /var/run/secrets/jdbc/portNumber`/g" /tmp/JDBCPolicies/TEAJDBC.policyxml
+ echo "Contents of TEAJDBC policy:"
+ cat /tmp/JDBCPolicies/TEAJDBC.policyxml
+
+
+ echo ========================================================================
+ echo Creating $(params.deployPrefix)-jdbc-policies configuration
+ echo ========================================================================
+ cd /tmp
+ # Using "zip" would be more obvious, but not all ACE images have it available.
+ /opt/ibm/ace-12/common/jdk/bin/jar cvf /tmp/JDBCPolicies.zip JDBCPolicies
+ cat /tmp/JDBCPolicies.zip | base64 -w 0 > /tmp/JDBCPolicies.zip.base64
+
+ # Not sure if this is better than the template way of doing things below . . . (TCD 20240305)
+ cat << EOF > /tmp/jdbc-policies-configuration.json
+ { "metadata": { "name": "$(params.deployPrefix)-jdbc-policies" }, "spec": {
+ "type": "policyproject", "description": "$(params.deployPrefix) JDBCPolicies project",
+ "data": "`cat /tmp/JDBCPolicies.zip.base64`"}}
+ EOF
+
+ curl -X PUT https://${appConEndpoint}/api/v1/configurations/$(params.deployPrefix)-jdbc-policies \
+ -H "x-ibm-instance-id: ${appConInstanceID}" -H "Content-Type: application/json" \
+ -H "Accept: application/json" -H "X-IBM-Client-Id: ${appConClientID}" -H "authorization: Bearer ${appConToken}" \
+ --data-binary @/tmp/jdbc-policies-configuration.json
+ echo
+
+ echo ========================================================================
+ echo Creating jdbc::tea as $(params.deployPrefix)-jdbc-setdbparms configuration
+ echo ========================================================================
+ echo -n jdbc::tea `cat /var/run/secrets/jdbc/USERID` `cat /var/run/secrets/jdbc/PASSWORD` | base64 -w 0 > /tmp/jdbc-setdbparms.base64
+ # Could use the cat << EOF approach instead (TCD 20240305)
+ cp /work/ace-demo-pipeline/tekton/aceaas/create-configuration-template.json /tmp/jdbc-setdbparms-configuration.json
+ sed -i "s/TEMPLATE_NAME/$(params.deployPrefix)-jdbc-setdbparms/g" /tmp/jdbc-setdbparms-configuration.json
+ sed -i "s/TEMPLATE_TYPE/setdbparms/g" /tmp/jdbc-setdbparms-configuration.json
+ sed -i "s/TEMPLATE_DESCRIPTION/$(params.deployPrefix) JDBC credentials/g" /tmp/jdbc-setdbparms-configuration.json
+ sed -i "s/TEMPLATE_BASE64DATA/`cat /tmp/jdbc-setdbparms.base64 | sed 's/\//\\\\\\//g'`/g" /tmp/jdbc-setdbparms-configuration.json
+ cat /tmp/jdbc-setdbparms-configuration.json
+
+ curl -X PUT https://${appConEndpoint}/api/v1/configurations/$(params.deployPrefix)-jdbc-setdbparms \
+ -H "x-ibm-instance-id: ${appConInstanceID}" -H "Content-Type: application/json" \
+ -H "Accept: application/json" -H "X-IBM-Client-Id: ${appConClientID}" -H "authorization: Bearer ${appConToken}" \
+ --data-binary @/tmp/jdbc-setdbparms-configuration.json
+ echo
+
+ echo ========================================================================
+ echo Creating default policy project setting as $(params.deployPrefix)-default-policy-project configuration
+ echo ========================================================================
+ (echo "Defaults:" && echo " policyProject: 'JDBCPolicies'") | base64 -w 0 > /tmp/default-policy-project.base64
+ cp /work/ace-demo-pipeline/tekton/aceaas/create-configuration-template.json /tmp/default-policy-project-configuration.json
+ sed -i "s/TEMPLATE_NAME/$(params.deployPrefix)-default-policy-project/g" /tmp/default-policy-project-configuration.json
+ sed -i "s/TEMPLATE_TYPE/serverconf/g" /tmp/default-policy-project-configuration.json
+ sed -i "s/TEMPLATE_DESCRIPTION/$(params.deployPrefix) default policy project for JDBC/g" /tmp/default-policy-project-configuration.json
+ sed -i "s/TEMPLATE_BASE64DATA/`cat /tmp/default-policy-project.base64 | sed 's/\//\\\\\\//g'`/g" /tmp/default-policy-project-configuration.json
+ cat /tmp/default-policy-project-configuration.json
+
+ curl -X PUT https://${appConEndpoint}/api/v1/configurations/$(params.deployPrefix)-default-policy-project \
+ -H "x-ibm-instance-id: ${appConInstanceID}" -H "Content-Type: application/json" \
+ -H "Accept: application/json" -H "X-IBM-Client-Id: ${appConClientID}" -H "authorization: Bearer ${appConToken}" \
+ --data-binary @/tmp/default-policy-project-configuration.json
+ echo
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: secret-volume-2
+ mountPath: /var/run/secrets/jdbc
+ - name: secret-volume-3
+ mountPath: /var/run/secrets/aceaas
+ - name: create-integrationruntime
+ image: $(params.buildImage)
+ #
+ # Creates or updates the integration runtime on ACEaaS, pointing it at the
+ # uploaded BAR file and the configurations created in the previous step.
+ #
+ script: |
+ #!/bin/bash
+
+ # Avoid errors using curl with ace-minimal-build
+ unset LD_LIBRARY_PATH
+
+ set -e # Fail on error
+ export appConEndpoint=$(cat /run/secrets/aceaas/appConEndpoint)
+ export appConInstanceID=$(cat /run/secrets/aceaas/appConInstanceID)
+ export appConClientID=$(cat /run/secrets/aceaas/appConClientID)
+ export appConApiKey=$(cat /run/secrets/aceaas/appConApiKey)
+ export appConClientSecret=$(cat /run/secrets/aceaas/appConClientSecret)
+ export appConToken=$(cat /work/APPCON_TOKEN)
+
+ echo ========================================================================
+ echo Creating IR JSON
+ echo ========================================================================
+ #export BARURL='https://dataplane-api-dash.appconnect:3443/v1/ac2vkpa0udw/directories/tdolby-tea-tekton?'
+ cp /work/ace-demo-pipeline/tekton/aceaas/create-integrationruntime-template.json /tmp/create-integrationruntime.json
+ sed -i "s/TEMPLATE_NAME/$(params.deployPrefix)-tea-tekton-ir/g" /tmp/create-integrationruntime.json
+ sed -i "s/TEMPLATE_BARURL/`echo $(params.barURL) | sed 's/\//\\\\\\//g'`/g" /tmp/create-integrationruntime.json
+ sed -i "s/TEMPLATE_POLICYPROJECT/$(params.deployPrefix)-jdbc-policies/g" /tmp/create-integrationruntime.json
+ sed -i "s/TEMPLATE_SERVERCONF/$(params.deployPrefix)-default-policy-project/g" /tmp/create-integrationruntime.json
+ sed -i "s/TEMPLATE_SETDBPARMS/$(params.deployPrefix)-jdbc-setdbparms/g" /tmp/create-integrationruntime.json
+ echo "Contents of create-integrationruntime.json:"
+ cat /tmp/create-integrationruntime.json
+
+
+ curl -X PUT https://${appConEndpoint}/api/v1/integration-runtimes/$(params.deployPrefix)-tea-tekton-ir \
+ -H "x-ibm-instance-id: ${appConInstanceID}" -H "Content-Type: application/json" \
+ -H "Accept: application/json" -H "X-IBM-Client-Id: ${appConClientID}" -H "authorization: Bearer ${appConToken}" \
+ --data-binary @/tmp/create-integrationruntime.json
+ echo
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: secret-volume-3
+ mountPath: /var/run/secrets/aceaas
+ volumes:
+ - name: work
+ emptyDir: {}
+ - name: secret-volume-2
+ # Used to create the database policy and credentials
+ secret:
+ secretName: jdbc-secret
+ - name: secret-volume-3
+ # Used to push to ACEaaS
+ secret:
+ secretName: aceaas-credentials
diff --git a/tekton/aceaas/aceaas-pipeline-run.yaml b/tekton/aceaas/aceaas-pipeline-run.yaml
new file mode 100644
index 0000000..44edd21
--- /dev/null
+++ b/tekton/aceaas/aceaas-pipeline-run.yaml
@@ -0,0 +1,28 @@
+apiVersion: tekton.dev/v1beta1
+kind: PipelineRun
+metadata:
+ # Can use generated names with kubectl create; follow logs with "tkn pipeline logs aceaas-pipeline -f" to
+ # avoid needing a fixed name.
+ #generateName: aceaas-pipeline-run-
+
+ # Fixed name allows "tkn pr delete aceaas-pipeline-run-1 -f ; kubectl apply -f tekton/aceaas/aceaas-pipeline-run.yaml ; tkn pr logs aceaas-pipeline-run-1 -f"
+ # which has a slightly nicer log format.
+ name: aceaas-pipeline-run-1
+spec:
+ serviceAccountName: ace-tekton-service-account
+ pipelineRef:
+ name: aceaas-pipeline
+ params:
+ - name: buildImage
+ # Requires an IBM Entitlement Key
+ value: "cp.icr.io/cp/appc/ace:12.0.11.0-r1"
+ # ace-minimal does not have curl, so we need to use the -build image for curl
+ #value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-minimal-build:12.0.11.0-alpine"
+ - name: url
+ value: "https://github.com/ot4i/ace-demo-pipeline"
+ - name: revision
+ value: "main"
+ - name: deployPrefix
+ value: "tdolby"
+ - name: createConfiguration
+ value: "true"
diff --git a/tekton/aceaas/aceaas-pipeline.yaml b/tekton/aceaas/aceaas-pipeline.yaml
new file mode 100644
index 0000000..e166f8e
--- /dev/null
+++ b/tekton/aceaas/aceaas-pipeline.yaml
@@ -0,0 +1,52 @@
+apiVersion: tekton.dev/v1beta1
+kind: Pipeline
+metadata:
+ name: aceaas-pipeline
+spec:
+ params:
+ - name: url
+ type: string
+ default: "https://github.com/ot4i/ace-demo-pipeline"
+ - name: revision
+ type: string
+ default: "main"
+ - name: buildImage
+ type: string
+ - name: deployPrefix
+ type: string
+ - name: createConfiguration
+ type: string
+ default: "false"
+ tasks:
+ - name: build-and-deploy-bar
+ taskRef:
+ name: aceaas-build-and-deploy-bar
+ params:
+ - name: url
+ value: $(params.url)
+ - name: revision
+ value: $(params.revision)
+ - name: buildImage
+ value: $(params.buildImage)
+ - name: deployPrefix
+ value: $(params.deployPrefix)
+ - name: create-config-and-runtime
+ taskRef:
+ name: aceaas-create-config-and-runtime
+ params:
+ - name: url
+ value: $(params.url)
+ - name: revision
+ value: $(params.revision)
+ - name: buildImage
+ value: $(params.buildImage)
+ - name: deployPrefix
+ value: $(params.deployPrefix)
+ - name: barURL
+ value: "$(tasks.build-and-deploy-bar.results.barURL)"
+ runAfter:
+ - build-and-deploy-bar
+ when:
+ - input: "$(params.createConfiguration)"
+ operator: in
+ values: ["true"]
\ No newline at end of file
diff --git a/tekton/aceaas/aceaas-tekton-rest-api-endpoint.png b/tekton/aceaas/aceaas-tekton-rest-api-endpoint.png
new file mode 100644
index 0000000..7edd753
Binary files /dev/null and b/tekton/aceaas/aceaas-tekton-rest-api-endpoint.png differ
diff --git a/tekton/aceaas/create-configuration-template.json b/tekton/aceaas/create-configuration-template.json
new file mode 100644
index 0000000..d68fef5
--- /dev/null
+++ b/tekton/aceaas/create-configuration-template.json
@@ -0,0 +1,10 @@
+{
+ "metadata": {
+ "name": "TEMPLATE_NAME"
+ },
+ "spec": {
+ "type": "TEMPLATE_TYPE",
+ "description": "TEMPLATE_DESCRIPTION",
+ "data": "TEMPLATE_BASE64DATA"
+ }
+}
\ No newline at end of file
diff --git a/tekton/aceaas/create-integrationruntime-template.json b/tekton/aceaas/create-integrationruntime-template.json
new file mode 100644
index 0000000..315865d
--- /dev/null
+++ b/tekton/aceaas/create-integrationruntime-template.json
@@ -0,0 +1,26 @@
+{
+ "metadata": {
+ "name": "TEMPLATE_NAME"
+ },
+ "spec": {
+ "template": {
+ "spec": {
+ "containers": [
+ {
+ "name": "runtime"
+ }
+ ]
+ }
+ },
+ "barURL": [
+ "TEMPLATE_BARURL"
+ ],
+ "configurations": [
+ "TEMPLATE_POLICYPROJECT",
+ "TEMPLATE_SERVERCONF",
+ "TEMPLATE_SETDBPARMS"
+ ],
+ "version": "12.0",
+ "replicas": 1
+ }
+}
\ No newline at end of file
diff --git a/tekton/force-pull-of-images.yaml b/tekton/force-pull-of-images.yaml
deleted file mode 100644
index b8490f3..0000000
--- a/tekton/force-pull-of-images.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
- name: force-pull
-spec:
- containers:
- - name: force-pull-base
- imagePullPolicy: Always
- image: us.icr.io/ace-containers/ace-minimal:12.0.10.0-alpine
- command: ["sleep"]
- args: ["100"]
- - name: force-pull-build
- imagePullPolicy: Always
- image: us.icr.io/ace-containers/ace-minimal-build:12.0.10.0-alpine
- command: ["sleep"]
- args: ["100"]
- restartPolicy: Never
diff --git a/tekton/knative-deploy-taskrun.yaml b/tekton/knative-deploy-taskrun.yaml
index b61f51e..89b4837 100644
--- a/tekton/knative-deploy-taskrun.yaml
+++ b/tekton/knative-deploy-taskrun.yaml
@@ -9,7 +9,7 @@ spec:
params:
- name: dockerRegistry
value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
-# - name: url
-# value: "https://github.com/ot4i/ace-demo-pipeline"
-# - name: revision
-# value: "main"
+ - name: url
+ value: "https://github.com/ot4i/ace-demo-pipeline"
+ - name: revision
+ value: "main"
diff --git a/tekton/minikube/README.md b/tekton/minikube/README.md
new file mode 100644
index 0000000..12d520f
--- /dev/null
+++ b/tekton/minikube/README.md
@@ -0,0 +1,571 @@
+# Minikube setup
+
+[Minikube](https://minikube.sigs.k8s.io/docs/) is used extensively for local Kubernetes testing
+and there are quite a few guides on the Internet to explain how to set it up and configure it.
+This README describes one example of using minikube v1.32.0 on Ubuntu 22.04 with the demo pipeline.
+
+Points to note:
+- The IP address range in this case was 192.168.x.y but this may vary. The `minikube ip` command
+ should provide the correct address, which then can be used to determine the correct subnet
+ value for the `--insecure-registry` parameter. The addresses appear to be the same for a given
+ machine, so running `minikube start` followed by `minikube ip` to find the IP address followed
+ by `minikube stop` and `minikube delete` should provide the information necessary for the "real"
+ startup command line.
+- This example uses either `ace-minimal` and `ace-minimal-build` or the `ace` image.
+ Note that the `ace` image should be copied into the local Minikube registry for best performance, as shown below.
+- The ingress addon is optional, and container testing can be achieved by port forwarding instead.
+- ACE-as-a-Service builds are also possible, and follow the usual pattern described in [/tekton/README.md](/tekton/README.md)
+
+See [Walkthrough](#walkthrough) for a full example including Knative.
+
+## Steps
+
+```
+minikube start --insecure-registry "192.168.0.0/16"
+minikube addons enable dashboard
+minikube addons enable registry
+minikube addons enable metrics-server
+
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ minikube ip
+192.168.49.2
+
+kubectl apply -f tekton/minikube/minikube-registry-nodeport.yaml
+kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
+kubectl apply --filename https://storage.googleapis.com/tekton-releases/dashboard/latest/release.yaml
+kubectl create secret docker-registry regcred --docker-server=us.icr.io --docker-username=notused --docker-password=notused
+kubectl create secret generic jdbc-secret --from-literal=USERID='BLAH' --from-literal=PASSWORD='BLAH' --from-literal=databaseName='BLUDB' --from-literal=serverName='19af6446-6171-4641-8aba-9dcff8e1b6ff.c1ogj3sd0tgtu0lqde00.databases.appdomain.cloud' --from-literal=portNumber='30699'
+
+kubectl apply -f tekton/service-account.yaml
+```
+
+For the `ace` image (following https://www.ibm.com/docs/en/app-connect/containers_cd?topic=resources-obtaining-applying-your-entitlement-key):
+```
+kubectl create secret docker-registry ibm-entitlement-key --docker-username=cp --docker-password=myEntitlementKey --docker-server=cp.icr.io
+minikube ssh
+docker login cp.icr.io -u cp -p ibmEntitlementKey
+docker pull cp.icr.io/cp/appc/ace:12.0.11.0-r1
+docker tag cp.icr.io/cp/appc/ace:12.0.11.0-r1 192.168.49.2:5000/default/ace:12.0.11.0-r1
+docker push 192.168.49.2:5000/default/ace:12.0.11.0-r1
+```
+
+For `ace-minimal` and `ace-minimal-build`:
+```
+kubectl apply -f tekton/minimal-image-build/01-ace-minimal-image-build-and-push-task.yaml
+kubectl apply -f tekton/minimal-image-build/02-ace-minimal-build-image-build-and-push-task.yaml
+kubectl apply -f tekton/minimal-image-build/ace-minimal-image-pipeline.yaml
+kubectl apply -f tekton/minimal-image-build/ace-minimal-build-image-pipeline.yaml
+
+tkn pr delete ace-minimal-build-image-pipeline-run-1 -f ; kubectl apply -f tekton/minimal-image-build/ace-minimal-build-image-pipeline-run.yaml
+tkn pr logs ace-minimal-build-image-pipeline-run-1 -f
+```
+
+Building and deploying the application:
+```
+kubectl apply -f tekton/10-ibmint-ace-build-task.yaml
+kubectl apply -f tekton/20-deploy-to-cluster-task.yaml
+kubectl apply -f tekton/21-knative-deploy-task.yaml
+kubectl apply -f tekton/ace-pipeline.yaml
+tkn pr delete ace-pipeline-run-1 -f ; kubectl apply -f tekton/ace-pipeline-run.yaml
+tkn pr logs ace-pipeline-run-1 -f
+
+minikube addons enable ingress
+kubectl apply -f tekton/minikube/tea-tekton-minikube-ingress.yaml
+```
+
+Knative setup:
+
+```
+kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.12.1/serving-crds.yaml
+kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v1.11.6/eventing-crds.yaml
+
+kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.12.1/serving-core.yaml
+
+kubectl apply -f https://github.com/knative/net-kourier/releases/download/knative-v1.12.1/kourier.yaml
+
+kubectl patch configmap/config-network -n knative-serving --type merge -p '{"data":{"ingress.class":"kourier.ingress.networking.knative.dev"}}'
+
+kubectl apply -f https://projectcontour.io/quickstart/contour.yaml
+
+```
+
+## Walkthrough
+
+```
+    > gcr.io/k8s-minikube/kicbase...: 453.90 MiB / 453.90 MiB 100.00% 40.66 M
+🔥 Creating docker container (CPUs=2, Memory=3900MB) ...
+🐳 Preparing Kubernetes v1.28.3 on Docker 24.0.7 ...
+ ▪ Generating certificates and keys ...
+ ▪ Booting up control plane ...
+ ▪ Configuring RBAC rules ...
+🔗 Configuring bridge CNI (Container Networking Interface) ...
+🔎 Verifying Kubernetes components...
+ ▪ Using image gcr.io/k8s-minikube/storage-provisioner:v5
+🌟 Enabled addons: storage-provisioner, default-storageclass
+🏄 Done! kubectl is now configured to use "minikube" cluster and "default" namespace by default
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ minikube addons enable dashboard
+💡 dashboard is an addon maintained by Kubernetes. For any concerns contact minikube on GitHub.
+You can view the list of minikube maintainers at: https://github.com/kubernetes/minikube/blob/master/OWNERS
+ ▪ Using image docker.io/kubernetesui/dashboard:v2.7.0
+ ▪ Using image docker.io/kubernetesui/metrics-scraper:v1.0.8
+💡 Some dashboard features require the metrics-server addon. To enable all features please run:
+
+ minikube addons enable metrics-server
+
+
+🌟 The 'dashboard' addon is enabled
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ minikube addons enable registry
+💡 registry is an addon maintained by minikube. For any concerns contact minikube on GitHub.
+You can view the list of minikube maintainers at: https://github.com/kubernetes/minikube/blob/master/OWNERS
+ ▪ Using image docker.io/registry:2.8.3
+ ▪ Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.5
+🔎 Verifying registry addon...
+🌟 The 'registry' addon is enabled
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ minikube addons enable metrics-server
+💡 metrics-server is an addon maintained by Kubernetes. For any concerns contact minikube on GitHub.
+You can view the list of minikube maintainers at: https://github.com/kubernetes/minikube/blob/master/OWNERS
+ ▪ Using image registry.k8s.io/metrics-server/metrics-server:v0.6.4
+🌟 The 'metrics-server' addon is enabled
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ minikube ip
+192.168.49.2
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/minikube/minikube-registry-nodeport.yaml
+service/registry-nodeport created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ minikube ssh
+docker@minikube:~$ docker login cp.icr.io -u cp -p ibmEntitlementKey
+WARNING! Using --password via the CLI is insecure. Use --password-stdin.
+WARNING! Your password will be stored unencrypted in /home/docker/.docker/config.json.
+Configure a credential helper to remove this warning. See
+https://docs.docker.com/engine/reference/commandline/login/#credentials-store
+
+Login Succeeded
+docker@minikube:~$ docker pull cp.icr.io/cp/appc/ace:12.0.11.0-r1
+12.0.11.0-r1: Pulling from cp/appc/ace
+a032f50e22ae: Pull complete
+1bb268c9da71: Pull complete
+a8313fdaaeb2: Pull complete
+2aa0417eb4e3: Pull complete
+446d1d900c62: Pull complete
+Digest: sha256:2a3ba6902daf785b7ae435a6aaa6f7018e0b9dcfec8c0d1a5e82107b01e7394c
+Status: Downloaded newer image for cp.icr.io/cp/appc/ace:12.0.11.0-r1
+cp.icr.io/cp/appc/ace:12.0.11.0-r1
+docker@minikube:~$ docker tag cp.icr.io/cp/appc/ace:12.0.11.0-r1 192.168.49.2:5000/default/ace:12.0.11.0-r1
+docker@minikube:~$ docker push 192.168.49.2:5000/default/ace:12.0.11.0-r1
+The push refers to repository [192.168.49.2:5000/default/ace]
+ece30248c995: Pushed
+a2240a6f3243: Pushed
+eb39a818d785: Pushed
+ec6dd3599c39: Pushed
+80c0d7946d02: Pushed
+12.0.11.0-r1: digest: sha256:6261af08295ff3ea9126c6e1126619522476ca794ec202fbc596fb1ccf66a5a1 size: 1376
+docker@minikube:~$ exit
+logout
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
+namespace/tekton-pipelines created
+clusterrole.rbac.authorization.k8s.io/tekton-pipelines-controller-cluster-access created
+clusterrole.rbac.authorization.k8s.io/tekton-pipelines-controller-tenant-access created
+clusterrole.rbac.authorization.k8s.io/tekton-pipelines-webhook-cluster-access created
+clusterrole.rbac.authorization.k8s.io/tekton-events-controller-cluster-access created
+role.rbac.authorization.k8s.io/tekton-pipelines-controller created
+role.rbac.authorization.k8s.io/tekton-pipelines-webhook created
+role.rbac.authorization.k8s.io/tekton-pipelines-events-controller created
+role.rbac.authorization.k8s.io/tekton-pipelines-leader-election created
+role.rbac.authorization.k8s.io/tekton-pipelines-info created
+serviceaccount/tekton-pipelines-controller created
+serviceaccount/tekton-pipelines-webhook created
+serviceaccount/tekton-events-controller created
+clusterrolebinding.rbac.authorization.k8s.io/tekton-pipelines-controller-cluster-access created
+clusterrolebinding.rbac.authorization.k8s.io/tekton-pipelines-controller-tenant-access created
+clusterrolebinding.rbac.authorization.k8s.io/tekton-pipelines-webhook-cluster-access created
+clusterrolebinding.rbac.authorization.k8s.io/tekton-events-controller-cluster-access created
+rolebinding.rbac.authorization.k8s.io/tekton-pipelines-controller created
+rolebinding.rbac.authorization.k8s.io/tekton-pipelines-webhook created
+rolebinding.rbac.authorization.k8s.io/tekton-pipelines-controller-leaderelection created
+rolebinding.rbac.authorization.k8s.io/tekton-pipelines-webhook-leaderelection created
+rolebinding.rbac.authorization.k8s.io/tekton-pipelines-info created
+rolebinding.rbac.authorization.k8s.io/tekton-pipelines-events-controller created
+rolebinding.rbac.authorization.k8s.io/tekton-events-controller-leaderelection created
+customresourcedefinition.apiextensions.k8s.io/clustertasks.tekton.dev created
+customresourcedefinition.apiextensions.k8s.io/customruns.tekton.dev created
+customresourcedefinition.apiextensions.k8s.io/pipelines.tekton.dev created
+customresourcedefinition.apiextensions.k8s.io/pipelineruns.tekton.dev created
+customresourcedefinition.apiextensions.k8s.io/resolutionrequests.resolution.tekton.dev created
+customresourcedefinition.apiextensions.k8s.io/stepactions.tekton.dev created
+customresourcedefinition.apiextensions.k8s.io/tasks.tekton.dev created
+customresourcedefinition.apiextensions.k8s.io/taskruns.tekton.dev created
+customresourcedefinition.apiextensions.k8s.io/verificationpolicies.tekton.dev created
+secret/webhook-certs created
+validatingwebhookconfiguration.admissionregistration.k8s.io/validation.webhook.pipeline.tekton.dev created
+mutatingwebhookconfiguration.admissionregistration.k8s.io/webhook.pipeline.tekton.dev created
+validatingwebhookconfiguration.admissionregistration.k8s.io/config.webhook.pipeline.tekton.dev created
+clusterrole.rbac.authorization.k8s.io/tekton-aggregate-edit created
+clusterrole.rbac.authorization.k8s.io/tekton-aggregate-view created
+configmap/config-defaults created
+configmap/config-events created
+configmap/feature-flags created
+configmap/pipelines-info created
+configmap/config-leader-election-controller created
+configmap/config-leader-election-events created
+configmap/config-leader-election-webhook created
+configmap/config-logging created
+configmap/config-observability created
+configmap/config-registry-cert created
+configmap/config-spire created
+configmap/config-tracing created
+deployment.apps/tekton-pipelines-controller created
+service/tekton-pipelines-controller created
+deployment.apps/tekton-events-controller created
+service/tekton-events-controller created
+namespace/tekton-pipelines-resolvers created
+clusterrole.rbac.authorization.k8s.io/tekton-pipelines-resolvers-resolution-request-updates created
+role.rbac.authorization.k8s.io/tekton-pipelines-resolvers-namespace-rbac created
+serviceaccount/tekton-pipelines-resolvers created
+clusterrolebinding.rbac.authorization.k8s.io/tekton-pipelines-resolvers created
+rolebinding.rbac.authorization.k8s.io/tekton-pipelines-resolvers-namespace-rbac created
+configmap/bundleresolver-config created
+configmap/cluster-resolver-config created
+configmap/resolvers-feature-flags created
+configmap/config-leader-election-resolvers created
+configmap/config-logging created
+configmap/config-observability created
+configmap/git-resolver-config created
+configmap/http-resolver-config created
+configmap/hubresolver-config created
+deployment.apps/tekton-pipelines-remote-resolvers created
+service/tekton-pipelines-remote-resolvers created
+horizontalpodautoscaler.autoscaling/tekton-pipelines-webhook created
+deployment.apps/tekton-pipelines-webhook created
+service/tekton-pipelines-webhook created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply --filename https://storage.googleapis.com/tekton-releases/dashboard/latest/release.yaml
+customresourcedefinition.apiextensions.k8s.io/extensions.dashboard.tekton.dev created
+serviceaccount/tekton-dashboard created
+role.rbac.authorization.k8s.io/tekton-dashboard-info created
+clusterrole.rbac.authorization.k8s.io/tekton-dashboard-backend created
+clusterrole.rbac.authorization.k8s.io/tekton-dashboard-tenant created
+rolebinding.rbac.authorization.k8s.io/tekton-dashboard-info created
+clusterrolebinding.rbac.authorization.k8s.io/tekton-dashboard-backend created
+configmap/dashboard-info created
+service/tekton-dashboard created
+deployment.apps/tekton-dashboard created
+clusterrolebinding.rbac.authorization.k8s.io/tekton-dashboard-tenant created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl create secret docker-registry regcred --docker-server=us.icr.io --docker-username=notused --docker-password=notused
+secret/regcred created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl create secret generic jdbc-secret --from-literal=USERID='BLAH' --from-literal=PASSWORD='BLAH' --from-literal=databaseName='BLUDB' --from-literal=serverName='19af6446-6171-4641-8aba-9dcff8e1b6ff.c1ogj3sd0tgtu0lqde00.databases.appdomain.cloud' --from-literal=portNumber='30699'
+secret/jdbc-secret created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/service-account.yaml
+serviceaccount/ace-tekton-service-account created
+role.rbac.authorization.k8s.io/pipeline-role created
+rolebinding.rbac.authorization.k8s.io/pipeline-role-binding created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/minimal-image-build/01-ace-minimal-image-build-and-push-task.yaml
+task.tekton.dev/ace-minimal-image-build-and-push created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/minimal-image-build/02-ace-minimal-build-image-build-and-push-task.yaml
+task.tekton.dev/ace-minimal-build-image-build-and-push created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/minimal-image-build/ace-minimal-image-pipeline.yaml
+pipeline.tekton.dev/ace-minimal-image-pipeline created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/minimal-image-build/ace-minimal-build-image-pipeline.yaml
+pipeline.tekton.dev/ace-minimal-build-image-pipeline created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ vi tekton/minimal-image-build/ace-minimal-build-image-pipeline
+ace-minimal-build-image-pipeline-run.yaml ace-minimal-build-image-pipeline.yaml
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ vi tekton/minimal-image-build/ace-minimal-build-image-pipeline
+ace-minimal-build-image-pipeline-run.yaml ace-minimal-build-image-pipeline.yaml
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ vi tekton/minimal-image-build/ace-minimal-build-image-pipeline-run.yaml
+
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl create secret docker-registry ibm-entitlement-key --docker-username=cp --docker-password=ibmEntitlementKey --docker-server=cp.icr.io
+secret/ibm-entitlement-key created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ tkn pr delete ace-minimal-build-image-pipeline-run-1 -f ; kubectl apply -f tekton/minimal-image-build/ace-minimal-build-image-pipeline-run.yaml
+PipelineRuns deleted: "ace-minimal-build-image-pipeline-run-1"
+pipelinerun.tekton.dev/ace-minimal-build-image-pipeline-run-1 created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ tkn pr logs ace-minimal-build-image-pipeline-run-1 -f
+[runtime-image : clone] Cloning into 'ace-docker'...
+[runtime-image : clone] total 120
+[runtime-image : clone] -rw-r--r-- 1 root root 6711 Apr 8 17:49 Dockerfile.alpine
+[runtime-image : clone] -rw-r--r-- 1 root root 6940 Apr 8 17:49 Dockerfile.alpine-java11
+[runtime-image : clone] -rw-r--r-- 1 root root 4963 Apr 8 17:49 Dockerfile.ubuntu
+
+
+
+[build-image : ace-minimal-build-push] INFO[0028] CMD ["/bin/bash"]
+[build-image : ace-minimal-build-push] INFO[0028] Pushing image to 192.168.49.2:5000/default/ace-minimal-build:12.0.11.0-alpine
+[build-image : ace-minimal-build-push] INFO[0028] Pushed 192.168.49.2:5000/default/ace-minimal-build@sha256:c3fcc0155163ed528a7d3c0801a32b93f79b30007865eb095881f353a2edf320
+
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/10-ibmint-ace-build-task.yaml
+task.tekton.dev/ace-build created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/20-deploy-to-cluster-task.yaml
+task.tekton.dev/deploy-to-cluster created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/21-knative-deploy-task.yaml
+task.tekton.dev/knative-deploy created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/ace-pipeline.yaml
+pipeline.tekton.dev/ace-pipeline created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ tkn pr delete ace-pipeline-run-1 -f ; kubectl apply -f tekton/ace-pipeline-run.yaml
+Error: pipelineruns.tekton.dev "ace-pipeline-run-1" not found
+pipelinerun.tekton.dev/ace-pipeline-run-1 created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ tkn pr logs ace-pipeline-run-1 -f
+[build-from-source : clone] Cloning into 'ace-demo-pipeline'...
+[build-from-source : clone] Setting container tag to 20240408175811-f07980e
+
+[build-from-source : ibmint-build] mqsicreateworkdir: Copying sample server.config.yaml to work directory
+
+
+
+[deploy-to-cluster : deploy-app] deployment.apps/tea-tekton created
+
+[deploy-to-cluster : create-service] service/tea-tekton-service created
+
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ minikube addons enable ingress
+💡 ingress is an addon maintained by Kubernetes. For any concerns contact minikube on GitHub.
+You can view the list of minikube maintainers at: https://github.com/kubernetes/minikube/blob/master/OWNERS
+ ▪ Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
+ ▪ Using image registry.k8s.io/ingress-nginx/controller:v1.9.4
+ ▪ Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
+🔎 Verifying ingress addon...
+🌟 The 'ingress' addon is enabled
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f tekton/minikube/tea-tekton-minikube-ingress.yaml
+ingress.networking.k8s.io/tea-ingress created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl port-forward --address 0.0.0.0 svc/tea-tekton-service 7800:7800 &
+[1] 286857
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ Forwarding from 0.0.0.0:7800 -> 7800
+
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ curl http://localhost:7800/tea/index/1
+Handling connection for 7800
+{"name":"Assam","id":"1"}
+
+
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.12.1/serving-crds.yaml
+customresourcedefinition.apiextensions.k8s.io/certificates.networking.internal.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/configurations.serving.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/clusterdomainclaims.networking.internal.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/domainmappings.serving.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/ingresses.networking.internal.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/metrics.autoscaling.internal.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/podautoscalers.autoscaling.internal.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/revisions.serving.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/routes.serving.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/serverlessservices.networking.internal.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/services.serving.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/images.caching.internal.knative.dev created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v1.11.6/eventing-crds.yaml
+customresourcedefinition.apiextensions.k8s.io/apiserversources.sources.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/brokers.eventing.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/channels.messaging.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/containersources.sources.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/eventtypes.eventing.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/parallels.flows.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/pingsources.sources.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/sequences.flows.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/sinkbindings.sources.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/subscriptions.messaging.knative.dev created
+customresourcedefinition.apiextensions.k8s.io/triggers.eventing.knative.dev created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.12.1/serving-core.yaml
+
+namespace/knative-serving created
+role.rbac.authorization.k8s.io/knative-serving-activator created
+clusterrole.rbac.authorization.k8s.io/knative-serving-activator-cluster created
+clusterrole.rbac.authorization.k8s.io/knative-serving-aggregated-addressable-resolver created
+clusterrole.rbac.authorization.k8s.io/knative-serving-addressable-resolver created
+clusterrole.rbac.authorization.k8s.io/knative-serving-namespaced-admin created
+clusterrole.rbac.authorization.k8s.io/knative-serving-namespaced-edit created
+clusterrole.rbac.authorization.k8s.io/knative-serving-namespaced-view created
+clusterrole.rbac.authorization.k8s.io/knative-serving-core created
+clusterrole.rbac.authorization.k8s.io/knative-serving-podspecable-binding created
+serviceaccount/controller created
+clusterrole.rbac.authorization.k8s.io/knative-serving-admin created
+clusterrolebinding.rbac.authorization.k8s.io/knative-serving-controller-admin created
+clusterrolebinding.rbac.authorization.k8s.io/knative-serving-controller-addressable-resolver created
+serviceaccount/activator created
+rolebinding.rbac.authorization.k8s.io/knative-serving-activator created
+clusterrolebinding.rbac.authorization.k8s.io/knative-serving-activator-cluster created
+customresourcedefinition.apiextensions.k8s.io/images.caching.internal.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/certificates.networking.internal.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/configurations.serving.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/clusterdomainclaims.networking.internal.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/domainmappings.serving.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/ingresses.networking.internal.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/metrics.autoscaling.internal.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/podautoscalers.autoscaling.internal.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/revisions.serving.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/routes.serving.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/serverlessservices.networking.internal.knative.dev unchanged
+customresourcedefinition.apiextensions.k8s.io/services.serving.knative.dev unchanged
+secret/serving-certs-ctrl-ca created
+secret/knative-serving-certs created
+secret/routing-serving-certs created
+image.caching.internal.knative.dev/queue-proxy created
+configmap/config-autoscaler created
+configmap/config-defaults created
+configmap/config-deployment created
+configmap/config-domain created
+configmap/config-features created
+configmap/config-gc created
+configmap/config-leader-election created
+configmap/config-logging created
+configmap/config-network created
+configmap/config-observability created
+configmap/config-tracing created
+horizontalpodautoscaler.autoscaling/activator created
+poddisruptionbudget.policy/activator-pdb created
+deployment.apps/activator created
+service/activator-service created
+deployment.apps/autoscaler created
+service/autoscaler created
+deployment.apps/controller created
+service/controller created
+horizontalpodautoscaler.autoscaling/webhook created
+poddisruptionbudget.policy/webhook-pdb created
+deployment.apps/webhook created
+service/webhook created
+validatingwebhookconfiguration.admissionregistration.k8s.io/config.webhook.serving.knative.dev created
+mutatingwebhookconfiguration.admissionregistration.k8s.io/webhook.serving.knative.dev created
+validatingwebhookconfiguration.admissionregistration.k8s.io/validation.webhook.serving.knative.dev created
+secret/webhook-certs created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f https://github.com/knative/net-kourier/releases/download/knative-v1.12.1/kourier.yaml
+namespace/kourier-system created
+configmap/kourier-bootstrap created
+configmap/config-kourier created
+serviceaccount/net-kourier created
+clusterrole.rbac.authorization.k8s.io/net-kourier created
+clusterrolebinding.rbac.authorization.k8s.io/net-kourier created
+deployment.apps/net-kourier-controller created
+service/net-kourier-controller created
+deployment.apps/3scale-kourier-gateway created
+service/kourier created
+service/kourier-internal created
+horizontalpodautoscaler.autoscaling/3scale-kourier-gateway created
+poddisruptionbudget.policy/3scale-kourier-gateway-pdb created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl patch configmap/config-network -n knative-serving --type merge -p '{"data":{"ingress.class":"kourier.ingress.networking.knative.dev"}}'
+configmap/config-network patched
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl apply -f https://projectcontour.io/quickstart/contour.yaml
+
+namespace/projectcontour created
+serviceaccount/contour created
+serviceaccount/envoy created
+configmap/contour created
+customresourcedefinition.apiextensions.k8s.io/contourconfigurations.projectcontour.io created
+customresourcedefinition.apiextensions.k8s.io/contourdeployments.projectcontour.io created
+customresourcedefinition.apiextensions.k8s.io/extensionservices.projectcontour.io created
+customresourcedefinition.apiextensions.k8s.io/httpproxies.projectcontour.io created
+customresourcedefinition.apiextensions.k8s.io/tlscertificatedelegations.projectcontour.io created
+serviceaccount/contour-certgen created
+rolebinding.rbac.authorization.k8s.io/contour created
+role.rbac.authorization.k8s.io/contour-certgen created
+job.batch/contour-certgen-v1-28-2 created
+clusterrolebinding.rbac.authorization.k8s.io/contour created
+rolebinding.rbac.authorization.k8s.io/contour-rolebinding created
+clusterrole.rbac.authorization.k8s.io/contour created
+role.rbac.authorization.k8s.io/contour created
+service/contour created
+service/envoy created
+deployment.apps/contour created
+daemonset.apps/envoy created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ cat <<EOF | kubectl apply -f -
+> apiVersion: networking.k8s.io/v1
+> kind: Ingress
+> metadata:
+> name: kourier-ingress
+> namespace: kourier-system
+> spec:
+> rules:
+> - http:
+> paths:
+> - path: /
+> pathType: Prefix
+> backend:
+> service:
+> name: kourier
+> port:
+> number: 80
+> EOF
+ingress.networking.k8s.io/kourier-ingress created
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ export ksvc_domain="\"data\":{\""$(minikube ip)".nip.io\": \"\"}"
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kubectl patch configmap/config-domain -n knative-serving --type merge -p "{$ksvc_domain}"
+configmap/config-domain patched
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ tkn pr delete ace-pipeline-run-1 -f ; kubectl apply -f tekton/ace-pipeline-run.yaml ; tkn pr logs ace-pipeline-run-1 -f
+PipelineRuns deleted: "ace-pipeline-run-1"
+pipelinerun.tekton.dev/ace-pipeline-run-1 created
+[build-from-source : clone] Cloning into 'ace-demo-pipeline'...
+
+
+
+[build-from-source : docker-build-and-push] Mon Apr 8 18:51:46 UTC 2024
+
+[deploy-knative-to-cluster : clone] + cd /work
+[deploy-knative-to-cluster : clone] + git clone -b aceaas-and-minikube https://github.com/ot4i/ace-demo-pipeline
+[deploy-knative-to-cluster : clone] Cloning into 'ace-demo-pipeline'...
+[deploy-knative-to-cluster : clone] + echo 192.168.49.2:5000/default
+[deploy-knative-to-cluster : clone] + sed 's/\//\\\//g'
+[deploy-knative-to-cluster : clone] + export 'REG_WITH_ESCAPED_SLASH=192.168.49.2:5000\/default'
+[deploy-knative-to-cluster : clone] + echo '192.168.49.2:5000\/default'
+[deploy-knative-to-cluster : clone] + sed -i 's/DOCKER_REGISTRY/192.168.49.2:5000\/default/g' /work/ace-demo-pipeline/serverless/tea-tekton-knative-service.yaml
+[deploy-knative-to-cluster : clone] 192.168.49.2:5000\/default
+[deploy-knative-to-cluster : clone] + export 'TAG=20240408185001-f07980e'
+[deploy-knative-to-cluster : clone] + echo Using 20240408185001-f07980e as image tag
+[deploy-knative-to-cluster : clone] Using 20240408185001-f07980e as image tag
+[deploy-knative-to-cluster : clone] + sed -i s/IMAGE_TAG/20240408185001-f07980e/g /work/ace-demo-pipeline/serverless/knative-service-account.yaml /work/ace-demo-pipeline/serverless/tea-tekton-knative-service.yaml
+[deploy-knative-to-cluster : clone] + cat /work/ace-demo-pipeline/serverless/tea-tekton-knative-service.yaml
+[deploy-knative-to-cluster : clone] apiVersion: serving.knative.dev/v1
+[deploy-knative-to-cluster : clone] kind: Service
+[deploy-knative-to-cluster : clone] metadata:
+[deploy-knative-to-cluster : clone] name: tea-tekton-knative
+[deploy-knative-to-cluster : clone] spec:
+[deploy-knative-to-cluster : clone] template:
+[deploy-knative-to-cluster : clone] spec:
+[deploy-knative-to-cluster : clone] volumes:
+[deploy-knative-to-cluster : clone] - name: secret-volume-2
+[deploy-knative-to-cluster : clone] secret:
+[deploy-knative-to-cluster : clone] secretName: jdbc-secret
+[deploy-knative-to-cluster : clone] imagePullSecrets:
+[deploy-knative-to-cluster : clone] - name: regcred
+[deploy-knative-to-cluster : clone] containers:
+[deploy-knative-to-cluster : clone] - name: tea-tekton-knative
+[deploy-knative-to-cluster : clone] image: 192.168.49.2:5000/default/tea-tekton:20240408185001-f07980e
+[deploy-knative-to-cluster : clone] ports:
+[deploy-knative-to-cluster : clone] - containerPort: 7800
+[deploy-knative-to-cluster : clone] volumeMounts:
+[deploy-knative-to-cluster : clone] - name: secret-volume-2
+[deploy-knative-to-cluster : clone] mountPath: /var/run/secrets/jdbc
+
+[deploy-knative-to-cluster : create-knative-service] Warning: Kubernetes default value is insecure, Knative may default this to secure in a future release: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation, spec.template.spec.containers[0].securityContext.capabilities, spec.template.spec.containers[0].securityContext.runAsNonRoot, spec.template.spec.containers[0].securityContext.seccompProfile
+[deploy-knative-to-cluster : create-knative-service] service.serving.knative.dev/tea-tekton-knative configured
+
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kn services list
+NAME URL LATEST AGE CONDITIONS READY REASON
+tea-tekton-knative http://tea-tekton-knative.default.192.168.49.2.nip.io tea-tekton-knative-00002 8m51s 1 OK / 3 Unknown
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ kn services list
+NAME URL LATEST AGE CONDITIONS READY REASON
+tea-tekton-knative http://tea-tekton-knative.default.192.168.49.2.nip.io tea-tekton-knative-00003 9m5s 3 OK / 3 True
+ubuntu@minikube-20231123:~/github.com/ace-demo-pipeline$ curl http://tea-tekton-knative.default.192.168.49.2.nip.io/tea/index/1
+{"name":"Assam","id":"1"}
+```
diff --git a/tekton/minikube/minikube-registry-nodeport.yaml b/tekton/minikube/minikube-registry-nodeport.yaml
new file mode 100644
index 0000000..adcc095
--- /dev/null
+++ b/tekton/minikube/minikube-registry-nodeport.yaml
@@ -0,0 +1,15 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: registry-nodeport
+ namespace: kube-system
+spec:
+ type: NodePort
+ selector:
+ actual-registry: 'true'
+ kubernetes.io/minikube-addons: registry
+ ports:
+ - port: 5000
+ # By default and for convenience, the `targetPort` is set to
+ # the same value as the `port` field.
+ targetPort: 5000
diff --git a/tekton/minikube/tea-tekton-minikube-ingress.yaml b/tekton/minikube/tea-tekton-minikube-ingress.yaml
new file mode 100644
index 0000000..e65083a
--- /dev/null
+++ b/tekton/minikube/tea-tekton-minikube-ingress.yaml
@@ -0,0 +1,15 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: tea-ingress
+spec:
+ rules:
+ - http:
+ paths:
+ - pathType: Prefix
+ path: /tea/index
+ backend:
+ service:
+ name: tea-tekton-service
+ port:
+ number: 7800
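+# With the Minikube ingress addon enabled, the REST API behind this Ingress should also be
+# reachable directly via the cluster IP (assuming the default addon behaviour of listening
+# on port 80 of the Minikube IP), for example:
+#   curl http://$(minikube ip)/tea/index/1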
diff --git a/tekton/minimal-image-build/01-ace-minimal-image-build-and-push-task.yaml b/tekton/minimal-image-build/01-ace-minimal-image-build-and-push-task.yaml
index 60491bf..b947f0b 100644
--- a/tekton/minimal-image-build/01-ace-minimal-image-build-and-push-task.yaml
+++ b/tekton/minimal-image-build/01-ace-minimal-image-build-and-push-task.yaml
@@ -12,14 +12,10 @@ spec:
- name: "HOME"
value: "/tekton/home"
params:
- - name: dockerRegistry
+ - name: outputRegistry
type: string
- name: aceDownloadUrl
type: string
- - name: url
- type: string
- - name: revision
- type: string
- name: runtimeImage
type: string
- name: aceDockerFile
@@ -30,8 +26,11 @@ spec:
script: |
#!/bin/sh
cd /work
- git clone "https://github.com/trevor-dolby-at-ibm-com/ace-docker"
- sed -i 's/alpine:3.18/quay.io\/trevor_dolby\/alpine:3.18/g' /work/ace-docker/experimental/ace-minimal/Dockerfile.alpine
+ git clone https://github.com/ot4i/ace-docker
+
+ # Change the registry location if pull quota exceeded
+ #sed -i 's/alpine:3.18/quay.io\/trevor_dolby\/alpine:3.18/g' /work/ace-docker/experimental/ace-minimal/Dockerfile.alpine
+
ls -l /work/ace-docker/experimental/ace-minimal
volumeMounts:
- mountPath: /work
@@ -46,8 +45,9 @@ spec:
- /kaniko/executor
args:
- --dockerfile=/work/ace-docker/$(params.aceDockerFile)
- - --destination=$(params.dockerRegistry)/$(params.runtimeImage)
+ - --destination=$(params.outputRegistry)/$(params.runtimeImage)
- --context=/work/ace-docker/experimental/ace-minimal
+ - --insecure-registry=192.168.0.0/16
- --build-arg=DOWNLOAD_URL=$(params.aceDownloadUrl)
- --skip-tls-verify
volumeMounts:
diff --git a/tekton/minimal-image-build/02-ace-minimal-build-image-build-and-push-task.yaml b/tekton/minimal-image-build/02-ace-minimal-build-image-build-and-push-task.yaml
index 031a6e2..ed610d8 100644
--- a/tekton/minimal-image-build/02-ace-minimal-build-image-build-and-push-task.yaml
+++ b/tekton/minimal-image-build/02-ace-minimal-build-image-build-and-push-task.yaml
@@ -44,6 +44,7 @@ spec:
- --dockerfile=/work/ace-demo-pipeline/demo-infrastructure/docker/ace-minimal-build/Dockerfile
- --destination=$(params.dockerRegistry)/$(params.buildImage)
- --context=/work/ace-demo-pipeline/demo-infrastructure/docker/ace-minimal-build
+ - --insecure-registry=192.168.0.0/16
- --build-arg=BASE_IMAGE=$(params.dockerRegistry)/$(params.runtimeImage)
- --skip-tls-verify
volumeMounts:
diff --git a/tekton/minimal-image-build/README.md b/tekton/minimal-image-build/README.md
index f341d58..aa7b16c 100644
--- a/tekton/minimal-image-build/README.md
+++ b/tekton/minimal-image-build/README.md
@@ -1,25 +1,64 @@
# Tekton builds of pre-req images
-Used to build images that can then be used to build and run ACE applications.
+Used to build minimal ACE images that can then be used to build and run ACE applications.
![Image build overview](ace-demo-pipeline-tekton-2.png)
+These images are not required for the successful use of the demo pipeline, and
+others can be used for build and test:
+
+- The `ace` image from cp.icr.io can be used as a build image and also as a runtime
+ image for the various containers.
+- The `ace-server-prod` image can be used as a runtime image for CP4i users.
+
+See [ACE containers: choosing a base image](https://community.ibm.com/community/user/integration/blogs/trevor-dolby/2024/02/05/ace-containers-choosing-a-base-image)
+for a discussion on how to decide along with some of the history of the images.
+
+The minimal images can be helpful in some cases:
+
+- For users who do not have an IBM Entitlement Key and therefore cannot use the
+ `ace` image, `ace-minimal` can be built from the freely-available ACE Developer
+ edition and used to experiment with ACE and pipelines.
+- Maven is not installed in the `ace` image and so users wishing to run Maven
+ builds will need to create a new image with Maven installed. Note that Maven is
+ no longer required for the demo pipeline to successfully run.
+- In some cases, the container builds using buildah or Kaniko are unable to cache
+ container images locally, leading to delays in unpacking the images every time.
+ For these situations, `ace-minimal` is faster due to the small image size.
+
## Getting started
- Most of the specific registry names need to be customised: us.icr.io may not be the right region, for example, and us.icr.io/ace-containers
-is unlikely to be writable. Creating registries and so on (though essential) is beyond the scope of this document, but customisation of
-the artifacts in this repo (such as ace-minimal-build-image-pipeline.yaml) will almost certainly be necessary. Note that on Windows, kubectl
-sometimes complains about not being able to validate files; using --validate=false appears to eliminate the issue without causing problems.
+Many of the artifacts in this repo (such as ace-minimal-build-image-pipeline-run.yaml) will need to be
+customized depending on the exact cluster layout. The defaults are set up for Minikube running with Docker
+on Ubuntu, and may need to be modified depending on network addresses, etc. The most-commonly-modified
+files have options in the comments, with [ace-minimal-build-image-pipeline-run.yaml](ace-minimal-build-image-pipeline-run.yaml)
+being one example:
+```
+ - name: dockerRegistry
+ # OpenShift
+ #value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
+ #value: "quay.io/trevor_dolby"
+ #value: "us.icr.io/ace-containers"
+ #value: "aceDemoRegistry.azurecr.io"
+ # Minikube
+ value: "192.168.49.2:5000/default"
+```
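+For Minikube, the registry value is normally the cluster IP (as reported by `minikube ip`) followed by the
+registry port and namespace, so the setting above can be cross-checked with something like:
+```
+echo "$(minikube ip):5000/default"
+```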
- The Tekton pipeline relies on docker credentials being provided for Kaniko to use when pushing the built image, and these credentials
-must be associated with the service account for the pipeline. If this has not already been done elsewhere, then create as follows, with
-appropriate changes for a fork of this repo:
+The Tekton pipeline expects docker credentials to be provided for Kaniko to use when pushing the built image, and
+these credentials must be associated with the service account for the pipeline. If this has not already been done
+elsewhere, create them as follows for single-node OpenShift using temporary admin credentials:
```
-kubectl create secret docker-registry regcred --docker-server=us.icr.io --docker-username=iamapikey --docker-password=
+kubectl create secret docker-registry regcred --docker-server=image-registry.openshift-image-registry.svc.cluster.local:5000 --docker-username=kubeadmin --docker-password=$(oc whoami -t)
kubectl apply -f tekton/service-account.yaml
```
-The service account also has the ability to create services, deployments, etc, which are necessary for running the service. Note that
-Windows kubectl seems to need the `--docker-email` parameter also, but the value can be anything.
+or a dummy variant for Minikube without registry authentication enabled:
+```
+kubectl create secret docker-registry regcred --docker-server=us.icr.io --docker-username=dummy --docker-password=dummy
+kubectl apply -f tekton/service-account.yaml
+```
+The service account also has the ability to create services, deployments, etc., which are necessary for running the service.
+Note that on Windows, kubectl sometimes complains about not being able to validate files (using --validate=false appears to
+eliminate the issue without causing problems) and also seems to need the `--docker-email` parameter, although the value can be anything.
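+A Windows-flavoured variant of the Minikube commands above might therefore look like the following, where
+the --docker-email value is arbitrary and --validate=false avoids the validation complaints:
+```
+kubectl create secret docker-registry regcred --docker-server=us.icr.io --docker-username=dummy --docker-password=dummy --docker-email=dummy@example.com
+kubectl apply --validate=false -f tekton/service-account.yaml
+```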
## Setting the correct product version
@@ -49,35 +88,36 @@ kubectl apply -f tekton/minimal-image-build/ace-minimal-image-pipeline-run.yaml
tkn pipelinerun logs ace-minimal-image-pipeline-run-1 -f
```
-Once that has been built, the ace-minimal-build image can be built as follows:
+The ace-minimal-build-image-pipeline builds not only the ace-minimal-build image but also
+ace-minimal itself, so the following commands can be run on their own to build both images:
```
+kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
+kubectl apply -f tekton/minimal-image-build/01-ace-minimal-image-build-and-push-task.yaml
kubectl apply -f tekton/minimal-image-build/02-ace-minimal-build-image-build-and-push-task.yaml
kubectl apply -f tekton/minimal-image-build/ace-minimal-build-image-pipeline.yaml
kubectl apply -f tekton/minimal-image-build/ace-minimal-build-image-pipeline-run.yaml
tkn pipelinerun logs ace-minimal-build-image-pipeline-run-1 -f
```
-## Issues with Kube nodes and Tekton
-
-In certain cases, the images present in the repository cannot be "seen" by the Tekton pipeline task steps, for
-unclear but credential-related reasons. Starting pods that use the images appears to force the pull to the worker
-node, and this can be done as follows:
-```
-kubectl delete pod force-pull
-kubectl apply -f tekton/force-pull-of-images.yaml
-```
-
## OpenShift
The majority of steps are the same, but the registry authentication is a little different; assuming a session logged in as kubeadmin, it would look as follows:
```
kubectl create secret docker-registry regcred --docker-server=image-registry.openshift-image-registry.svc.cluster.local:5000 --docker-username=kubeadmin --docker-password=$(oc whoami -t)
```
-Note that the actual password itself (as opposed to the hash provided by "oc whoami -t") does not work for registry authentication for some reason.
+Note that the actual password (as opposed to the token provided by "oc whoami -t") does not work for
+registry authentication, for some reason, when using single-node OpenShift with a temporary admin user.
-After that, the pipeline runs would be
+After that, the pipeline run files need to be adjusted to use the OpenShift registry, such
+as [ace-minimal-build-image-pipeline-run.yaml](ace-minimal-build-image-pipeline-run.yaml):
```
-kubectl apply -f tekton/minimal-image-build/os/ace-minimal-image-pipeline-run.yaml
-kubectl apply -f tekton/minimal-image-build/os/ace-minimal-build-image-pipeline-run.yaml
+ - name: dockerRegistry
+ # OpenShift
+ value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
+ #value: "quay.io/trevor_dolby"
+ #value: "us.icr.io/ace-containers"
+ #value: "aceDemoRegistry.azurecr.io"
+ # Minikube
+ #value: "192.168.49.2:5000/default"
```
-to pick up the correct registry default.
+and then the pipelines can be run as usual.
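+For example, to build both images and follow the logs (the same command sequence suggested by apply-yaml.sh):
+```
+kubectl apply -f tekton/minimal-image-build/ace-minimal-build-image-pipeline-run.yaml
+tkn pr logs ace-minimal-build-image-pipeline-run-1 -f
+```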
diff --git a/tekton/minimal-image-build/ace-minimal-build-image-pipeline-run.yaml b/tekton/minimal-image-build/ace-minimal-build-image-pipeline-run.yaml
index 2191034..910002e 100644
--- a/tekton/minimal-image-build/ace-minimal-build-image-pipeline-run.yaml
+++ b/tekton/minimal-image-build/ace-minimal-build-image-pipeline-run.yaml
@@ -4,8 +4,32 @@ metadata:
name: ace-minimal-build-image-pipeline-run-1
spec:
serviceAccountName: ace-tekton-service-account
+ # Use this instead if building in a CP4i environment
+ # (normally only needed if using Maven)
+ #serviceAccountName: cp4i-tekton-service-account
pipelineRef:
name: ace-minimal-build-image-pipeline
params:
- name: dockerRegistry
- value: "us.icr.io/ace-containers"
\ No newline at end of file
+ # OpenShift
+ #value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
+ #value: "quay.io/trevor_dolby"
+ #value: "us.icr.io/ace-containers"
+ #value: "aceDemoRegistry.azurecr.io"
+ # Minikube
+ value: "192.168.49.2:5000/default"
+ - name: aceDownloadUrl
+ value: "https://iwm.dhe.ibm.com/sdfdl/v2/regs2/mbford/Xa.2/Xb.WJL1CuPI9omKj5inyv9-ir8PMDDUU8v2iYo-Oc5KPVI/Xc.12.0.11.0-ACE-LINUX64-DEVELOPER.tar.gz/Xd./Xf.lPr.D1vk/Xg.12260000/Xi.swg-wmbfd/XY.regsrvs/XZ._4mLAwxEK3xwdP7B6oAhifJgsNyp-ePc/12.0.11.0-ACE-LINUX64-DEVELOPER.tar.gz"
+ - name: runtimeImage
+ # Tag should match the product version; the image will be pushed to
+ # the dockerRegistry specified above.
+ value: "ace-minimal:12.0.11.0-alpine"
+ - name: buildImage
+ # This image is created in this pipeline, and is used as a build
+ # image in the main project pipeline. The image will be pushed to
+ # the dockerRegistry specified above.
+ value: "ace-minimal-build:12.0.11.0-alpine"
+ - name: url
+ value: "https://github.com/ot4i/ace-demo-pipeline"
+ - name: revision
+ value: "main"
diff --git a/tekton/minimal-image-build/ace-minimal-build-image-pipeline.yaml b/tekton/minimal-image-build/ace-minimal-build-image-pipeline.yaml
index dff9b0b..20be92e 100644
--- a/tekton/minimal-image-build/ace-minimal-build-image-pipeline.yaml
+++ b/tekton/minimal-image-build/ace-minimal-build-image-pipeline.yaml
@@ -6,6 +6,8 @@ spec:
params:
- name: dockerRegistry
type: string
+ - name: aceDownloadUrl
+ type: string
- name: url
type: string
default: "https://github.com/ot4i/ace-demo-pipeline"
@@ -14,12 +16,27 @@ spec:
default: "main"
- name: buildImage
type: string
- default: "ace-minimal-build:12.0.10.0-alpine"
+ default: "ace-minimal-build:12.0.11.0-alpine"
- name: runtimeImage
type: string
- default: "ace-minimal:12.0.10.0-alpine"
+ default: "ace-minimal:12.0.11.0-alpine"
+ - name: aceDockerFile
+ type: string
+ default: "experimental/ace-minimal/Dockerfile.alpine"
tasks:
- - name: build-images
+ - name: runtime-image
+ taskRef:
+ name: ace-minimal-image-build-and-push
+ params:
+ - name: outputRegistry
+ value: $(params.dockerRegistry)
+ - name: aceDownloadUrl
+ value: $(params.aceDownloadUrl)
+ - name: runtimeImage
+ value: $(params.runtimeImage)
+ - name: aceDockerFile
+ value: $(params.aceDockerFile)
+ - name: build-image
taskRef:
name: ace-minimal-build-image-build-and-push
params:
@@ -33,3 +50,5 @@ spec:
value: $(params.buildImage)
- name: runtimeImage
value: $(params.runtimeImage)
+ runAfter:
+ - runtime-image
diff --git a/tekton/minimal-image-build/ace-minimal-image-pipeline-run.yaml b/tekton/minimal-image-build/ace-minimal-image-pipeline-run.yaml
index 594fa01..3884a13 100644
--- a/tekton/minimal-image-build/ace-minimal-image-pipeline-run.yaml
+++ b/tekton/minimal-image-build/ace-minimal-image-pipeline-run.yaml
@@ -4,10 +4,23 @@ metadata:
name: ace-minimal-image-pipeline-run-1
spec:
serviceAccountName: ace-tekton-service-account
+ # Use this instead if building in a CP4i environment
+ # (normally only needed if using Maven)
+ #serviceAccountName: cp4i-tekton-service-account
pipelineRef:
name: ace-minimal-image-pipeline
params:
- name: dockerRegistry
- value: "us.icr.io/ace-containers"
+ # OpenShift
+ #value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
+ #value: "quay.io/trevor_dolby"
+ #value: "us.icr.io/ace-containers"
+ #value: "aceDemoRegistry.azurecr.io"
+ # Minikube
+ value: "192.168.49.2:5000/default"
- name: aceDownloadUrl
- value: "http://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/integration/12.0.10.0-ACE-LINUX64-DEVELOPER.tar.gz"
+ value: "https://iwm.dhe.ibm.com/sdfdl/v2/regs2/mbford/Xa.2/Xb.WJL1CuPI9omKj5inyv9-ir8PMDDUU8v2iYo-Oc5KPVI/Xc.12.0.11.0-ACE-LINUX64-DEVELOPER.tar.gz/Xd./Xf.lPr.D1vk/Xg.12260000/Xi.swg-wmbfd/XY.regsrvs/XZ._4mLAwxEK3xwdP7B6oAhifJgsNyp-ePc/12.0.11.0-ACE-LINUX64-DEVELOPER.tar.gz"
+ - name: runtimeImage
+ # Tag should match the product version; the image will be pushed to
+ # the dockerRegistry specified above.
+ value: "ace-minimal:12.0.11.0-alpine"
diff --git a/tekton/minimal-image-build/ace-minimal-image-pipeline.yaml b/tekton/minimal-image-build/ace-minimal-image-pipeline.yaml
index bd5c47b..e719770 100644
--- a/tekton/minimal-image-build/ace-minimal-image-pipeline.yaml
+++ b/tekton/minimal-image-build/ace-minimal-image-pipeline.yaml
@@ -8,15 +8,9 @@ spec:
type: string
- name: aceDownloadUrl
type: string
- - name: url
- type: string
- default: "https://github.com/ot4i/ace-demo-pipeline"
- - name: revision
- type: string
- default: "main"
- name: runtimeImage
type: string
- default: "ace-minimal:12.0.10.0-alpine"
+ default: "ace-minimal:12.0.11.0-alpine"
- name: aceDockerFile
type: string
default: "experimental/ace-minimal/Dockerfile.alpine"
@@ -25,14 +19,10 @@ spec:
taskRef:
name: ace-minimal-image-build-and-push
params:
- - name: dockerRegistry
+ - name: outputRegistry
value: $(params.dockerRegistry)
- name: aceDownloadUrl
value: $(params.aceDownloadUrl)
- - name: url
- value: $(params.url)
- - name: revision
- value: $(params.revision)
- name: runtimeImage
value: $(params.runtimeImage)
- name: aceDockerFile
diff --git a/tekton/minimal-image-build/aks/ace-minimal-build-image-pipeline-run-aks.yaml b/tekton/minimal-image-build/aks/ace-minimal-build-image-pipeline-run-aks.yaml
deleted file mode 100644
index 44c5342..0000000
--- a/tekton/minimal-image-build/aks/ace-minimal-build-image-pipeline-run-aks.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: PipelineRun
-metadata:
- name: ace-minimal-build-image-pipeline-run-1
-spec:
- serviceAccountName: ace-tekton-service-account
- pipelineRef:
- name: ace-minimal-build-image-pipeline
- params:
- - name: dockerRegistry
- value: "aceDemoRegistry.azurecr.io"
diff --git a/tekton/minimal-image-build/aks/ace-minimal-image-pipeline-run-aks.yaml b/tekton/minimal-image-build/aks/ace-minimal-image-pipeline-run-aks.yaml
deleted file mode 100644
index dd67d62..0000000
--- a/tekton/minimal-image-build/aks/ace-minimal-image-pipeline-run-aks.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: PipelineRun
-metadata:
- name: ace-minimal-image-pipeline-run-1
-spec:
- serviceAccountName: ace-tekton-service-account
- pipelineRef:
- name: ace-minimal-image-pipeline
- params:
- - name: dockerRegistry
- value: "aceDemoRegistry.azurecr.io"
- - name: aceDownloadUrl
- value: "http://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/integration/12.0.4.0-ACE-LINUX64-DEVELOPER.tar.gz"
diff --git a/tekton/minimal-image-build/apply-yaml.sh b/tekton/minimal-image-build/apply-yaml.sh
new file mode 100755
index 0000000..25fae2e
--- /dev/null
+++ b/tekton/minimal-image-build/apply-yaml.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Assumes the current shell has kubectl in PATH, is logged in, and has
+# the correct namespace set as default. The cluster is also assumed to
+# have Tekton installed and a service account created for the pipeline.
+
+# We might be run from the root of the repo or from the subdirectory
+export YAMLDIR=`dirname $0`
+
+set -e # Exit on error
+set -x # Show what we're doing
+kubectl apply -f ${YAMLDIR}/01-ace-minimal-image-build-and-push-task.yaml
+kubectl apply -f ${YAMLDIR}/02-ace-minimal-build-image-build-and-push-task.yaml
+kubectl apply -f ${YAMLDIR}/ace-minimal-build-image-pipeline.yaml
+kubectl apply -f ${YAMLDIR}/ace-minimal-image-pipeline.yaml
+
+set +x
+echo "Success; the pipeline can now be run after the *-run.yaml files are customized."
+echo "Use ${YAMLDIR}/ace-minimal-build-image-pipeline-run.yaml to build both images, or ${YAMLDIR}/ace-minimal-image-pipeline-run.yaml for only ace-minimal"
+echo
+echo "Example command sequence to run the pipeline and show the Tekton logs:"
+echo
+echo "kubectl apply -f ${YAMLDIR}/ace-minimal-build-image-pipeline-run.yaml ; tkn pr logs ace-minimal-build-image-pipeline-run-1 -f"
\ No newline at end of file
diff --git a/tekton/minimal-image-build/os/ace-minimal-build-image-pipeline-run-crc.yaml b/tekton/minimal-image-build/os/ace-minimal-build-image-pipeline-run-crc.yaml
deleted file mode 100644
index 14d19a5..0000000
--- a/tekton/minimal-image-build/os/ace-minimal-build-image-pipeline-run-crc.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: PipelineRun
-metadata:
- name: ace-minimal-build-image-pipeline-run-1
-spec:
- serviceAccountName: ace-tekton-service-account
- pipelineRef:
- name: ace-minimal-build-image-pipeline
- params:
- - name: dockerRegistry
- value: "image-registry.openshift-image-registry.svc:5000/default"
diff --git a/tekton/minimal-image-build/os/ace-minimal-build-image-pipeline-run.yaml b/tekton/minimal-image-build/os/ace-minimal-build-image-pipeline-run.yaml
deleted file mode 100644
index 4cd4ad8..0000000
--- a/tekton/minimal-image-build/os/ace-minimal-build-image-pipeline-run.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: PipelineRun
-metadata:
- name: ace-minimal-build-image-pipeline-run-1
-spec:
- serviceAccountName: ace-tekton-service-account
- pipelineRef:
- name: ace-minimal-build-image-pipeline
- params:
- - name: dockerRegistry
- value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
-# - name: buildImage
-# value: "ace-minimal-build:12.0.10.0-alpine-java11"
-# - name: runtimeImage
-# value: "ace-minimal:12.0.10.0-alpine-java11"
diff --git a/tekton/minimal-image-build/os/ace-minimal-image-pipeline-run-crc.yaml b/tekton/minimal-image-build/os/ace-minimal-image-pipeline-run-crc.yaml
deleted file mode 100644
index ac2eadf..0000000
--- a/tekton/minimal-image-build/os/ace-minimal-image-pipeline-run-crc.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: PipelineRun
-metadata:
- name: ace-minimal-image-pipeline-run-1
-spec:
- serviceAccountName: ace-tekton-service-account
- pipelineRef:
- name: ace-minimal-image-pipeline
- params:
- - name: dockerRegistry
- value: "image-registry.openshift-image-registry.svc:5000/default"
- - name: aceDownloadUrl
- value: "http://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/integration/12.0.4.0-ACE-LINUX64-DEVELOPER.tar.gz"
diff --git a/tekton/minimal-image-build/os/ace-minimal-image-pipeline-run.yaml b/tekton/minimal-image-build/os/ace-minimal-image-pipeline-run.yaml
deleted file mode 100644
index f5493b5..0000000
--- a/tekton/minimal-image-build/os/ace-minimal-image-pipeline-run.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: PipelineRun
-metadata:
- name: ace-minimal-image-pipeline-run-1
-spec:
- serviceAccountName: ace-tekton-service-account
- pipelineRef:
- name: ace-minimal-image-pipeline
- params:
- - name: dockerRegistry
- value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
- - name: aceDownloadUrl
- value: "https://iwm.dhe.ibm.com/sdfdl/v2/regs2/mbford/Xa.2/Xb.WJL1CuPI9omKj5inyv9-ir8PMDDUU8v2iYo-Oc5KPVI/Xc.12.0.8.0-ACE-LINUX64-DEVELOPER.tar.gz/Xd./Xf.lPr.D1vk/Xg.12260000/Xi.swg-wmbfd/XY.regsrvs/XZ._4mLAwxEK3xwdP7B6oAhifJgsNyp-ePc/12.0.8.0-ACE-LINUX64-DEVELOPER.tar.gz"
-# - name: runtimeImage
-# value: "ace-minimal:12.0.10.0-alpine-java11"
-# - name: aceDockerFile
-# value: "experimental/ace-minimal/Dockerfile.alpine-java11"
diff --git a/tekton/os/ace-pipeline-run-crc.yaml b/tekton/os/ace-pipeline-run-crc.yaml
deleted file mode 100644
index 3c880a7..0000000
--- a/tekton/os/ace-pipeline-run-crc.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: PipelineRun
-metadata:
- name: ace-pipeline-run-1
-spec:
- serviceAccountName: ace-tekton-service-account
- pipelineRef:
- name: ace-pipeline
- params:
- - name: dockerRegistry
- value: "image-registry.openshift-image-registry.svc:5000/default"
diff --git a/tekton/os/cp4i/12-ibmint-cp4i-build-task.yaml b/tekton/os/cp4i/12-ibmint-cp4i-build-task.yaml
new file mode 100644
index 0000000..7876db6
--- /dev/null
+++ b/tekton/os/cp4i/12-ibmint-cp4i-build-task.yaml
@@ -0,0 +1,290 @@
+apiVersion: tekton.dev/v1beta1
+kind: Task
+#
+# This task builds the main application, runs the unit tests, runs the component
+# tests, and builds two images: the first is the main application image, and the
+# second is the component test image (built using the first image as the base).
+#
+# The second image is not used in this task and is intended to be run in CP4i by
+# the next task. Component tests are run in this task as well but do not use the
+# CP4i-style configuration mechanism, so verification in CP4i is needed also.
+#
+metadata:
+ name: cp4i-build
+ namespace: cp4i
+spec:
+ # The security and environment settings are needed for OpenShift in a non-default
+ # namespace such as cp4i. The buildah build steps expect to run as root in the container.
+ stepTemplate:
+ securityContext:
+ runAsUser: 0
+ env:
+ - name: "HOME"
+ value: "/tekton/home"
+ - name: "LICENSE"
+ value: "accept"
+ params:
+ - name: outputRegistry
+ type: string
+ - name: url
+ type: string
+ - name: revision
+ type: string
+ - name: buildImage
+ type: string
+ - name: runtimeBaseImage
+ type: string
+ results:
+ - name: tag
+ description: image tag of the form 20240220135127-6fe9106
+ - name: sha
+ description: container image hash for the application container
+ - name: ctsha
+ description: container image hash for the component test container
+ steps:
+ - name: clone
+ image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
+ #
+ # The script clones the repo and sets the TAG result
+ #
+ script: |
+ #!/bin/sh
+ set -e # Fail on error
+ cd /work
+ git clone -b $(params.revision) $(params.url)
+ cd ace-demo-pipeline
+ export DATE=$(date '+%Y%m%d%H%M%S')
+ export COMMIT=$(git log -1 --pretty=%h)
+ export TAG="$DATE"-"$COMMIT"
+ echo Setting container tag to "$TAG"
+ echo -n "$TAG" > $(results.tag.path)
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: ibmint-build
+ image: $(params.buildImage)
+ #image: cp.icr.io/cp/appc/ace:12.0.11.0-r1
+ #
+ # Runs the build and unit test phases, leaving the results in the work directory
+ # for later steps.
+ #
+ script: |
+ #!/bin/bash
+
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
+ cd /work/ace-demo-pipeline
+ mkdir /work/ibmint-output
+ mqsicreateworkdir /work/ibmint-output/ace-server
+ # Using --compile-maps-and-schemas for 12.0.11 and later . . .
+ ibmint deploy --input-path . --output-work-directory /work/ibmint-output/ace-server --project TeaSharedLibraryJava --project TeaSharedLibrary --project TeaRESTApplication --compile-maps-and-schemas
+ ibmint optimize server --work-dir /work/ibmint-output/ace-server
+
+ # Copy the contents of the work directory into a new unit-test-specific work directory
+ # This avoids the risk of unit test files being deployed in the real containers, and
+ # is quicker than building the application again
+ mqsicreateworkdir /work/ut-work-dir
+ (cd /work/ibmint-output/ace-server && tar -cf - * ) | (cd /work/ut-work-dir && tar -xf - )
+ # Build just the unit tests
+ ibmint deploy --input-path . --output-work-directory /work/ut-work-dir --project TeaRESTApplication_UnitTest
+
+ # Run the unit tests
+ IntegrationServer -w /work/ut-work-dir --no-nodejs --start-msgflows false --test-project TeaRESTApplication_UnitTest
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: container-setup
+ image: $(params.buildImage)
+ #
+ # Copies files around and changes permissions to allow Kaniko/buildah to build the actual
+ # runtime image in the next step. Also copies the Dockerfile into place for Kaniko/buildah.
+ #
+ script: |
+ #!/bin/bash
+ cd /work/ibmint-output
+ cp /work/ace-demo-pipeline/tekton/os/cp4i/Dockerfile Dockerfile
+ # Fix permissions issues
+ chmod 777 /work/ibmint-output /work/ibmint-output/Dockerfile
+ chmod -R a+r /work/ibmint-output
+ find /work/ibmint-output -type d -print | xargs chmod 775
+ ls -l /work/ibmint-output
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: docker-build-and-push
+ #image: quay.io/buildah/stable:v1
+ image: registry.redhat.io/rhel8/buildah:8.9-5
+ securityContext:
+ runAsUser: 0
+ # Needed for hostPath volumes on OpenShift
+ privileged: true
+ capabilities:
+ add: ["CHOWN", "DAC_OVERRIDE","FOWNER","SETFCAP","SETGID","SETUID"]
+ # specifying DOCKER_CONFIG is required to allow buildah to detect docker credentials
+ env:
+ - name: "DOCKER_CONFIG"
+ value: "/tekton/home/.docker/"
+ script: |
+ date
+ export TAG=`cat $(results.tag.path)`
+ echo Using $TAG as image tag
+ buildah --storage-driver=overlay bud --format=oci --tls-verify=false --no-cache \
+ --build-arg BASE_IMAGE=$(params.runtimeBaseImage) \
+ -f /work/ibmint-output/Dockerfile -t $(params.outputRegistry)/tea-tekton-cp4i:$TAG /work/ibmint-output
+ date
+ buildah --storage-driver=overlay push --tls-verify=false --digestfile /tmp/image-digest \
+ $(params.outputRegistry)/tea-tekton-cp4i:$TAG "docker://$(params.outputRegistry)/tea-tekton-cp4i:$TAG"
+ echo image digest:
+ cat /tmp/image-digest && echo
+ cat /tmp/image-digest >> $(results.sha.path)
+ date
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: varlibcontainers
+ mountPath: /var/lib/containers
+ - name: component-test
+ image: $(params.buildImage)
+ #image: cp.icr.io/cp/appc/ace:12.0.11.0-r1
+ #
+ # Builds and runs the component tests using the JDBC credentials provided from
+ # the secret. Also uses the same init-creds.sh script used by the non-CP4i image
+ # to load credentials at startup.
+ #
+ # Leaves the resulting component test project in the work directory to be picked
+ # up by the second Kaniko build in the next step.
+ #
+ script: |
+ #!/bin/bash
+
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error
+
+ export PATH=/opt/ibm/ace-12/common/jdk/bin:$PATH
+ # Slightly hacky, but quicker than building everything again!
+ (cd /work/ibmint-output/ace-server && tar -cf - * ) | (cd /home/aceuser/ace-server && tar -xf - )
+ ls -l /home/aceuser/ace-server
+ # Set up credentials for the component tests; init-creds.sh looks in /tmp for policy
+ cp /work/ace-demo-pipeline/demo-infrastructure/TEAJDBC.policyxml /tmp/
+ bash /work/ace-demo-pipeline/demo-infrastructure/init-creds.sh
+ # Build and run the tests
+ cd /work/ace-demo-pipeline
+
+ # Build just the component tests
+ ibmint deploy --input-path . --output-work-directory /home/aceuser/ace-server --project TeaRESTApplication_ComponentTest
+
+ # Run the component tests
+ IntegrationServer -w /home/aceuser/ace-server --no-nodejs --start-msgflows false --test-project TeaRESTApplication_ComponentTest
+
+ # Quicker than building everything again - we just copy the component test work directory
+ # into the correct location for the container build in the next step
+ (cd /home/aceuser/ace-server && tar -cf - * ) | (cd /work/ibmint-output/ace-server && tar -xf - )
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: secret-volume-2
+ mountPath: /var/run/secrets/jdbc
+ - name: container-setup-ct
+ image: $(params.buildImage)
+ #
+ # Copies files around and changes permissions to allow buildah to build the component
+ # test image in the next step. Also copies the Dockerfile into place for buildah.
+ #
+ script: |
+ #!/bin/bash
+ cd /work/ibmint-output
+ cp /work/ace-demo-pipeline/tekton/os/cp4i/Dockerfile Dockerfile
+ # Fix permissions issues
+ chmod 777 /work/ibmint-output /work/ibmint-output/Dockerfile
+ chmod -R a+r /work/ibmint-output
+ find /work/ibmint-output -type d -print | xargs chmod 775
+ ls -l /work/ibmint-output/ace-server/run
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: docker-build-and-push-ct
+ #image: quay.io/buildah/stable:v1
+ image: registry.redhat.io/rhel8/buildah:8.9-5
+ securityContext:
+ runAsUser: 0
+ # Needed for hostPath volumes on OpenShift
+ privileged: true
+ capabilities:
+ add: ["CHOWN", "DAC_OVERRIDE","FOWNER","SETFCAP","SETGID","SETUID"]
+ # specifying DOCKER_CONFIG is required to allow buildah to detect docker credentials
+ env:
+ - name: "DOCKER_CONFIG"
+ value: "/tekton/home/.docker/"
+ script: |
+ date
+ export TAG=`cat $(results.tag.path)`
+ echo Using $TAG as image tag
+ buildah --storage-driver=overlay bud --format=oci --tls-verify=false --no-cache \
+ --build-arg BASE_IMAGE=$(params.outputRegistry)/tea-tekton-cp4i:$TAG \
+ -f /work/ibmint-output/Dockerfile -t $(params.outputRegistry)/tea-tekton-cp4i-ct:$TAG /work/ibmint-output
+ date
+ buildah --storage-driver=overlay push --tls-verify=false --digestfile /tmp/image-digest \
+ $(params.outputRegistry)/tea-tekton-cp4i-ct:$TAG "docker://$(params.outputRegistry)/tea-tekton-cp4i-ct:$TAG"
+ echo image digest:
+ cat /tmp/image-digest && echo
+ cat /tmp/image-digest >> $(results.ctsha.path)
+ date
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: varlibcontainers
+ mountPath: /var/lib/containers
+ volumes:
+ - name: work
+ emptyDir: {}
+ - name: secret-volume-2
+ secret:
+ secretName: jdbc-secret
+ #
+ # Default buildah approach using emptyDir; takes about 2 minutes on a test SNO cluster
+ #
+ - name: varlibcontainers
+ emptyDir: {}
+ #
+ # Local directory for this pipeline; takes a few seconds after initial pull
+ #
+ #- name: varlibcontainers
+ # hostPath:
+ # path: '/var/hostPath/buildah-cache'
+ # type: Directory
+ #
+ # Sharing the host containers; takes a few seconds
+ #
+ #- name: varlibcontainers
+ # hostPath:
+ # path: '/var/lib/containers'
+ # type: Directory
+ #
+ # Local disk using LVM operator on SNO; same speed as hostPath
+ #
+ #- name: varlibcontainers
+ # persistentVolumeClaim:
+ # claimName: buildah-cache
+ #
+ # NFS mount from same subnet; initial pull took 35 minutes, and
+ # subsequent builds took around 9 minutes.
+ #
+ # May also see messages like
+ #
+ # time="2024-02-15T00:55:11Z" level=error msg="'overlay' is not supported over nfs at \"/var/lib/containers/storage/overlay\""
+ #
+ # or possibly failing with
+ #
+ # time="2024-02-15T20:05:58Z" level=warning msg="Network file system detected as backing store. Enforcing overlay option `force_mask=\"700\"`. Add it to storage.conf to silence this warning"
+ # Error: mount /var/lib/containers/storage/overlay:/var/lib/containers/storage/overlay, flags: 0x1000: permission denied
+ # time="2024-02-15T20:05:58Z" level=warning msg="Network file system detected as backing store. Enforcing overlay option `force_mask=\"700\"`. Add it to storage.conf to silence this warning"
+ # time="2024-02-15T20:05:58Z" level=warning msg="failed to shutdown storage: \"mount /var/lib/containers/storage/overlay:/var/lib/containers/storage/overlay, flags: 0x1000: permission denied\""
+ #
+ # if not running privileged
+ #- name: varlibcontainers
+ # persistentVolumeClaim:
+ # claimName: buildah-cache-nfs
diff --git a/tekton/os/cp4i/12-maven-cp4i-build-task.yaml b/tekton/os/cp4i/12-maven-cp4i-build-task.yaml
index 4c7cb96..6775a94 100644
--- a/tekton/os/cp4i/12-maven-cp4i-build-task.yaml
+++ b/tekton/os/cp4i/12-maven-cp4i-build-task.yaml
@@ -10,7 +10,7 @@ kind: Task
# CP4i-style configuration mechanism, so verification in CP4i is needed also.
#
metadata:
- name: maven-cp4i-build
+ name: cp4i-build
namespace: cp4i
spec:
# The security and environment settings are needed for OpenShift in a non-default
@@ -22,7 +22,7 @@ spec:
- name: "HOME"
value: "/tekton/home"
params:
- - name: dockerRegistry
+ - name: outputRegistry
type: string
- name: url
type: string
@@ -30,23 +30,37 @@ spec:
type: string
- name: buildImage
type: string
- - name: runtimeImage
+ - name: runtimeBaseImage
type: string
+ results:
+ - name: tag
+ description: image tag of the form 20240220135127-6fe9106
+ - name: sha
+ description: container image hash for the application container
+ - name: ctsha
+ description: container image hash for the component test container
steps:
- name: clone
image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
#
- # The script just clones the repo but could be extended.
+ # The script clones the repo and sets the TAG result
#
script: |
#!/bin/sh
+ set -e # Fail on error
cd /work
git clone -b $(params.revision) $(params.url)
+ cd ace-demo-pipeline
+ export DATE=$(date '+%Y%m%d%H%M%S')
+ export COMMIT=$(git log -1 --pretty=%h)
+ export TAG="$DATE"-"$COMMIT"
+ echo Setting container tag to "$TAG"
+ echo -n "$TAG" > $(results.tag.path)
volumeMounts:
- mountPath: /work
name: work
- name: maven-build
- image: $(params.dockerRegistry)/$(params.buildImage)
+ image: $(params.buildImage)
#
# Runs the build and unit test phases, leaving the results in the work directory
# for later steps.
@@ -65,7 +79,7 @@ spec:
- mountPath: /work
name: work
- name: container-setup
- image: $(params.dockerRegistry)/$(params.buildImage)
+ image: $(params.buildImage)
#
# Copies files around and changes permissions to allow Kaniko to build the actual
# runtime image in the next step. Also copies the Dockerfile into place for Kaniko.
@@ -83,35 +97,39 @@ spec:
- mountPath: /work
name: work
- name: docker-build-and-push
- # kaniko is expecting to run as root to build images
+ #image: quay.io/buildah/stable:v1
+ image: registry.redhat.io/rhel8/buildah:8.9-5
securityContext:
runAsUser: 0
+ # Needed for hostPath volumes on OpenShift
+ #privileged: true
capabilities:
add: ["CHOWN", "DAC_OVERRIDE","FOWNER","SETFCAP","SETGID","SETUID"]
- image: gcr.io/kaniko-project/executor:latest
- # specifying DOCKER_CONFIG is required to allow kaniko to detect docker credential
+ # specifying DOCKER_CONFIG is required to allow buildah to detect docker credentials
env:
- name: "DOCKER_CONFIG"
value: "/tekton/home/.docker/"
- command:
- - /kaniko/executor
- args:
- - --dockerfile=/work/maven-output/Dockerfile
- - --destination=$(params.dockerRegistry)/tea-tekton-cp4i
- - --context=/work/maven-output
- - --build-arg=BASE_IMAGE=$(params.dockerRegistry)/$(params.runtimeImage)
- - --skip-tls-verify
- - --snapshot-mode=redo
- - --use-new-run
- #- --cache-dir=/cache
- #- --cache=true
+ script: |
+ date
+ export TAG=`cat $(results.tag.path)`
+ echo Using $TAG as image tag
+ buildah --storage-driver=overlay bud --format=oci --tls-verify=false --no-cache \
+ --build-arg BASE_IMAGE=$(params.runtimeBaseImage) \
+ -f /work/maven-output/Dockerfile -t $(params.outputRegistry)/tea-tekton-cp4i:$TAG /work/maven-output
+ date
+ buildah --storage-driver=overlay push --tls-verify=false --digestfile /tmp/image-digest \
+ $(params.outputRegistry)/tea-tekton-cp4i:$TAG "docker://$(params.outputRegistry)/tea-tekton-cp4i:$TAG"
+ echo image digest:
+ cat /tmp/image-digest && echo
+ cat /tmp/image-digest >> $(results.sha.path)
+ date
volumeMounts:
- mountPath: /work
name: work
- #- name: kaniko-cache
- # mountPath: /cache
+ - name: varlibcontainers
+ mountPath: /var/lib/containers
- name: component-test
- image: $(params.dockerRegistry)/$(params.buildImage)
+ image: $(params.buildImage)
#
# Builds and runs the component tests using the JDBC credentials provided from
# the secret. Also uses the same init-creds.sh script used by the non-CP4i image
@@ -132,6 +150,9 @@ spec:
cp /work/ace-demo-pipeline/demo-infrastructure/TEAJDBC.policyxml /tmp/
bash /work/ace-demo-pipeline/demo-infrastructure/init-creds.sh
# Build and run the tests
+
+ set -e # Fail on error
+
cd /work/ace-demo-pipeline/TeaRESTApplication_ComponentTest
mvn --no-transfer-progress -Dct.work.directory=/home/aceuser/ace-server verify
# Slightly hacky, but quicker than building everything again!
@@ -142,7 +163,7 @@ spec:
- name: secret-volume-2
mountPath: /var/run/secrets/jdbc
- name: container-setup-ct
- image: $(params.dockerRegistry)/$(params.buildImage)
+ image: $(params.buildImage)
#
# Copies files around and changes permissions to allow Kaniko to build the component
# test image in the next step. Also copies the Dockerfile into place for Kaniko.
@@ -160,39 +181,76 @@ spec:
- mountPath: /work
name: work
- name: docker-build-and-push-ct
- # kaniko is expecting to run as root to build images
+ #image: quay.io/buildah/stable:v1
+ image: registry.redhat.io/rhel8/buildah:8.9-5
securityContext:
runAsUser: 0
+ # Needed for hostPath volumes on OpenShift
+ #privileged: true
capabilities:
add: ["CHOWN", "DAC_OVERRIDE","FOWNER","SETFCAP","SETGID","SETUID"]
- image: gcr.io/kaniko-project/executor:latest
- # specifying DOCKER_CONFIG is required to allow kaniko to detect docker credential
+ # specifying DOCKER_CONFIG is required to allow buildah to detect docker credentials
env:
- name: "DOCKER_CONFIG"
value: "/tekton/home/.docker/"
- command:
- - /kaniko/executor
- args:
- - --dockerfile=/work/maven-output/Dockerfile
- - --destination=$(params.dockerRegistry)/tea-tekton-cp4i-ct
- - --context=/work/maven-output
- - --build-arg=BASE_IMAGE=$(params.dockerRegistry)/tea-tekton-cp4i
- - --skip-tls-verify
- - --snapshot-mode=redo
- - --use-new-run
- #- --cache-dir=/cache
- #- --cache=true
+ script: |
+ date
+ export TAG=`cat $(results.tag.path)`
+ echo Using $TAG as image tag
+ buildah --storage-driver=overlay bud --format=oci --tls-verify=false --no-cache \
+ --build-arg BASE_IMAGE=$(params.outputRegistry)/tea-tekton-cp4i:$TAG \
+ -f /work/maven-output/Dockerfile -t $(params.outputRegistry)/tea-tekton-cp4i-ct:$TAG /work/maven-output
+ date
+ buildah --storage-driver=overlay push --tls-verify=false --digestfile /tmp/image-digest \
+ $(params.outputRegistry)/tea-tekton-cp4i-ct:$TAG "docker://$(params.outputRegistry)/tea-tekton-cp4i-ct:$TAG"
+ echo image digest:
+ cat /tmp/image-digest && echo
+ cat /tmp/image-digest >> $(results.ctsha.path)
+ date
volumeMounts:
- mountPath: /work
name: work
- #- name: kaniko-cache
- # mountPath: /cache
+ - name: varlibcontainers
+ mountPath: /var/lib/containers
volumes:
- name: work
emptyDir: {}
- name: secret-volume-2
secret:
secretName: jdbc-secret
- #- name: kaniko-cache
+ #
+ # Default buildah approach using emptyDir; takes about 2 minutes on a test SNO cluster
+ #
+ - name: varlibcontainers
+ emptyDir: {}
+ #
+ # Local directory for this pipeline; takes a few seconds after initial pull
+ #
+ #- name: varlibcontainers
+ # hostPath:
+ # path: '/var/hostPath/buildah-cache'
+ # type: Directory
+ #
+ # Sharing the host containers; takes a few seconds
+ #
+ #- name: varlibcontainers
+ # hostPath:
+ # path: '/var/lib/containers'
+ # type: Directory
+ #
+ # Local disk using LVM operator on SNO; same speed as hostPath
+ #
+ #- name: varlibcontainers
+ # persistentVolumeClaim:
+ # claimName: buildah-cache
+ #
+ # NFS mount from same subnet; initial pull took 35 minutes, and
+ # subsequent builds took around 9 minutes.
+ #
+ # May also see messages like
+ #
+ # time="2024-02-15T00:55:11Z" level=error msg="'overlay' is not supported over nfs at \"/var/lib/containers/storage/overlay\""
+ #
+ #- name: varlibcontainers
# persistentVolumeClaim:
- # claimName: kaniko-cache-pvc
+ # claimName: buildah-cache-nfs
diff --git a/tekton/os/cp4i/13-component-test-in-cp4i-task.yaml b/tekton/os/cp4i/13-component-test-in-cp4i-task.yaml
index 498c852..219ef32 100644
--- a/tekton/os/cp4i/13-component-test-in-cp4i-task.yaml
+++ b/tekton/os/cp4i/13-component-test-in-cp4i-task.yaml
@@ -25,6 +25,10 @@ spec:
type: string
- name: revision
type: string
+ - name: tag
+ type: string
+ - name: ctsha
+ type: string
steps:
- name: clone
image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
@@ -39,6 +43,10 @@ spec:
export REG_WITH_ESCAPED_SLASH=`echo $(params.dockerRegistry) | sed 's/\//\\\\\\//g'`
echo Using $REG_WITH_ESCAPED_SLASH as registry name
sed -i "s/DOCKER_REGISTRY/$REG_WITH_ESCAPED_SLASH/g" /work/ace-demo-pipeline/tekton/os/cp4i/*.yaml
+ export SHA=$(params.ctsha)
+ export TAG=$(params.tag)
+ echo Using $TAG@$SHA as image tag
+ sed -i "s/IMAGE_TAG/$TAG@$SHA/g" /work/ace-demo-pipeline/tekton/os/cp4i/*.yaml
volumeMounts:
- mountPath: /work
name: work
@@ -89,11 +97,14 @@ spec:
#!/bin/sh
#set -x
cd /work
+ echo "CR YAML for component test:"
+ cat /work/ace-demo-pipeline/tekton/os/cp4i/create-integrationruntime-ct.yaml
+ echo
echo "Creating CR for component test at " `date`
kubectl apply -f /work/ace-demo-pipeline/tekton/os/cp4i/create-integrationruntime-ct.yaml
rc=1
echo "Starting polling for operator IR at " `date`
- for i in `seq 1 24`
+ for i in `seq 1 60`
do
#echo $i
#kubectl get -n cp4i IntegrationRuntime/tea-tekton-cp4i-ct
@@ -142,6 +153,7 @@ spec:
if [ "$lastValue" == "IntegrationServer" ]; then
echo "Server still running at " `date`
else
+ echo "Server stopped at " `date`
rc=0
break
fi
diff --git a/tekton/os/cp4i/14-force-image-pull-cp4i-task.yaml b/tekton/os/cp4i/14-force-image-pull-cp4i-task.yaml
deleted file mode 100644
index 382d4cf..0000000
--- a/tekton/os/cp4i/14-force-image-pull-cp4i-task.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: Task
-metadata:
- name: force-image-pull-cp4i
- namespace: cp4i
-spec:
- params:
- - name: dockerRegistry
- type: string
- steps:
- - name: clone
- image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
- script: |
- #!/bin/sh
- #set -x
- cd /work
- git clone "https://github.com/ot4i/ace-demo-pipeline"
- export REG_WITH_ESCAPED_SLASH=`echo $(params.dockerRegistry) | sed 's/\//\\\\\\//g'`
- echo Using $REG_WITH_ESCAPED_SLASH as registry name
- sed -i "s/DOCKER_REGISTRY/$REG_WITH_ESCAPED_SLASH/g" /work/ace-demo-pipeline/tekton/os/cp4i/*.yaml
- volumeMounts:
- - mountPath: /work
- name: work
- - name: pull-images
- image: lachlanevenson/k8s-kubectl
- script: |
- #!/bin/sh
- #set -x
- cd /work
- kubectl get pods -n cp4i
-
- echo "Checking for previous componenimage pull pods at " `date`
- kubectl get pod -n cp4i force-pull-cp4i
- if [ "$?" == "0" ]; then
- echo "Found pod; deleting it at " `date`
- kubectl delete pod -n cp4i force-pull-cp4i
- sleep 5
- else
- echo "No pod found at " `date`
- fi
- echo "Creating pod to force the pull of images at " `date`
- kubectl apply -f /work/ace-demo-pipeline/tekton/os/cp4i/force-pull-cp4i.yaml
- rc=1
- echo "Starting polling for pod to finish at " `date`
- for i in `seq 1 24`
- do
- echo $i
- kubectl get pod -n cp4i force-pull-cp4i
- lastError=`kubectl get pod -n cp4i force-pull-cp4i --template={{.status.phase}} 2>&1`
- if [ "$lastError" == "Succeeded" ]; then
- echo "Pod image pull complete at " `date`
- kubectl delete pod -n cp4i force-pull-cp4i
- rc=0
- break
- fi
- sleep 5
- done
- if [ "$rc" == "1" ]; then
- echo "Image pull failed; giving up at " `date`
- echo "Current state of the pod:"
- echo "----------------------------------------"
- kubectl get pod -n cp4i force-pull-cp4i
- echo "----------------------------------------"
- return 1
- fi
- volumeMounts:
- - mountPath: /work
- name: work
- volumes:
- - name: work
- emptyDir: {}
diff --git a/tekton/os/cp4i/22-deploy-to-cp4i-task.yaml b/tekton/os/cp4i/22-deploy-to-cp4i-task.yaml
index ec086c4..4d29b20 100644
--- a/tekton/os/cp4i/22-deploy-to-cp4i-task.yaml
+++ b/tekton/os/cp4i/22-deploy-to-cp4i-task.yaml
@@ -7,6 +7,14 @@ spec:
params:
- name: dockerRegistry
type: string
+ - name: url
+ type: string
+ - name: revision
+ type: string
+ - name: tag
+ type: string
+ - name: sha
+ type: string
steps:
- name: clone
image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
@@ -14,10 +22,14 @@ spec:
#!/bin/sh
set -x
cd /work
- git clone "https://github.com/ot4i/ace-demo-pipeline"
+ git clone -b $(params.revision) $(params.url)
export REG_WITH_ESCAPED_SLASH=`echo $(params.dockerRegistry) | sed 's/\//\\\\\\//g'`
echo $REG_WITH_ESCAPED_SLASH
sed -i "s/DOCKER_REGISTRY/$REG_WITH_ESCAPED_SLASH/g" /work/ace-demo-pipeline/tekton/os/cp4i/*.yaml
+ export SHA=$(params.sha)
+ export TAG=$(params.tag)
+ echo Using $TAG@$SHA as image tag
+ sed -i "s/IMAGE_TAG/$TAG@$SHA/g" /work/ace-demo-pipeline/tekton/os/cp4i/*.yaml
volumeMounts:
- mountPath: /work
name: work
@@ -29,6 +41,9 @@ spec:
cd /work
kubectl get pods -n cp4i
+ echo "CR YAML for application:"
+ cat /work/ace-demo-pipeline/tekton/os/cp4i/create-integrationruntime.yaml
+ echo
echo "Creating CR for application at " `date`
kubectl apply -f /work/ace-demo-pipeline/tekton/os/cp4i/create-integrationruntime.yaml
rc=1
diff --git a/tekton/os/cp4i/40-kaniko-cache-warmer-cp4i-task.yaml b/tekton/os/cp4i/40-kaniko-cache-warmer-cp4i-task.yaml
deleted file mode 100644
index ad1af92..0000000
--- a/tekton/os/cp4i/40-kaniko-cache-warmer-cp4i-task.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: Task
-metadata:
- name: kaniko-cache-warmer-cp4i
- namespace: cp4i
-spec:
- # The security and environment settings are needed for OpenShift in a non-default
- # namespace such as cp4i. Kaniko is expecting to be root in the container.
- stepTemplate:
- securityContext:
- runAsUser: 0
- env:
- - name: "HOME"
- value: "/tekton/home"
- steps:
- - name: kaniko-warmer
- # kaniko is expecting to run as root to build images
- securityContext:
- runAsUser: 0
- capabilities:
- add: ["CHOWN", "DAC_OVERRIDE","FOWNER","SETFCAP","SETGID","SETUID"]
- image: gcr.io/kaniko-project/warmer:latest
- # specifying DOCKER_CONFIG is required to allow kaniko to detect docker credential
- env:
- - name: "DOCKER_CONFIG"
- value: "/tekton/home/.docker/"
- args:
- - --image=cp.icr.io/cp/appc/ace-server-prod:12.0.10.0-r1-20230421-154140
- - --cache-dir=/cache
- volumeMounts:
- - name: kaniko-cache
- mountPath: /cache
- volumes:
- - name: kaniko-cache
- persistentVolumeClaim:
- claimName: kaniko-cache-pvc
diff --git a/tekton/os/cp4i/README.md b/tekton/os/cp4i/README.md
index 50ced1d..be83996 100644
--- a/tekton/os/cp4i/README.md
+++ b/tekton/os/cp4i/README.md
@@ -4,12 +4,12 @@ This pipeline is similar to the main project pipeline, but is designed to work w
and uses the App Connect Enterprise certified containers for runtime. It also runs component tests in a CP4i container to
allow JDBC connections to be tested using the same CP4i configurations used by the deployed application itself.
-![Pipeline overview](images/cp4i-pipeline.png)
+![Pipeline overview](/demo-infrastructure/images/tekton-cp4i-pipeline.png)
## Container builds
The pipeline creates the main application image first, and then builds the component test image on top of the first image.
-Kaniko is used to build both images in the pipeline, with Maven building the applications and libraries.
+Buildah (previously Kaniko) is used to build both images in the pipeline, with ibmint or Maven building the applications and libraries.
![Container images](images/cp4i-container-images.png)
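
Conceptually the two builds chain together as sketched below. This is illustrative shell only, not the task's actual script: `REGISTRY`, `TAG`, `RUNTIME_BASE_IMAGE`, `APP_CONTEXT`, and `CT_CONTEXT` are placeholders, and the real commands (including the storage-driver and TLS options) live in the build task YAML.

```
# First build: application image, based on the ACE runtime base image
buildah bud --build-arg BASE_IMAGE="$RUNTIME_BASE_IMAGE" \
  -t "$REGISTRY/tea-tekton-cp4i:$TAG" "$APP_CONTEXT"

# Second build: component-test image, layered on top of the image just built
buildah bud --build-arg BASE_IMAGE="$REGISTRY/tea-tekton-cp4i:$TAG" \
  -t "$REGISTRY/tea-tekton-cp4i-ct:$TAG" "$CT_CONTEXT"
```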
@@ -29,11 +29,15 @@ The test run strategy is as follows:
- Collect the output and return code from kubectl exec as usual, allowing the pipeline to stop on failed tests.
- Delete the CR, and then send another kill -INT 1 to make the runaceserver code exit.
-See [13-component-test-in-cp4i-task.yaml](13-component-test-in-cp4i-task.yaml) for details on running the tests.
+See [13-component-test-in-cp4i-task.yaml](13-component-test-in-cp4i-task.yaml) for details on running the tests. Despite
+MQSI_PREVENT_CONTAINER_SHUTDOWN being set, there are still liveness probes running in the background, and these check for
+port 7600 to be active. In most cases, the component test server will start quickly enough to be listening on port 7600
+before the container is killed, but it is possible that very slow server startup (on an overloaded node, for example)
+could miss the required window. Setting `livenessProbe.failureThreshold` (see [Integration Runtime Reference](https://www.ibm.com/docs/en/app-connect/containers_cd?topic=resources-integration-runtime-reference)) to a large value should eliminate this issue.
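
As a rough sketch only (check the exact field path against the Integration Runtime Reference; the value is illustrative), the setting would sit under the runtime container in the component-test CR template:

```
spec:
  template:
    spec:
      containers:
        - name: runtime
          livenessProbe:
            # Large enough to ride out a slow server startup
            failureThreshold: 30
```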
-Note that this splits responsibilities between the ACE operator (create the work directory and run the initial server) and the
-ACE product itself (run the tests and report the results); the operator support code in the container does not know anything
-about running tests.
+Note that this approach splits responsibilities between the ACE operator (create the work directory and run the initial server)
+and the ACE product itself (run the tests and report the results); the operator support code in the container does not know
+anything about running tests.
- Anything that would also affect production (such as issues with CP4i configuration formats and other related matters) would fall under CP4i support.
- Issues with ACE application code, JUnit options, etc, would fall under ACE product support.
- As the tests are using the operator, the [ot4i/ace-docker](https://github.com/ot4i/ace-docker) repo is not involved, so issues should be
@@ -42,7 +46,7 @@ about running tests.
## Pipeline setup and run
Many of the steps are the same as the main repo, but use the `cp4i` namespace. Security constraints are more of an issue
-in OpenShift, and Kaniko seems to require quite a lot of extra permissions when not running in the default namespace.
+in OpenShift, and buildah/Kaniko seems to require quite a lot of extra permissions when not running in the default namespace.
The pipeline assumes the CP4i ACE integration server image has been copied to the local image registry to make the
container builds go faster; the image must match the locations in the YAML files. See
@@ -72,7 +76,7 @@ Configurations need to be created for the JDBC credentials (teajdbc-policy and t
in a server.conf.yaml configuration (default-policy). See [configurations/README.md](configurations/README.md) for details.
The JDBC credentials also need to be placed in a Kubernetes secret called `jdbc-secret` so that the non-CP4i
-component test can access them during the pipeline run. This step (`component-test` in [maven-cp4i-build](12-maven-cp4i-build-task.yaml))
+component test can access them during the pipeline run. This step (`component-test` in [ibmint-cp4i-build](12-ibmint-cp4i-build-task.yaml))
proves that the code itself is working and connections are possible to the specified DB2 instance, while the later
[CP4i-based component test](13-component-test-in-cp4i-task.yaml) demonstrates that the configurations are also valid
and that the ACE server in the certified container can connect to DB2.
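
A minimal sketch of creating that secret is shown below; the key names are placeholders only and must be aligned with whatever `init-creds.sh` expects to read from the mounted `/var/run/secrets/jdbc` directory.

```
# Placeholder key names -- align these with demo-infrastructure/init-creds.sh
kubectl create secret generic jdbc-secret -n cp4i \
  --from-literal=USERID='db2inst1' \
  --from-literal=PASSWORD='changeme' \
  --from-literal=databaseName='BLUDB' \
  --from-literal=serverName='db2.example.com' \
  --from-literal=portNumber='50000'
```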
@@ -84,7 +88,7 @@ kubectl create secret -n cp4i docker-registry regcred --docker-server=image-regi
kubectl apply -f tekton/os/cp4i/cp4i-scc.yaml
kubectl apply -f tekton/os/cp4i/service-account-cp4i.yaml
oc adm policy add-scc-to-user cp4i-scc -n cp4i -z cp4i-tekton-service-account
-kubectl apply -f tekton/os/cp4i/12-maven-cp4i-build-task.yaml
+kubectl apply -f tekton/os/cp4i/12-ibmint-cp4i-build-task.yaml
kubectl apply -f tekton/os/cp4i/13-component-test-in-cp4i-task.yaml
kubectl apply -f tekton/os/cp4i/22-deploy-to-cp4i-task.yaml
kubectl apply -f tekton/os/cp4i/cp4i-pipeline.yaml
diff --git a/tekton/os/cp4i/cp4i-force-image-pull-taskrun.yaml b/tekton/os/cp4i/cp4i-force-image-pull-taskrun.yaml
deleted file mode 100644
index 211995e..0000000
--- a/tekton/os/cp4i/cp4i-force-image-pull-taskrun.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: TaskRun
-metadata:
- name: cp4i-force-image-pull-taskrun-1
- namespace: cp4i
-spec:
- serviceAccountName: cp4i-tekton-service-account
- taskRef:
- name: force-image-pull-cp4i
- params:
- - name: dockerRegistry
- value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
diff --git a/tekton/os/cp4i/cp4i-kaniko-cache-warmer-taskrun.yaml b/tekton/os/cp4i/cp4i-kaniko-cache-warmer-taskrun.yaml
deleted file mode 100644
index 28da17a..0000000
--- a/tekton/os/cp4i/cp4i-kaniko-cache-warmer-taskrun.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: TaskRun
-metadata:
- name: cp4i-kaniko-cache-warmer-taskrun-1
- namespace: cp4i
-spec:
- serviceAccountName: cp4i-tekton-service-account
- taskRef:
- name: kaniko-cache-warmer-cp4i
diff --git a/tekton/os/cp4i/cp4i-pipeline-ct-only.yaml b/tekton/os/cp4i/cp4i-pipeline-ct-only.yaml
deleted file mode 100644
index 44e91b3..0000000
--- a/tekton/os/cp4i/cp4i-pipeline-ct-only.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: Pipeline
-metadata:
- name: cp4i-pipeline
- namespace: cp4i
-spec:
- params:
- - name: dockerRegistry
- type: string
- tasks:
- - name: component-test-in-cp4i
- taskRef:
- name: component-test-in-cp4i
- params:
- - name: dockerRegistry
- value: $(params.dockerRegistry)
diff --git a/tekton/os/cp4i/cp4i-pipeline-deploy-only.yaml b/tekton/os/cp4i/cp4i-pipeline-deploy-only.yaml
deleted file mode 100644
index ea13313..0000000
--- a/tekton/os/cp4i/cp4i-pipeline-deploy-only.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: Pipeline
-metadata:
- name: cp4i-pipeline
- namespace: cp4i
-spec:
- params:
- - name: dockerRegistry
- type: string
- tasks:
- - name: deploy-to-cp4i
- taskRef:
- name: deploy-to-cp4i
- params:
- - name: dockerRegistry
- value: $(params.dockerRegistry)
diff --git a/tekton/os/cp4i/cp4i-pipeline-run.yaml b/tekton/os/cp4i/cp4i-pipeline-run.yaml
index b4b0252..74f6089 100644
--- a/tekton/os/cp4i/cp4i-pipeline-run.yaml
+++ b/tekton/os/cp4i/cp4i-pipeline-run.yaml
@@ -10,3 +10,12 @@ spec:
params:
- name: dockerRegistry
value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
+ - name: url
+ value: "https://github.com/ot4i/ace-demo-pipeline"
+ - name: revision
+ value: "main"
+ - name: buildImage
+ #value: "cp.icr.io/cp/appc/ace:12.0.11.0-r1"
+ value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-minimal-build:12.0.11.0-alpine"
+ - name: runtimeBaseImage
+ value: "image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-server-prod:12.0.11.1-r1-20240125-170703"
diff --git a/tekton/os/cp4i/cp4i-pipeline.yaml b/tekton/os/cp4i/cp4i-pipeline.yaml
index 4a2898b..3c2d81e 100644
--- a/tekton/os/cp4i/cp4i-pipeline.yaml
+++ b/tekton/os/cp4i/cp4i-pipeline.yaml
@@ -7,6 +7,7 @@ spec:
params:
- name: dockerRegistry
type: string
+ default: "image-registry.openshift-image-registry.svc.cluster.local:5000/default"
- name: url
type: string
default: "https://github.com/ot4i/ace-demo-pipeline"
@@ -15,16 +16,18 @@ spec:
default: "main"
- name: buildImage
type: string
- default: "ace-minimal-build:12.0.10.0-alpine"
- - name: runtimeImage
+ default: "cp.icr.io/cp/appc/ace:12.0.11.0-r1"
+ #default: "image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-minimal-build:12.0.10.0-alpine"
+ - name: runtimeBaseImage
type: string
- default: "ace-server-prod:12.0.10.0-r1"
+ default: "cp.icr.io/cp/appc/ace-server-prod:12.0.11.1-r1-20240125-170703"
+ #default: "image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-server-prod:12.0.11.1-r1-20240125-170703"
tasks:
- name: build-from-source
taskRef:
- name: maven-cp4i-build
+ name: cp4i-build
params:
- - name: dockerRegistry
+ - name: outputRegistry
value: $(params.dockerRegistry)
- name: url
value: $(params.url)
@@ -32,8 +35,8 @@ spec:
value: $(params.revision)
- name: buildImage
value: $(params.buildImage)
- - name: runtimeImage
- value: $(params.runtimeImage)
+ - name: runtimeBaseImage
+ value: $(params.runtimeBaseImage)
- name: component-test-in-cp4i
taskRef:
name: component-test-in-cp4i
@@ -44,6 +47,10 @@ spec:
value: $(params.url)
- name: revision
value: $(params.revision)
+ - name: tag
+ value: "$(tasks.build-from-source.results.tag)"
+ - name: ctsha
+ value: "$(tasks.build-from-source.results.ctsha)"
runAfter:
- build-from-source
- name: deploy-to-cp4i
@@ -56,5 +63,9 @@ spec:
value: $(params.url)
- name: revision
value: $(params.revision)
+ - name: tag
+ value: "$(tasks.build-from-source.results.tag)"
+ - name: sha
+ value: "$(tasks.build-from-source.results.sha)"
runAfter:
- component-test-in-cp4i
diff --git a/tekton/os/cp4i/create-integrationruntime-ct.yaml b/tekton/os/cp4i/create-integrationruntime-ct.yaml
index 3eba635..b9a7880 100644
--- a/tekton/os/cp4i/create-integrationruntime-ct.yaml
+++ b/tekton/os/cp4i/create-integrationruntime-ct.yaml
@@ -10,7 +10,7 @@ spec:
toolkitFlow: true
license:
accept: true
- license: L-UTKS-P46KK2
+ license: L-DMRW-D3HQHQ
use: AppConnectEnterpriseNonProductionFREE
configurations:
- teajdbc-policy
@@ -20,7 +20,7 @@ spec:
spec:
containers:
- image: >-
- DOCKER_REGISTRY/tea-tekton-cp4i-ct:latest
+ DOCKER_REGISTRY/tea-tekton-cp4i-ct:IMAGE_TAG
env:
- name: MQSI_PREVENT_CONTAINER_SHUTDOWN
value: 'true'
@@ -38,4 +38,4 @@ spec:
memory: 1024Mi
imagePullSecrets:
- name: regcred
- version: 12.0.10.0-r1
\ No newline at end of file
+ version: 12.0.11.1-r1
\ No newline at end of file
diff --git a/tekton/os/cp4i/create-integrationruntime.yaml b/tekton/os/cp4i/create-integrationruntime.yaml
index 6c32b04..e5d1d08 100644
--- a/tekton/os/cp4i/create-integrationruntime.yaml
+++ b/tekton/os/cp4i/create-integrationruntime.yaml
@@ -10,13 +10,13 @@ spec:
toolkitFlow: true
license:
accept: true
- license: L-UTKS-P46KK2
+ license: L-DMRW-D3HQHQ
use: AppConnectEnterpriseNonProductionFREE
template:
spec:
containers:
- image: >-
- DOCKER_REGISTRY/tea-tekton-cp4i:latest
+ DOCKER_REGISTRY/tea-tekton-cp4i:IMAGE_TAG
imagePullPolicy: Always
name: runtime
resources:
@@ -29,4 +29,4 @@ spec:
- teajdbc-policy
- default-policy
- teajdbc
- version: 12.0.10.0-r1
\ No newline at end of file
+ version: 12.0.11.1-r1
\ No newline at end of file
diff --git a/tekton/os/cp4i/force-pull-cp4i.yaml b/tekton/os/cp4i/force-pull-cp4i.yaml
deleted file mode 100644
index c112487..0000000
--- a/tekton/os/cp4i/force-pull-cp4i.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- name: force-pull-cp4i
- namespace: cp4i
-spec:
- selector:
- matchLabels:
- name: force-pull-cp4i
- template:
- metadata:
- labels:
- name: force-pull-cp4i
- spec:
- containers:
- - name: force-pull-minimal-build
- imagePullPolicy: Always
- image: image-registry.openshift-image-registry.svc.cluster.local:5000/default/ace-minimal-build:12.0.10.0-alpine
- command: ["sleep"]
- args: ["1000000"]
- imagePullSecrets:
- - name: 'regcred'
diff --git a/tekton/os/cp4i/minikube/minikube-dashboard-ingress.yaml b/tekton/os/cp4i/minikube/minikube-dashboard-ingress.yaml
new file mode 100644
index 0000000..ed2d371
--- /dev/null
+++ b/tekton/os/cp4i/minikube/minikube-dashboard-ingress.yaml
@@ -0,0 +1,16 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: db01-ingress
+ namespace: ace-operator
+spec:
+ rules:
+ - http:
+ paths:
+ - pathType: Prefix
+ path: /tea/index
+ backend:
+ service:
+ name: tea-tekton-service
+ port:
+ number: 8300
diff --git a/tekton/os/cp4i/minikube/minikube-install-catalog-source.yaml b/tekton/os/cp4i/minikube/minikube-install-catalog-source.yaml
new file mode 100644
index 0000000..587aeb7
--- /dev/null
+++ b/tekton/os/cp4i/minikube/minikube-install-catalog-source.yaml
@@ -0,0 +1,15 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: CatalogSource
+metadata:
+ name: ibm-appconnect-catalog
+ namespace: olm
+spec:
+ displayName: "IBM App Connect Operator Catalog k8S"
+ publisher: IBM
+ sourceType: grpc
+ image: icr.io/cpopen/appconnect-operator-catalog-k8s
+ updateStrategy:
+ registryPoll:
+ interval: 45m
+ grpcPodConfig:
+ securityContextConfig: restricted
diff --git a/tekton/os/cp4i/minikube/minikube-install-dashboard.yaml b/tekton/os/cp4i/minikube/minikube-install-dashboard.yaml
new file mode 100644
index 0000000..8d94f04
--- /dev/null
+++ b/tekton/os/cp4i/minikube/minikube-install-dashboard.yaml
@@ -0,0 +1,35 @@
+apiVersion: appconnect.ibm.com/v1beta1
+kind: Dashboard
+metadata:
+ name: db01
+ namespace: ace-operator
+spec:
+ license:
+ accept: true
+ license: L-UTKS-P46KK2
+ use: AppConnectEnterpriseProduction
+ pod:
+ containers:
+ content-server:
+ resources:
+ limits:
+ memory: 512Mi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ control-ui:
+ resources:
+ limits:
+ memory: 512Mi
+ requests:
+ cpu: 50m
+ memory: 125Mi
+ switchServer:
+ name: default
+ replicas: 1
+ storage:
+ sizeLimit: 1Gi
+ type: ephemeral
+ useCommonServices: false
+ version: '12.0'
+ displayMode: IntegrationRuntimes
diff --git a/tekton/os/cp4i/minikube/minikube-install-og.yaml b/tekton/os/cp4i/minikube/minikube-install-og.yaml
new file mode 100644
index 0000000..4cadf2a
--- /dev/null
+++ b/tekton/os/cp4i/minikube/minikube-install-og.yaml
@@ -0,0 +1,8 @@
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: ace-operator-group
+ namespace: ace-operator
+spec:
+ targetNamespaces:
+ - ace-operator
diff --git a/tekton/os/cp4i/minikube/minikube-install-subscription.yaml b/tekton/os/cp4i/minikube/minikube-install-subscription.yaml
new file mode 100644
index 0000000..ab1a4a1
--- /dev/null
+++ b/tekton/os/cp4i/minikube/minikube-install-subscription.yaml
@@ -0,0 +1,10 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: ibm-appconnect
+ namespace: ace-operator
+spec:
+ channel: v10.1
+ name: ibm-appconnect
+ source: ibm-appconnect-catalog
+ sourceNamespace: olm
diff --git a/tekton/os/cp4i/service-account-cp4i.yaml b/tekton/os/cp4i/service-account-cp4i.yaml
index 3c949b1..ad749ce 100644
--- a/tekton/os/cp4i/service-account-cp4i.yaml
+++ b/tekton/os/cp4i/service-account-cp4i.yaml
@@ -18,7 +18,7 @@ metadata:
name: pipeline-role
rules:
- apiGroups: ["extensions", "apps", "appconnect.ibm.com", "", "v1"]
- resources: ["services", "deployments", "pods", "integrationservers", "pods/exec", "integrationruntimes"]
+ resources: ["services", "deployments", "pods", "integrationservers", "pods/exec", "pods/log", "integrationruntimes"]
verbs: ["get", "create", "update", "patch", "list", "delete", "exec", "watch"]
---
diff --git a/tekton/os/cp4i/tea-tekton-deployment.yaml b/tekton/os/cp4i/tea-tekton-deployment.yaml
deleted file mode 100644
index dbbb7b7..0000000
--- a/tekton/os/cp4i/tea-tekton-deployment.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: tea-tekton-cp4i
-spec:
- selector:
- matchLabels:
- app: tea-tekton-cp4i
- replicas: 1
- template:
- metadata:
- labels:
- app: tea-tekton-cp4i
- annotations:
- productName: "IBM App Connect Enterprise for non-production"
- productID: eb5b5e73f62b4dcf8c434c6274a158a7
- productMetric: FREE
- spec:
- volumes:
- - name: secret-volume-2
- secret:
- secretName: jdbc-secret
- containers:
- - name: tea-tekton-cp4i
- image: DOCKER_REGISTRY/tea-tekton-cp4i:latest
- ports:
- - containerPort: 7800
- volumeMounts:
- - name: secret-volume-2
- mountPath: /var/run/secrets/jdbc
diff --git a/tekton/os/tea-tekton-route.yaml b/tekton/os/tea-tekton-route.yaml
index baf3679..e371fce 100644
--- a/tekton/os/tea-tekton-route.yaml
+++ b/tekton/os/tea-tekton-route.yaml
@@ -3,6 +3,7 @@ apiVersion: route.openshift.io/v1
metadata:
name: tea-route
spec:
+ # Adjust for namespace (replace default) and cluster name (replace openshift.mycompany.com)
host: tea-route-default.apps.openshift.mycompany.com
to:
kind: Service
diff --git a/tekton/service-account.yaml b/tekton/service-account.yaml
index 497b8bb..09f7436 100644
--- a/tekton/service-account.yaml
+++ b/tekton/service-account.yaml
@@ -4,9 +4,14 @@ metadata:
name: ace-tekton-service-account
imagePullSecrets:
- name: regcred
+ # Needed for cp.icr.io if using the "ace" image
+ # but not needed for ace-minimal
+ #- name: ibm-entitlement-key
secrets:
- name: regcred
-
+ # Needed for cp.icr.io if using the "ace" image
+ # but not needed for ace-minimal
+ #- name: ibm-entitlement-key
---
kind: Role
@@ -14,7 +19,7 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: pipeline-role
rules:
-- apiGroups: ["extensions", "apps", ""]
+- apiGroups: ["extensions", "apps", "serving.knative.dev", ""]
resources: ["services", "deployments", "pods", "pods/exec", "pods/log"]
verbs: ["get", "create", "update", "patch", "list", "delete", "exec"]
diff --git a/tekton/tea-tekton-deployment.yaml b/tekton/tea-tekton-deployment.yaml
index e101de7..6d77b94 100644
--- a/tekton/tea-tekton-deployment.yaml
+++ b/tekton/tea-tekton-deployment.yaml
@@ -24,7 +24,7 @@ spec:
- name: regcred
containers:
- name: tea-tekton
- image: DOCKER_REGISTRY/tea-tekton:latest
+ image: DOCKER_REGISTRY/tea-tekton:IMAGE_TAG
ports:
- containerPort: 7800
imagePullPolicy: Always
diff --git a/tekton/temp-db2/14-ibmint-ace-build-temp-db2-task.yaml b/tekton/temp-db2/14-ibmint-ace-build-temp-db2-task.yaml
new file mode 100644
index 0000000..633b942
--- /dev/null
+++ b/tekton/temp-db2/14-ibmint-ace-build-temp-db2-task.yaml
@@ -0,0 +1,291 @@
+apiVersion: tekton.dev/v1beta1
+kind: Task
+metadata:
+ name: ace-build
+ labels:
+ variant: temp-db2
+spec:
+ # The security and environment settings are needed for OpenShift in a non-default
+ # namespace such as cp4i. Buildah expects to run as root in the container.
+ stepTemplate:
+ securityContext:
+ runAsUser: 0
+ env:
+ - name: "HOME"
+ value: "/tekton/home"
+ - name: "LICENSE"
+ value: "accept"
+ params:
+ - name: outputRegistry
+ type: string
+ - name: url
+ type: string
+ - name: revision
+ type: string
+ - name: buildImage
+ type: string
+ - name: runtimeBaseImage
+ type: string
+ - name: useTransientDatabase
+ type: string
+ description: "Start a temporary DB2 database for use in testing; may be slow to start depending on cluster capabilities."
+ default: "true"
+ results:
+ - name: tag
+ description: image tag of the form 20240220135127-6fe9106
+ steps:
+ - name: clone
+ image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
+ script: |
+ #!/bin/sh
+
+ set -e # Fail on error
+ cd /work
+ git clone -b $(params.revision) $(params.url)
+ cd ace-demo-pipeline
+ export DATE=$(date '+%Y%m%d%H%M%S')
+ export COMMIT=$(git log -1 --pretty=%h)
+ export TAG="$DATE"-"$COMMIT"
+ echo Setting container tag to "$TAG"
+ echo -n "$TAG" > $(results.tag.path)
+
+ # Slightly hacky but works . . .
+ chmod -R 777 /work/ace-demo-pipeline
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: start-db2
+ image: lachlanevenson/k8s-kubectl
+ #
+ # Create the DB2 database container if needed
+ #
+ script: |
+ #!/bin/sh
+ #set -x
+ if [ "$(params.useTransientDatabase)" = "true" ]; then
+ export NS=$(context.taskRun.namespace)
+ echo Launching DB2
+ apk add bash
+ cd /work
+ ls -l /work/ace-demo-pipeline/tekton
+ bash /work/ace-demo-pipeline/tekton/temp-db2/start-db2-container.sh
+ echo "Finished launching DB2"
+ fi
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: ibmint-build
+ image: $(params.buildImage)
+ #image: cp.icr.io/cp/appc/ace:12.0.11.0-r1
+ #
+ # Runs the build and unit test phases, leaving the results in the work directory
+ # for later steps.
+ #
+ script: |
+ #!/bin/bash
+
+ export LICENSE=accept
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error - this must be done after the profile in case the container has the profile loaded already
+
+ cd /work/ace-demo-pipeline
+ mkdir /work/ibmint-output
+ mqsicreateworkdir /work/ibmint-output/ace-server
+ # Using --compile-maps-and-schemas for 12.0.11 and later . . .
+ ibmint deploy --input-path . --output-work-directory /work/ibmint-output/ace-server --project TeaSharedLibraryJava --project TeaSharedLibrary --project TeaRESTApplication --compile-maps-and-schemas
+ ibmint optimize server --work-dir /work/ibmint-output/ace-server --disable NodeJS
+
+ # Copy the contents of the work directory into a new unit-test-specific work directory
+ # This avoids the risk of unit test files being deployed in the real containers, and
+ # is quicker than building the application again
+ mqsicreateworkdir /work/ut-work-dir
+ (cd /work/ibmint-output/ace-server && tar -cf - * ) | (cd /work/ut-work-dir && tar -xf - )
+ # Build just the unit tests
+ ibmint deploy --input-path . --output-work-directory /work/ut-work-dir --project TeaRESTApplication_UnitTest
+
+ # Run the unit tests
+ IntegrationServer -w /work/ut-work-dir --no-nodejs --start-msgflows false --test-project TeaRESTApplication_UnitTest
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: wait-for-db2
+ image: lachlanevenson/k8s-kubectl
+ #
+ # Waits for the DB2 database container if needed
+ #
+ script: |
+ #!/bin/sh
+ #set -x
+ if [ "$(params.useTransientDatabase)" = "true" ]; then
+ export NS=$(context.taskRun.namespace)
+ echo Waiting for DB2
+ apk add bash
+ cd /work
+ ls -l /work/ace-demo-pipeline/tekton
+ bash /work/ace-demo-pipeline/tekton/temp-db2/wait-for-db2-container.sh
+ echo "Finished starting DB2"
+ ls -lR /work/jdbc
+ fi
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: component-test
+ image: $(params.buildImage)
+ #image: cp.icr.io/cp/appc/ace:12.0.11.0-r1
+ #
+ # Builds and runs the component tests using the JDBC credentials provided from
+ # the secret. Also uses the same init-creds.sh script used by the non-CP4i image
+ # to load credentials at startup.
+ #
+ # Leaves the resulting component test project in the work directory to be picked
+ # up by the buildah build later in the task.
+ #
+ script: |
+ #!/bin/bash
+
+ . /opt/ibm/ace-12/server/bin/mqsiprofile
+
+ set -e # Fail on error
+
+ export PATH=/opt/ibm/ace-12/common/jdk/bin:$PATH
+ # Slightly hacky, but quicker than building everything again!
+ (cd /work/ibmint-output/ace-server && tar -cf - * ) | (cd /home/aceuser/ace-server && tar -xf - )
+ ls -l /home/aceuser/ace-server
+ # Set up credentials for the component tests; init-creds.sh looks in /tmp for policy
+ cp /work/ace-demo-pipeline/demo-infrastructure/TEAJDBC.policyxml /tmp/
+ bash /work/ace-demo-pipeline/demo-infrastructure/init-creds.sh
+ # Build and run the tests
+ cd /work/ace-demo-pipeline
+
+ # Build just the component tests
+ ibmint deploy --input-path . --output-work-directory /home/aceuser/ace-server --project TeaRESTApplication_ComponentTest
+
+ # Run the component tests
+ IntegrationServer -w /home/aceuser/ace-server --no-nodejs --start-msgflows false --test-project TeaRESTApplication_ComponentTest
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: secret-volume-2
+ mountPath: /var/run/secrets/jdbc
+ - name: stop-db2
+ image: lachlanevenson/k8s-kubectl
+ #
+ # Stop the DB2 database container if needed
+ #
+ script: |
+ #!/bin/sh
+ #set -x
+ if [ "$(params.useTransientDatabase)" = "true" ]; then
+ export NS=$(context.taskRun.namespace)
+ echo Stopping DB2
+ apk add bash
+ cd /work
+ ls -l /work/ace-demo-pipeline/tekton
+ bash /work/ace-demo-pipeline/tekton/temp-db2/stop-db2-container.sh
+ echo "Finished stopping DB2"
+ fi
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: next-stage-container-setup
+ image: $(params.buildImage)
+ script: |
+ #!/bin/bash
+
+ set -e # Fail on error
+
+ cd /work/ibmint-output
+ cp /work/ace-demo-pipeline/tekton/Dockerfile Dockerfile
+
+ # Copy in various startup files
+ cp /work/ace-demo-pipeline/demo-infrastructure/TEAJDBC.policyxml ace-server/
+ cp /work/ace-demo-pipeline/demo-infrastructure/application-overrides.txt ace-server/
+ cp /work/ace-demo-pipeline/demo-infrastructure/init-creds.sh ace-server/ace-startup-script.sh
+ cp /work/ace-demo-pipeline/demo-infrastructure/read-hashicorp-creds.sh ace-server/
+ cp /work/ace-demo-pipeline/demo-infrastructure/read-xml-creds.sh ace-server/
+
+ echo Contents of /work/ibmint-output/ace-server/server.components.yaml
+ cat /work/ibmint-output/ace-server/server.components.yaml || /bin/true
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: docker-build-and-push
+ #image: quay.io/buildah/stable:v1
+ image: registry.redhat.io/rhel8/buildah:8.9-5
+ securityContext:
+ runAsUser: 0
+ # Needed for hostPath volumes on OpenShift
+ privileged: true
+ capabilities:
+ add: ["CHOWN", "DAC_OVERRIDE","FOWNER","SETFCAP","SETGID","SETUID"]
+ # specifying DOCKER_CONFIG is required to allow buildah to detect docker credential
+ env:
+ - name: "DOCKER_CONFIG"
+ value: "/tekton/home/.docker/"
+ script: |
+ date
+ export TAG=`cat $(results.tag.path)`
+ echo Using $TAG as image tag
+ buildah --storage-driver=overlay bud --format=oci --tls-verify=false --no-cache \
+ --build-arg BASE_IMAGE=$(params.runtimeBaseImage) \
+ -f /work/ibmint-output/Dockerfile -t $(params.outputRegistry)/tea-tekton:$TAG /work/ibmint-output
+ date
+ buildah --storage-driver=overlay push --tls-verify=false --digestfile /tmp/image-digest \
+ $(params.outputRegistry)/tea-tekton:$TAG "docker://$(params.outputRegistry)/tea-tekton:$TAG"
+ date
+ volumeMounts:
+ - mountPath: /work
+ name: work
+ - name: varlibcontainers
+ mountPath: /var/lib/containers
+ volumes:
+ - name: work
+ emptyDir: {}
+ - name: secret-volume-2
+ secret:
+ secretName: jdbc-secret
+ #
+ # Default buildah approach using emptyDir; takes about 2 minutes on a test SNO cluster
+ #
+ - name: varlibcontainers
+ emptyDir: {}
+ #
+ # Local directory for this pipeline; takes a few seconds after initial pull
+ #
+ #- name: varlibcontainers
+ # hostPath:
+ # path: '/var/hostPath/buildah-cache'
+ # type: Directory
+ #
+ # Sharing the host containers; takes a few seconds
+ #
+ #- name: varlibcontainers
+ # hostPath:
+ # path: '/var/lib/containers'
+ # type: Directory
+ #
+ # Local disk using LVM operator on SNO; same speed as hostPath
+ #
+ #- name: varlibcontainers
+ # persistentVolumeClaim:
+ # claimName: buildah-cache
+ #
+ # NFS mount from same subnet; initial pull took 35 minutes, and
+ # subsequent builds took around 9 minutes.
+ #
+ # May also see messages like
+ #
+ # time="2024-02-15T00:55:11Z" level=error msg="'overlay' is not supported over nfs at \"/var/lib/containers/storage/overlay\""
+ #
+ # or possibly failing with
+ #
+ # time="2024-02-15T20:05:58Z" level=warning msg="Network file system detected as backing store. Enforcing overlay option `force_mask=\"700\"`. Add it to storage.conf to silence this warning"
+ # Error: mount /var/lib/containers/storage/overlay:/var/lib/containers/storage/overlay, flags: 0x1000: permission denied
+ # time="2024-02-15T20:05:58Z" level=warning msg="Network file system detected as backing store. Enforcing overlay option `force_mask=\"700\"`. Add it to storage.conf to silence this warning"
+ # time="2024-02-15T20:05:58Z" level=warning msg="failed to shutdown storage: \"mount /var/lib/containers/storage/overlay:/var/lib/containers/storage/overlay, flags: 0x1000: permission denied\""
+ #
+ # if not running privileged
+ #- name: varlibcontainers
+ # persistentVolumeClaim:
+ # claimName: buildah-cache-nfs
diff --git a/tekton/temp-db2/14-maven-ace-build-temp-db2-task.yaml b/tekton/temp-db2/14-maven-ace-build-temp-db2-task.yaml
deleted file mode 100644
index 9b228e5..0000000
--- a/tekton/temp-db2/14-maven-ace-build-temp-db2-task.yaml
+++ /dev/null
@@ -1,160 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: Task
-metadata:
- name: maven-ace-build
- labels:
- variant: temp-db2
-spec:
- params:
- - name: dockerRegistry
- type: string
- - name: url
- type: string
- - name: revision
- type: string
- - name: buildImage
- type: string
- - name: runtimeImage
- type: string
- - name: useTransientDatabase
- type: string
- description: "Start a temporary DB2 database for use in testing; may be slow to start depending on cluster capabilities."
- default: "true"
- steps:
- - name: clone
- image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.18.1
- script: |
- #!/bin/sh
- cd /work
- git clone -b $(params.revision) $(params.url)
- # Slightly hacky but works
- chmod -R 777 /work/ace-demo-pipeline
- volumeMounts:
- - mountPath: /work
- name: work
- - name: start-db2
- image: lachlanevenson/k8s-kubectl
- #
- # Create the DB2 database container if needed
- #
- script: |
- #!/bin/sh
- #set -x
- if [ "$(params.useTransientDatabase)" = "true" ]; then
- echo PROD
- apk add bash
- cd /work
- ls -l /work/ace-demo-pipeline/tekton
- bash /work/ace-demo-pipeline/tekton/temp-db2/start-db2-container.sh
- echo "Finished starting DB2"
- fi
- volumeMounts:
- - mountPath: /work
- name: work
- - name: maven-build
- image: $(params.dockerRegistry)/$(params.buildImage)
- script: |
- #!/bin/bash
- export LICENSE=accept
- . /opt/ibm/ace-12/server/bin/mqsiprofile
- export PATH=/opt/ibm/ace-12/common/jdk/bin:$PATH
- which javac
- javac -version
- mkdir /work/maven-output
- cd /work/ace-demo-pipeline
- id
- mvn --no-transfer-progress -Dinstall.work.directory=/work/maven-output/ace-server install
- volumeMounts:
- - mountPath: /work
- name: work
- - name: wait-for-db2
- image: lachlanevenson/k8s-kubectl
- #
- # Waits for the DB2 database container if needed
- #
- script: |
- #!/bin/sh
- #set -x
- if [ "$(params.useTransientDatabase)" = "true" ]; then
- echo PROD
- apk add bash
- cd /work
- ls -l /work/ace-demo-pipeline/tekton
- bash /work/ace-demo-pipeline/tekton/temp-db2/wait-for-db2-container.sh
- echo "Finished starting DB2"
- ls -lR /work/jdbc
- fi
- volumeMounts:
- - mountPath: /work
- name: work
- - name: component-test
- image: $(params.dockerRegistry)/$(params.buildImage)
- script: |
- #!/bin/bash
- export LICENSE=accept
- . /opt/ibm/ace-12/server/bin/mqsiprofile
- export PATH=/opt/ibm/ace-12/common/jdk/bin:$PATH
- # Slightly hacky, but quicker than building everything again!
- (cd /work/maven-output/ace-server/run && tar -cf - * ) | (cd /home/aceuser/ace-server/run && tar -xf - )
- # Set up credentials for the component tests; init-creds.sh looks in /tmp for policy
- cp /work/ace-demo-pipeline/demo-infrastructure/TEAJDBC.policyxml /tmp/
- bash /work/ace-demo-pipeline/demo-infrastructure/init-creds.sh
- # Build and run the tests
- cd /work/ace-demo-pipeline/TeaRESTApplication_ComponentTest
- mvn --no-transfer-progress -Dct.work.directory=/home/aceuser/ace-server verify
- volumeMounts:
- - mountPath: /work
- name: work
- - name: secret-volume-2
- mountPath: /var/run/secrets/jdbc
- - name: stop-db2
- image: lachlanevenson/k8s-kubectl
- #
- # Stop the DB2 database container if needed
- #
- script: |
- #!/bin/sh
- #set -x
- if [ "$(params.useTransientDatabase)" = "true" ]; then
- echo PROD
- apk add bash
- cd /work
- ls -l /work/ace-demo-pipeline/tekton
- bash /work/ace-demo-pipeline/tekton/temp-db2/stop-db2-container.sh
- echo "Finished stopping DB2"
- fi
- volumeMounts:
- - mountPath: /work
- name: work
- - name: next-stage-container-setup
- image: $(params.dockerRegistry)/$(params.buildImage)
- script: |
- #!/bin/bash
- cd /work/maven-output
- cp /work/ace-demo-pipeline/tekton/Dockerfile Dockerfile
- volumeMounts:
- - mountPath: /work
- name: work
- - name: docker-build-and-push
- image: gcr.io/kaniko-project/executor:latest
- # specifying DOCKER_CONFIG is required to allow kaniko to detect docker credential
- env:
- - name: "DOCKER_CONFIG"
- value: "/tekton/home/.docker/"
- command:
- - /kaniko/executor
- args:
- - --dockerfile=/work/maven-output/Dockerfile
- - --destination=$(params.dockerRegistry)/tea-tekton
- - --context=/work/maven-output
- - --build-arg=BASE_IMAGE=$(params.dockerRegistry)/$(params.runtimeImage)
- - --skip-tls-verify
- volumeMounts:
- - mountPath: /work
- name: work
- volumes:
- - name: work
- emptyDir: {}
- - name: secret-volume-2
- secret:
- secretName: jdbc-secret
diff --git a/tekton/temp-db2/README.md b/tekton/temp-db2/README.md
index 098ffb2..6441dc5 100644
--- a/tekton/temp-db2/README.md
+++ b/tekton/temp-db2/README.md
@@ -6,11 +6,11 @@ for each pipeline run, creating and deleting the container as part of the build.
![Pipeline overview](temp-db2-pipeline-20230301.png)
-The rest of the pipeline remains unchanged, with only the `maven-ace-build` task changing.
+The rest of the pipeline remains unchanged, with only the `ibmint-ace-build` task changing.
## Overview
-The modified `maven-ace-build` task adds three new steps in order to run new DB2 database
+The modified `ibmint-ace-build` task adds three new steps in order to run a new DB2 database
for each pipeline run. Running a new database each time ensures that the test results are
repeatable and not influenced by previous test runs, but also requires more cluster resources
as a new database must be created and started each time.
@@ -79,10 +79,10 @@ Resource consumption is the biggest disadvantage, as it causes some issues:
## Getting started
-The main change required is the updating of the `maven-ace-build` task to include the DB2
+The main change required is the updating of the `ibmint-ace-build` task to include the DB2
container interactions; this requires applying the task YAML in this directory:
```
-kubectl apply -f tekton/temp-db2/14-maven-ace-build-temp-db2-task.yaml
+kubectl apply -f tekton/temp-db2/14-ibmint-ace-build-temp-db2-task.yaml
```
The task steps rely on the other files in this directory, but no other configuration changes
are needed and the pipeline can be run to verify the new steps and configuration are used.
@@ -90,7 +90,7 @@ are needed and the pipeline can be run to verify the new steps and configuration
## Notes
The main tekton service account has been adjusted to allow container logs to be queried, which is
-needed in order to determin ewhen the database has finished starting up.
+needed in order to determine when the database has finished starting up.
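
The idea behind the wait step is sketched below; this is not the actual `wait-for-db2-container.sh`, and the readiness message to grep for depends on the DB2 image in use.

```
# Poll the DB2 pod logs until the instance reports it has finished setting up
for i in $(seq 1 60); do
  if kubectl logs -n "${NS}" "${POD_NAME}" 2>/dev/null | grep -q "Setup has completed"; then
    echo "DB2 is ready"
    break
  fi
  sleep 10
done
```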
Very small clusters may become unstable if multiple DB2 containers are run simultaneously; this is
likely to be due to resource contention issues.
diff --git a/tekton/temp-db2/start-db2-container.sh b/tekton/temp-db2/start-db2-container.sh
index 6e52c11..845591b 100644
--- a/tekton/temp-db2/start-db2-container.sh
+++ b/tekton/temp-db2/start-db2-container.sh
@@ -1,6 +1,9 @@
#!/bin/bash
-export NS=default
+if [ "$NS" == "" ]; then
+ echo "Using default namespace"
+ export NS=default
+fi
export POD_NAME=db2-test-pod
kubectl get pods -n ${NS}
diff --git a/tekton/temp-db2/stop-db2-container.sh b/tekton/temp-db2/stop-db2-container.sh
index 397dbad..a3bfa09 100644
--- a/tekton/temp-db2/stop-db2-container.sh
+++ b/tekton/temp-db2/stop-db2-container.sh
@@ -1,6 +1,9 @@
#!/bin/bash
-export NS=default
+if [ "$NS" == "" ]; then
+ echo "Using default namespace"
+ export NS=default
+fi
export POD_NAME=db2-test-pod
kubectl get pods -n ${NS}
diff --git a/tekton/temp-db2/wait-for-db2-container.sh b/tekton/temp-db2/wait-for-db2-container.sh
index 8c73533..80458f4 100644
--- a/tekton/temp-db2/wait-for-db2-container.sh
+++ b/tekton/temp-db2/wait-for-db2-container.sh
@@ -1,6 +1,9 @@
#!/bin/bash
-export NS=default
+if [ "$NS" == "" ]; then
+ echo "Using default namespace"
+ export NS=default
+fi
export POD_NAME=db2-test-pod
rc=1