# Manager-CI-v2.yaml
name: Manager - CI - v2
on:
  pull_request_target:
    types: [ opened, labeled, unlabeled, synchronize ]
    branches:
      - 'main'
    paths-ignore:
      - 'LICENSE'
      - '**/.gitignore'
      - '**.md'
      - '**.adoc'
      - '*.txt'
      - '.github/**'
      - 'kustomize/**'
      - 'dev/**'
      - 'app-interface/**'
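# pull_request_target runs in the context of the base branch and exposes repository
# secrets even for PRs from forks; the 'Check labels' gate below therefore requires a
# maintainer to add the 'safe to test' label before any PR code is executed.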
jobs:
  start-runner:
    name: Start self-hosted EC2 runner
    runs-on: ubuntu-latest
    outputs:
      label: ${{ steps.start-aws-runner.outputs.label }}
      ec2-instance-id: ${{ steps.start-aws-runner.outputs.ec2-instance-id }}
    steps:
      - name: Check labels
        # Security due to https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
        if: ${{ !contains( github.event.pull_request.labels.*.name, 'safe to test') }}
        run: |
          echo "If this code is safe to test, please add the 'safe to test' label to run the 'Manager - CI - v2' pipeline"
          exit 1
      - name: Checkout
        uses: actions/checkout@v3
      - name: Start AWS runner
        id: start-aws-runner
        uses: ./.github/actions/start-aws-runner
        with:
          aws-access-key-id: ${{ env.GHA_AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ env.GHA_AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.GHA_AWS_REGION }}
          github-token: ${{ env.GHA_GH_PERSONAL_ACCESS_TOKEN }}
        env:
          GHA_AWS_ACCESS_KEY_ID: ${{ secrets.GHA_AWS_ACCESS_KEY_ID }}
          GHA_AWS_SECRET_ACCESS_KEY: ${{ secrets.GHA_AWS_SECRET_ACCESS_KEY }}
          GHA_AWS_REGION: ${{ secrets.GHA_AWS_REGION }}
          GHA_GH_PERSONAL_ACCESS_TOKEN: ${{ secrets.GHA_GH_PERSONAL_ACCESS_TOKEN }}
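  # Main job: build every image, deploy the whole stack into KinD on the EC2 runner
  # provisioned above, and run the E2E suite against it.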
  event-bridge-build:
    needs: start-runner # required to start the main job when the runner is ready
    runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner
    env:
      FLEET_MANAGER_CONTAINER_NAME: openbridge/fleet-manager:${{ github.sha }}
      FLEET_SHARD_MINIKUBE_CONTAINER_NAME: openbridge/fleet-shard:${{ github.sha }}
      EXECUTOR_CONTAINER_NAME: openbridge/executor:${{ github.sha }}
      # This hostname is used for the ingress in KinD and is added to the machine's hosts file
      KIND_MAIN_NODE_HOSTNAME: kind-control-plane
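    # Allow only one run per PR branch at a time; a newer push cancels the in-flight run.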
    concurrency:
      group: event-bridge-manager-v2-pr-${{ github.head_ref }}
      cancel-in-progress: true
    timeout-minutes: 60
    if: github.repository == '5733d9e2be6485d52ffa08870cabdee0/sandbox'
    name: Build all images and run E2E tests
    steps:
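      # Check out the PR merge ref (PR head merged into main) rather than the base
      # branch that pull_request_target would check out by default.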
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: "refs/pull/${{ github.event.number }}/merge"
      - name: Java and Maven Setup
        uses: ./.github/actions/java-maven-setup
        with:
          cache-key-prefix: ${{ runner.os }}
      - name: Re-Checkout # the Java and Maven Setup step checks out the main branch, so we have to check out the pull request merge ref again
        uses: actions/checkout@v3
        with:
          ref: "refs/pull/${{ github.event.number }}/merge"
      - name: Set up KinD
        uses: ./.github/actions/kind
        with:
          kind-node-hostname: ${{ env.KIND_MAIN_NODE_HOSTNAME }}
      - name: External Secret Operator Setup
        uses: ./.github/actions/external-secret-operator-setup
      - uses: imranismail/setup-kustomize@v2
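      # Rewrite the CI overlay so the deployment references the images built from this
      # commit (tagged with the commit SHA) instead of whatever the overlay defaults to.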
      - name: Use built Fleet manager and operator images
        run: |
          kustomize edit set image event-bridge-manager=$FLEET_MANAGER_CONTAINER_NAME
          kustomize edit set image event-bridge-shard-operator=$FLEET_SHARD_MINIKUBE_CONTAINER_NAME
        working-directory: kustomize/overlays/ci
      - name: Use built Ingress and Executor images
        run: |
          sed -i -E "s|(.*EVENT_BRIDGE_EXECUTOR_IMAGE:).*|\1 $EXECUTOR_CONTAINER_NAME|" overlays/ci/shard/patches/deploy-config.yaml
          sed -i -E "s|(.*INGRESS_OVERRIDE_HOSTNAME:).*|\1 $KIND_MAIN_NODE_HOSTNAME|" overlays/ci/shard/patches/deploy-config.yaml
          sed -i -E "s|(.*EVENT_BRIDGE_K8S_ORCHESTRATOR:).*|\1 kind|" overlays/ci/shard/patches/deploy-config.yaml
        working-directory: kustomize
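      # The resource prefix embeds the run id and attempt so that resources created by
      # this run don't collide with concurrent or retried runs.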
      - name: Set Kind hostname in the Manager configuration
        run: |
          sed -i -E "s|(.*EVENT_BRIDGE_RESOURCE_PREFIX:).*|\1 ci-$GITHUB_RUN_ID-$GITHUB_RUN_ATTEMPT-|" overlays/ci/manager/patches/deploy-config.yaml
          sed -i -E "s|(.*EVENT_BRIDGE_DNS_INGRESS_OVERRIDE_HOSTNAME:).*|\1 $KIND_MAIN_NODE_HOSTNAME|" overlays/ci/manager/patches/deploy-config.yaml
          sed -i -E "s|(.*EVENT_BRIDGE_K8S_ORCHESTRATOR:).*|\1 kind|" overlays/ci/manager/patches/deploy-config.yaml
        working-directory: kustomize
      - name: Configure external Managed Kafka cluster
        shell: bash
        env:
          AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
        run: |
          sed -i -E "s|(.*access-key:).*|\1 $AWS_ACCESS_KEY|" manager/awssm-secret.yaml
          sed -i -E "s|(.*secret-access-key:).*|\1 $AWS_SECRET_ACCESS_KEY|" manager/awssm-secret.yaml
          sed -i -E "s|(.*access-key:).*|\1 $AWS_ACCESS_KEY|" shard/awssm-secret.yaml
          sed -i -E "s|(.*secret-access-key:).*|\1 $AWS_SECRET_ACCESS_KEY|" shard/awssm-secret.yaml
        working-directory: kustomize/overlays/ci
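      # Assumption: -Dquickly is the project's fast-build switch (skipping tests and
      # long-running checks); the real verification is the E2E run further down.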
      - name: Build all images and resources
        uses: ./.github/actions/maven
        with:
          maven-command: clean install --errors --quiet -Dquickly
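      # The next three steps build the JVM images locally and copy them onto the KinD
      # nodes with 'kind load docker-image', so no external registry push is needed.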
      - name: Build and Publish JVM Container - Fleet Manager
        run: |
          docker build -f docker/Dockerfile.jvm -t $FLEET_MANAGER_CONTAINER_NAME manager-parent/manager/
          kind load docker-image $FLEET_MANAGER_CONTAINER_NAME
      - name: Build and Publish JVM Container - Minikube Fleet Shard
        run: |
          docker build -f docker/Dockerfile.jvm -t $FLEET_SHARD_MINIKUBE_CONTAINER_NAME shard-operator-parent/shard-operator/
          kind load docker-image $FLEET_SHARD_MINIKUBE_CONTAINER_NAME
      - name: Build and Publish JVM Container - Executor
        run: |
          docker build -f docker/Dockerfile.jvm -t $EXECUTOR_CONTAINER_NAME executor/
          kind load docker-image $EXECUTOR_CONTAINER_NAME
      - name: Istio Setup
        uses: ./.github/actions/istio-setup
      - name: Install Knative
        run: |
          chmod +x dev/bin/knative-installer.sh
          ./dev/bin/knative-installer.sh
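      # Assumption: kind-registry:5000 is the local registry container wired up by the
      # KinD setup action above, which Camel K uses to publish its integration images.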
      - name: Install Camel K
        run: |
          chmod +x dev/bin/camel-k-installer.sh
          ./dev/bin/camel-k-installer.sh --registry kind-registry:5000
      - name: Install all resources
        run: kustomize build overlays/ci | kubectl apply -f -
        working-directory: kustomize
        continue-on-error: true
      - name: Install all resources, second attempt (the first apply can fail because the CRDs are not propagated fast enough)
        run: kustomize build overlays/ci | kubectl apply -f -
        working-directory: kustomize
      - name: Update all resources and delete the current operator pod (to refresh the operator config)
        run: |
          kustomize build overlays/ci | kubectl apply -f -
          kubectl delete pod --selector=app=event-bridge-shard-operator -n event-bridge-operator
        working-directory: kustomize
      - name: Wait for operator and manager to start
        run: |
          kubectl wait --for=condition=available --timeout=600s deployment/event-bridge -n event-bridge-manager
          kubectl wait --for=condition=available --timeout=600s deployment/event-bridge-shard-operator -n event-bridge-operator
      - name: Prepare test configuration for E2E tests
        uses: ./.github/actions/e2e-test-config
        env:
          IT_AWS_ACCESS_KEY_ID: ${{ secrets.IT_AWS_ACCESS_KEY_ID }}
          IT_AWS_ACCOUNT_ID: ${{ secrets.IT_AWS_ACCOUNT_ID }}
          IT_AWS_SECRET_ACCESS_KEY: ${{ secrets.IT_AWS_SECRET_ACCESS_KEY }}
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TOKEN: ${{ secrets.SLACK_WEBHOOK_TOKEN }}
          SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
          SLACK_WEBHOOK_URL_SECOND: ${{ secrets.SLACK_WEBHOOK_URL_SECOND }}
          SLACK_CHANNEL_SECOND: ${{ secrets.SLACK_CHANNEL_SECOND }}
          WEBHOOK_SITE_TOKEN: ${{ secrets.WEBHOOK_SITE_TOKEN }}
          WEBHOOK_SITE_TOKEN_SECOND: ${{ secrets.WEBHOOK_SITE_TOKEN_SECOND }}
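      # The Cucumber E2E suite targets the manager through the KinD ingress on
      # localhost:80 and authenticates against the Red Hat external SSO realm.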
      - name: Run integration tests
        env:
          DEV_SSO_CLIENT_ID: ${{ secrets.DEV_SSO_CLIENT_ID }}
          DEV_SSO_CLIENT_SECRET: ${{ secrets.DEV_SSO_CLIENT_SECRET }}
          MANAGED_KAFKA_BOOTSTRAP_SERVER: ${{ secrets.MANAGED_KAFKA_BOOTSTRAP_SERVER }}
          MANAGED_KAFKA_ADMIN_CLIENT_ID: ${{ secrets.MANAGED_KAFKA_ADMIN_CLIENT_ID }}
          MANAGED_KAFKA_ADMIN_CLIENT_SECRET: ${{ secrets.MANAGED_KAFKA_ADMIN_CLIENT_SECRET }}
          MANAGED_KAFKA_OPS_CLIENT_ID: ${{ secrets.MANAGED_KAFKA_OPS_CLIENT_ID }}
          MANAGED_KAFKA_OPS_CLIENT_SECRET: ${{ secrets.MANAGED_KAFKA_OPS_CLIENT_SECRET }}
        uses: ./.github/actions/maven
        with:
          maven-command: clean verify -Pcucumber -Dparallel -Devent-bridge.manager.url=http://localhost:80/manager -Dkeycloak.realm.url=https://sso.redhat.com/auth/realms/redhat-external -Dbridge.client.id=$DEV_SSO_CLIENT_ID -Dbridge.client.secret=$DEV_SSO_CLIENT_SECRET -Dmanaged.kafka.sso.auth-server-url=https://sso.redhat.com/auth/realms/redhat-external -Dtest.credentials.file=e2e-test-config.yaml
          working-directory: integration-tests/manager-integration-tests/manager-integration-tests-v2
      - name: Print operator log
        if: always()
        run: |
          kubectl logs deployment/event-bridge-shard-operator -n event-bridge-operator
      - name: Print manager log
        if: always()
        run: |
          kubectl logs deployment/event-bridge -n event-bridge-manager
      - name: Upload test logs
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: log
          path: ./integration-tests/manager-integration-tests/manager-integration-tests-v2/target/cucumber-logs/
  stop-runner:
    name: Stop self-hosted EC2 runner
    needs:
      - start-runner # required to get the outputs from the start-runner job
      - event-bridge-build # required to wait until the main job is done
    runs-on: ubuntu-latest
    if: ${{ always() }} # required to stop the runner even if a previous job failed
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Stop AWS runner
        uses: ./.github/actions/stop-aws-runner
        with:
          aws-access-key-id: ${{ env.GHA_AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ env.GHA_AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.GHA_AWS_REGION }}
          github-token: ${{ env.GHA_GH_PERSONAL_ACCESS_TOKEN }}
          label: ${{ needs.start-runner.outputs.label }}
          ec2-instance-id: ${{ needs.start-runner.outputs.ec2-instance-id }}
        env:
          GHA_AWS_ACCESS_KEY_ID: ${{ secrets.GHA_AWS_ACCESS_KEY_ID }}
          GHA_AWS_SECRET_ACCESS_KEY: ${{ secrets.GHA_AWS_SECRET_ACCESS_KEY }}
          GHA_AWS_REGION: ${{ secrets.GHA_AWS_REGION }}
          GHA_GH_PERSONAL_ACCESS_TOKEN: ${{ secrets.GHA_GH_PERSONAL_ACCESS_TOKEN }}