diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 14af0916b..812ba5774 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -2,7 +2,13 @@ version: 2 updates: - package-ecosystem: "github-actions" directory: "/" - target-branch: "main" + target-branch: "dev" schedule: interval: "weekly" open-pull-requests-limit: 10 + - package-ecosystem: "docker" + directory: "/" + target-branch: "dev" + schedule: + interval: "daily" + open-pull-requests-limit: 10 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index df389176f..a762936a3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: submodules: recursive - name: Cache - GHA - uses: actions/cache@v1 + uses: actions/cache@v2 with: path: ~/.cache/coursier/v1/https key: ${{ runner.OS }}-coursier-cache diff --git a/.github/workflows/deploy-dev.yml b/.github/workflows/deploy-dev.yml index 6c74c47aa..82f2ccaaa 100644 --- a/.github/workflows/deploy-dev.yml +++ b/.github/workflows/deploy-dev.yml @@ -10,7 +10,6 @@ env: RUMPEL: live RUMPEL_ALT: latest RUMPEL_BUCKET: dswift-hat-stage-frontend-build-artifacts - REPOSITORY: dataswift/hat jobs: build: @@ -21,15 +20,16 @@ jobs: - name: Checkout uses: actions/checkout@v2 with: + fetch-depth: 0 submodules: recursive - name: Cache - GHA - uses: actions/cache@v1 + uses: actions/cache@v2 with: path: ~/.cache/coursier/v1/https key: ${{ runner.OS }}-coursier-cache - - name: Add extra resolvers + - name: Cache - Resolvers run: curl https://${{ secrets.ARTIFACTS_CACHE_BUCKET }}/resolvers.sbt --create-dirs -o ~/.sbt/resolvers.sbt - name: Setup Java @@ -40,7 +40,7 @@ jobs: java-package: jdk architecture: x64 - - name: Frontend + - name: App - Frontend env: AWS_ACCESS_KEY_ID: ${{ secrets.DEPLOYER_STAGING_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.DEPLOYER_STAGING_SECRET_ACCESS_KEY }} @@ -57,25 +57,53 @@ jobs: rm -r alt-rumpel cd - - - name: Build - run: | - sbt -Denv=prod -Dhttp.port=8080 -Dpidfile.path=/dev/null -Dplay.server.pidfile.path=/dev/null docker:stage - docker build -t ${{ env.REPOSITORY }}:${{ github.sha }} hat/target/docker/stage + - name: App - Build + run: sbt Docker/stage + + - name: Container - Setup QEMU + uses: docker/setup-qemu-action@v1 - - name: Container Registry - Login + - name: Container - Setup Buildx + uses: docker/setup-buildx-action@v1 + + - name: Container - Login DockerHub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_DATASWIFT_USER }} password: ${{ secrets.DOCKERHUB_DATASWIFT_PASS }} - - name: Container Registry - Push - run: docker push ${{ env.REPOSITORY }}:${{ github.sha }} + - id: date + run: | + echo "::set-output name=ts::$(date +%s)" + + - name: Container - Meta + id: meta + uses: docker/metadata-action@v3 + with: + images: | + dataswift/hat + tags: | + type=sha,prefix=dev-,suffix=-${{ steps.date.outputs.ts }},format=long + + - name: Container - Build & Push + uses: docker/build-push-action@v2 + with: + push: true + context: hat/target/docker/stage + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/amd64,linux/arm64 + cache-from: type=registry,ref=ghcr.io/dataswift/hat:latest - - name: Invoke deployment - uses: benc-uk/workflow-dispatch@v1 + - name: Slack + uses: lazy-actions/slatify@master + if: failure() with: - workflow: Deploy - ref: refs/heads/main - repo: dataswift/deployments - token: ${{ secrets.BOT_GITHUB_TOKEN }} - inputs: '{ "service": "hat", "environment": "dev", "version": "${{ env.REPOSITORY }}:${{ github.sha 
}}" }' + type: ${{ job.status }} + job_name: "*${{ env.GITHUB_WORKFLOW }}*" + channel: "ci" + commit: true + mention: "here" + mention_if: "failure" + token: ${{ secrets.GITHUB_TOKEN }} + url: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/deploy-main.yml b/.github/workflows/deploy-main.yml index f4247f1d0..695c01224 100644 --- a/.github/workflows/deploy-main.yml +++ b/.github/workflows/deploy-main.yml @@ -9,7 +9,6 @@ env: RUMPEL: live RUMPEL_ALT: latest RUMPEL_BUCKET: dswift-hat-sandbox-frontend-build-artifacts - REPOSITORY: dataswift/hat jobs: build: @@ -20,15 +19,16 @@ jobs: - name: Checkout uses: actions/checkout@v2 with: + fetch-depth: 0 submodules: recursive - name: Cache - GHA - uses: actions/cache@v1 + uses: actions/cache@v2 with: path: ~/.cache/coursier/v1/https key: ${{ runner.OS }}-coursier-cache - - name: Add extra resolvers + - name: Cache - Resolvers run: curl https://${{ secrets.ARTIFACTS_CACHE_BUCKET }}/resolvers.sbt --create-dirs -o ~/.sbt/resolvers.sbt - name: Setup Java @@ -39,7 +39,7 @@ jobs: java-package: jdk architecture: x64 - - name: Frontend + - name: App - Frontend env: AWS_ACCESS_KEY_ID: ${{ secrets.DEPLOYER_SANDBOX_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.DEPLOYER_SANDBOX_SECRET_ACCESS_KEY }} @@ -56,25 +56,54 @@ jobs: rm -r alt-rumpel cd - - - name: Build - run: | - sbt -Denv=prod -Dhttp.port=8080 -Dpidfile.path=/dev/null -Dplay.server.pidfile.path=/dev/null docker:stage - docker build -t ${{ env.REPOSITORY }}:${{ github.sha }} hat/target/docker/stage + - name: App - Build + run: sbt Docker/stage + + - name: Container - Setup QEMU + uses: docker/setup-qemu-action@v1 - - name: Container Registry - Login + - name: Container - Setup Buildx + uses: docker/setup-buildx-action@v1 + + - name: Container - Login DockerHub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_DATASWIFT_USER }} password: ${{ secrets.DOCKERHUB_DATASWIFT_PASS }} - - name: Container Registry - Push - run: docker push ${{ env.REPOSITORY }}:${{ github.sha }} + - id: date + run: | + echo "::set-output name=ts::$(date +%s)" + + - name: Container - Meta + id: meta + uses: docker/metadata-action@v3 + with: + images: | + dataswift/hat + tags: | + type=sha,prefix=main-,suffix=-${{ steps.date.outputs.ts }},format=short + + - name: Container - Build & Push + uses: docker/build-push-action@v2 + with: + push: true + context: . 
+ tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/amd64,linux/arm64 + cache-from: type=registry,ref=ghcr.io/dataswift/hat:latest + file: hat/target/docker/stage/Dockerfile - - name: Invoke deployment - uses: benc-uk/workflow-dispatch@v1 + - name: Slack + uses: lazy-actions/slatify@master + if: failure() with: - workflow: Deploy - ref: refs/heads/main - repo: dataswift/deployments - token: ${{ secrets.BOT_GITHUB_TOKEN }} - inputs: '{ "service": "hat", "environment": "sandbox", "version": "${{ env.REPOSITORY }}:${{ github.sha }}" }' + type: ${{ job.status }} + job_name: "*${{ env.GITHUB_WORKFLOW }}*" + channel: "ci" + commit: true + mention: "here" + mention_if: "failure" + token: ${{ secrets.GITHUB_TOKEN }} + url: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/deploy-release.yml b/.github/workflows/deploy-release.yml index f1281b96d..3a39072d9 100644 --- a/.github/workflows/deploy-release.yml +++ b/.github/workflows/deploy-release.yml @@ -9,7 +9,6 @@ env: RUMPEL: live RUMPEL_ALT: latest RUMPEL_BUCKET: hubofallthings-net-frontend-build-artifacts - REPOSITORY: dataswift/hat jobs: build: @@ -20,15 +19,16 @@ jobs: - name: Checkout uses: actions/checkout@v2 with: + fetch-depth: 0 submodules: recursive - name: Cache - GHA - uses: actions/cache@v1 + uses: actions/cache@v2 with: path: ~/.cache/coursier/v1/https key: ${{ runner.OS }}-coursier-cache - - name: Add extra resolvers + - name: Cache - Resolvers run: curl https://${{ secrets.ARTIFACTS_CACHE_BUCKET }}/resolvers.sbt --create-dirs -o ~/.sbt/resolvers.sbt - name: Setup Java @@ -39,7 +39,7 @@ jobs: java-package: jdk architecture: x64 - - name: Frontend + - name: App - Frontend env: AWS_ACCESS_KEY_ID: ${{ secrets.DEPLOYER_MASTER_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.DEPLOYER_MASTER_SECRET_ACCESS_KEY }} @@ -56,32 +56,61 @@ jobs: rm -r alt-rumpel cd - - - name: Build - run: | - sbt -Denv=prod -Dhttp.port=8080 -Dpidfile.path=/dev/null -Dplay.server.pidfile.path=/dev/null docker:stage - docker build -t ${{ env.REPOSITORY }}:${{ github.sha }} hat/target/docker/stage + - name: App - Build + run: sbt Docker/stage + + - name: Container - Setup QEMU + uses: docker/setup-qemu-action@v1 - - name: Container Registry - Login + - name: Container - Setup Buildx + uses: docker/setup-buildx-action@v1 + + - name: Container - Login DockerHub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_DATASWIFT_USER }} password: ${{ secrets.DOCKERHUB_DATASWIFT_PASS }} - - name: Get the tag - id: git - run: echo ::set-output name=TAG::$(echo $GITHUB_REF | sed -e "s/refs\/tags\///g") + - name: Container - Login GitHub + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ secrets.BOT_GITHUB_NAME }} + password: ${{ secrets.BOT_GITHUB_TOKEN }} - - name: Container Registry - Tag - run: docker tag ${{ env.REPOSITORY }}:${{ github.sha }} ${{ env.REPOSITORY }}:${{ steps.git.outputs.TAG }} + - name: Container - Meta + id: meta + uses: docker/metadata-action@v3 + with: + images: | + dataswift/hat + ghcr.io/dataswift/hat + tags: | + type=raw,value=latest + type=semver,prefix=v,pattern={{version}} + type=semver,prefix=v,pattern={{major}} + type=semver,prefix=v,pattern={{major}}.{{minor}} - - name: Container Registry - Push - run: docker push ${{ env.REPOSITORY }}:${{ steps.git.outputs.TAG }} + - name: Container - Build & Push + uses: docker/build-push-action@v2 + with: + push: true + context: . 
+ tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/amd64,linux/arm64 + cache-from: type=registry,ref=ghcr.io/dataswift/hat:latest + file: hat/target/docker/stage/Dockerfile - - name: Invoke deployment - uses: benc-uk/workflow-dispatch@v1 + - name: Slack + uses: lazy-actions/slatify@master + if: failure() with: - workflow: Deploy - ref: refs/heads/main - repo: dataswift/deployments - token: ${{ secrets.BOT_GITHUB_TOKEN }} - inputs: '{ "service": "hat", "environment": "production", "version": "${{ env.REPOSITORY }}:${{ steps.git.outputs.TAG }}" }' + type: ${{ job.status }} + job_name: "*${{ env.GITHUB_WORKFLOW }}*" + channel: "ci" + commit: true + mention: "here" + mention_if: "failure" + token: ${{ secrets.GITHUB_TOKEN }} + url: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 3d4a08e9c..43bd0f850 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -3,10 +3,10 @@ name: Trivy on: workflow_dispatch: schedule: - - cron: "0 4 * * MON" + - cron: "30 3 * * *" env: - REGISTRY: dataswift/hat + REGISTRY: ghcr.io/dataswift/hat:latest jobs: trivy-scan: @@ -14,55 +14,19 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 20 steps: - - name: Container - Login - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_DATASWIFT_USER }} - password: ${{ secrets.DOCKERHUB_DATASWIFT_PASS }} - - name: Container - Pull - run: docker pull ${{ env.REGISTRY }}:${{ github.sha }} - - - name: Container - Scan - create git issue - uses: dataswift/gha-trivy@main - id: trivy - with: - token: ${{ secrets.GITHUB_TOKEN }} - image: ${{ env.REGISTRY }}:${{ github.sha }} - issue: "true" - issue_label: trivy, vulnerability, security - issue_title: Trivy Security Alert - - - name: Jira Login - if: steps.trivy.outputs.issue_number != '' - uses: atlassian/gajira-login@master - env: - JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} - JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} - JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }} - - - name: Create Jira ticket from Issue - id: jira - if: steps.trivy.outputs.issue_number != '' - uses: atlassian/gajira-create@master - with: - project: ${{ secrets.JIRA_TRIVY_PROJECT }} - issuetype: ${{ secrets.JIRA_TRIVY_ISSUE_TYPE }} - summary: Trivy Security Alert - ${{ github.repository }} - description: ${{steps.trivy.outputs.html_url}} + run: docker pull ${{ env.REGISTRY }} - - name: Container - Scan - Save Result - if: steps.trivy.outputs.issue_number != '' + - name: Container - Scan uses: aquasecurity/trivy-action@master with: - image-ref: "${{ env.REGISTRY }}:${{ github.sha }}" + image-ref: ${{ env.REGISTRY }} format: "template" template: "@/contrib/sarif.tpl" output: "trivy-results.sarif" severity: "CRITICAL,HIGH" - - name: Upload scan results - if: steps.trivy.outputs.issue_number != '' + - name: Container - Result uses: github/codeql-action/upload-sarif@v1 with: sarif_file: "trivy-results.sarif" diff --git a/README.md b/README.md index dbbcf1897..92f560cb2 100755 --- a/README.md +++ b/README.md @@ -129,8 +129,7 @@ Specifically, it has 4 major sections: ## Using docker-compose -We have put together a [docker-compose](https://docs.docker.com/compose/) file that will allow you to run a PostgreSQL -node and a HAT node easily. +We have put together a [docker-compose](https://docs.docker.com/compose/) file that will allow you to run a PostgreSQL node and a HAT node easily. 
### Get the Source and the submodules @@ -142,6 +141,10 @@ node and a HAT node easily. > docker-compose up > open [https://bobtheplumber.example:9001](https://bobtheplumber.example:9001) +## Using Helm 3 + +The HAT solution is easily deployable on top of Kubernetes via the [Helm 3 chart](charts). + ## Additional information - API documentation can be found at the [developers' portal](https://developers.hubofallthings.com) diff --git a/build.sbt b/build.sbt index 7223cbaf6..ac1edcf7a 100644 --- a/build.sbt +++ b/build.sbt @@ -1,4 +1,5 @@ -import Dependencies.Library +import Dependencies._ +import play.sbt.PlayImport import sbt.Keys._ val codeguruURI = @@ -15,49 +16,37 @@ lazy val hat = project .disablePlugins(PlayLogback) .settings( libraryDependencies ++= Seq( - Library.Play.ws, - filters, - ehcache, - Library.Play.cache, - Library.Play.playGuard, - Library.Play.json, - Library.Play.jsonJoda, - Library.Play.test, - Library.Play.Silhouette.passwordBcrypt, - Library.Play.Silhouette.persistence, - Library.Play.Silhouette.cryptoJca, - Library.Play.Silhouette.silhouette, - Library.Play.Jwt.atlassianJwtCore, - Library.Play.Jwt.bouncyCastlePkix, - Library.Backend.logPlay, - Library.Backend.redisCache, - Library.HATDeX.dexClient, - Library.HATDeX.codegen, - Library.Utils.awsJavaS3Sdk, - Library.Utils.awsJavaSesSdk, - Library.Utils.awsJavaLambdaSdk, - Library.Utils.prettyTime, - Library.Utils.nbvcxz, - Library.Utils.alpakkaAwsLambda, - Library.scalaGuice, - Library.circeConfig, - Library.ContractLibrary.adjudicator, - Library.Utils.apacheCommonLang, - Library.Prometheus.filters, - Library.janino + PlayImport.ehcache, + PlayImport.filters, + PlayImport.guice, + PlayImport.ws, + DsLib.Adjudicator, + DsLib.DexClient, + DsLib.PlayCommon, + DsLib.RedisCache, + DsLib.SilhouetteCryptoJca, + DsLib.SilhouettePasswordBcrypt, + DsLib.SilhouettePersistence, + DsLib.SlickPostgresDriver, + Lib.AwsV1Sdk, + Lib.BouncyCastle, + Lib.Ficus, + Lib.Guard, + Lib.Nbvcxz, + Lib.PlayJson, + Lib.PlayJsonJoda, + Lib.PlaySlick, + Lib.PlayTest, + Lib.PrometheusFilter, + Lib.ScalaGuice, + LocalThirdParty.AlpakkaAwsLambda, + LocalThirdParty.CirceConfig, + LocalThirdParty.PrettyTime, + DsLib.IntegrationTestCommon % Test, + DsLib.SilhouetteTestkit % Test, + Lib.ScalaTestScalaCheck % Test, + LocalThirdParty.ScalaTestplusMockito % Test ), - libraryDependencies := (buildEnv.value match { - case BuildEnv.Developement | BuildEnv.Test => - libraryDependencies.value ++ Seq( - Library.Play.Silhouette.silhouetteTestkit, - Library.ScalaTest.scalaplaytestmock, - Library.Dataswift.integrationTestCommon, - Library.ScalaTest.mockitoCore - ) - case BuildEnv.Stage | BuildEnv.Production => - libraryDependencies.value - }), - excludeDependencies := Seq(ExclusionRule("org.slf4j", "slf4j-nop")), Test / parallelExecution := false, Assets / pipelineStages := Seq(digest), Assets / sourceDirectory := baseDirectory.value / "app" / "org" / "hatdex" / "hat" / "phata" / "assets", @@ -102,7 +91,6 @@ lazy val hat = project // Enable the semantic DB for scalafix inThisBuild( List( - scalaVersion := "2.13.5", scalafixScalaBinaryVersion := "2.13", semanticdbEnabled := true, semanticdbVersion := scalafixSemanticdb.revision, diff --git a/charts/hat/Chart.yaml b/charts/hat/Chart.yaml index 6a63e3f9a..80c35c273 100644 --- a/charts/hat/Chart.yaml +++ b/charts/hat/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.9 +version: 0.1.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/charts/hat/templates/configmap.yaml b/charts/hat/templates/configmap.yaml index 0746049ac..82b9a5e78 100644 --- a/charts/hat/templates/configmap.yaml +++ b/charts/hat/templates/configmap.yaml @@ -3,6 +3,7 @@ kind: ConfigMap metadata: name: {{ include "hat.fullname" . }} labels: + app.kubernetes.io/component: app {{- include "hat.labels" . | nindent 4 }} data: {{- range $key, $val := .Values.env.config }} diff --git a/charts/hat/templates/deployment.yaml b/charts/hat/templates/deployment.yaml index e27adf778..49f5f718e 100644 --- a/charts/hat/templates/deployment.yaml +++ b/charts/hat/templates/deployment.yaml @@ -3,6 +3,7 @@ kind: Deployment metadata: name: {{ include "hat.fullname" . }} labels: + app.kubernetes.io/component: app {{- include "hat.labels" . | nindent 4 }} spec: {{- if not .Values.autoscaling.enabled }} @@ -10,6 +11,7 @@ spec: {{- end }} selector: matchLabels: + app.kubernetes.io/component: app {{- include "hat.selectorLabels" . | nindent 6 }} template: metadata: @@ -19,6 +21,7 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} labels: + app.kubernetes.io/component: app {{- include "hat.selectorLabels" . | nindent 8 }} spec: {{- with .Values.imagePullSecrets }} diff --git a/charts/hat/templates/hpa.yaml b/charts/hat/templates/hpa.yaml index 07e31fdf3..d4b03fa30 100644 --- a/charts/hat/templates/hpa.yaml +++ b/charts/hat/templates/hpa.yaml @@ -4,6 +4,7 @@ kind: HorizontalPodAutoscaler metadata: name: {{ include "hat.fullname" . }} labels: + app.kubernetes.io/component: app {{- include "hat.labels" . | nindent 4 }} spec: scaleTargetRef: diff --git a/charts/hat/templates/ingress.yaml b/charts/hat/templates/ingress.yaml index 006351b51..4ecb51e1e 100644 --- a/charts/hat/templates/ingress.yaml +++ b/charts/hat/templates/ingress.yaml @@ -10,6 +10,7 @@ kind: Ingress metadata: name: {{ $fullName }} labels: + app.kubernetes.io/component: app {{- include "hat.labels" . | nindent 4 }} {{- with .Values.ingress.annotations }} annotations: diff --git a/charts/hat/templates/prometheusrules.yaml b/charts/hat/templates/prometheusrules.yaml new file mode 100644 index 000000000..5c3276b0e --- /dev/null +++ b/charts/hat/templates/prometheusrules.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "hat.fullname" . }} +{{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace | quote }} +{{- else }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} + labels: + app.kubernetes.io/component: metrics + {{- include "hat.labels" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ include "hat.fullname" . 
}} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/charts/hat/templates/service.yaml b/charts/hat/templates/service-app.yaml similarity index 82% rename from charts/hat/templates/service.yaml rename to charts/hat/templates/service-app.yaml index a920ec071..562c43739 100644 --- a/charts/hat/templates/service.yaml +++ b/charts/hat/templates/service-app.yaml @@ -3,6 +3,7 @@ kind: Service metadata: name: {{ include "hat.fullname" . }} labels: + app.kubernetes.io/component: app {{- include "hat.labels" . | nindent 4 }} spec: type: {{ .Values.service.type }} @@ -12,4 +13,5 @@ spec: protocol: TCP name: http selector: + app.kubernetes.io/component: app {{- include "hat.selectorLabels" . | nindent 4 }} diff --git a/charts/hat/templates/service-metrics.yaml b/charts/hat/templates/service-metrics.yaml new file mode 100644 index 000000000..3588f0126 --- /dev/null +++ b/charts/hat/templates/service-metrics.yaml @@ -0,0 +1,25 @@ + +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "hat.fullname" . }}-metrics + namespace: {{ .Release.Namespace | quote }} + labels: + app.kubernetes.io/component: metrics + {{- include "hat.labels" . | nindent 4 }} + {{- with .Values.metrics.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - port: {{ .Values.metrics.service.port }} + targetPort: http + protocol: TCP + name: metrics + selector: + app.kubernetes.io/component: app + {{- include "hat.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/charts/hat/templates/serviceaccount.yaml b/charts/hat/templates/serviceaccount.yaml index 80c5f106b..96bcdb695 100644 --- a/charts/hat/templates/serviceaccount.yaml +++ b/charts/hat/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ kind: ServiceAccount metadata: name: {{ include "hat.serviceAccountName" . }} labels: + app.kubernetes.io/component: app {{- include "hat.labels" . | nindent 4 }} {{- with .Values.serviceAccount.annotations }} annotations: diff --git a/charts/hat/templates/servicemonitor.yaml b/charts/hat/templates/servicemonitor.yaml new file mode 100644 index 000000000..7400ce07d --- /dev/null +++ b/charts/hat/templates/servicemonitor.yaml @@ -0,0 +1,46 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "hat.fullname" . }} +{{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace | quote }} +{{- else }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} + labels: + {{- include "hat.labels" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + interval: {{ .Values.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{ toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} +{{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel | quote }} +{{- end }} +{{- if .Values.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.metrics.serviceMonitor.namespaceSelector | nindent 4 }} +{{ else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +{{- if .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- range .Values.metrics.serviceMonitor.targetLabels }} + - {{ . }} + {{- end }} +{{- end }} + selector: + matchLabels: + app.kubernetes.io/component: metrics + {{- include "hat.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/hat/values.yaml b/charts/hat/values.yaml index d92912f69..d126a546f 100644 --- a/charts/hat/values.yaml +++ b/charts/hat/values.yaml @@ -63,6 +63,72 @@ ingress: # hosts: # - chart-example.local +metrics: + enabled: true + + service: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9000" + + type: ClusterIP + port: 9000 + + serviceMonitor: + enabled: true + additionalLabels: {} + # The label to use to retrieve the job name from. + # jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + # Default: scrape .Release.Namespace only + # To scrape all, use the following: + # namespaceSelector: + # any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + metricRelabelings: [] + + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less then a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX, this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX, this requires your attention + resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. 
This also increases chances charts run on environments with little diff --git a/deployment/README.md b/deployment/README.md deleted file mode 100644 index b7db251c9..000000000 --- a/deployment/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# WARNING: the current scripts in these directories are obsolete and will not work out of the box - -You can use them as an indication of what could work for a setup in the different environments - -Pull requests welcome. \ No newline at end of file diff --git a/deployment/docker/README.md b/deployment/docker/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/deployment/docker/archive/README.md b/deployment/docker/archive/README.md deleted file mode 100644 index 345909c96..000000000 --- a/deployment/docker/archive/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# HAT docker scripts - -Create the docker hat and hat-postgres images: - - deployment/docker/docker-build-images.sh - -Test both images (*WARNING that it stops and removes all hat docker images!*): - - deployment/docker/docker-test-images.sh - -Pushes both images to their respective docker hub repositories: - - deployment/docker/docker-push-images.sh - -## Testing docker images - -If you want to give it a test drive just execute: - - docker-test-images.sh - -It downloads the latest images from docker-hub so you dont need to build them yourself. - -It starts 3 hat and 3 hat-postgres images. All accessible on localhost with different ports. - -Find the corresponding port with: - - docker ps - -And finally test in your browser or better still - postman (you might need to set port, username and password accordingly): - - http://127.0.0.1:3003/users/access_token?username=junior@gmail.com&password=junior diff --git a/deployment/docker/archive/database.conf.template b/deployment/docker/archive/database.conf.template deleted file mode 100644 index 5d1aaf467..000000000 --- a/deployment/docker/archive/database.conf.template +++ /dev/null @@ -1,11 +0,0 @@ -devdb = { - dataSourceClass = "org.postgresql.ds.PGSimpleDataSource" - properties = { - databaseName = ${?POSTGRES_DB} - user = ${?POSTGRES_USER} - password = ${?POSTGRES_PASSWORD} - serverName = ${?POSTGRES_HOST} - portNumber = ${?POSTGRES_PORT} - } - numThreads = 3 -} \ No newline at end of file diff --git a/deployment/docker/archive/docker-build-images.sh b/deployment/docker/archive/docker-build-images.sh deleted file mode 100755 index e4dd15931..000000000 --- a/deployment/docker/archive/docker-build-images.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash - -HAT_HOME=${HAT_HOME:-"$PWD"} #if executing from deployment/ : "$PWD/../.." 
-DOCKER=${DOCKER:-"${HAT_HOME}/deployment/docker"} - -bash ${DOCKER}/hat/docker-build.sh -#bash ${DOCKER}/hat-postgres/docker-build.sh diff --git a/deployment/docker/archive/docker-push-images.sh b/deployment/docker/archive/docker-push-images.sh deleted file mode 100755 index dec22fa06..000000000 --- a/deployment/docker/archive/docker-push-images.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -docker push hubofallthings/hat-experimental -docker push hubofallthings/hat-postgres diff --git a/deployment/docker/archive/docker-test-images.sh b/deployment/docker/archive/docker-test-images.sh deleted file mode 100755 index 0e8d95842..000000000 --- a/deployment/docker/archive/docker-test-images.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -docker stop $(docker ps -a | grep "hubofallthings/hat" | awk '{print $1}') -docker rm $(docker ps -a | grep "hubofallthings/hat" | awk '{print $1}') - -port=3001 - -for name in jorge nichola junior; do - - docker run\ - -e "DATABASE=$name"\ - -e "DBUSER=$name"\ - -e "DBPASS=$name"\ - -e "POSTGRES_PASSWORD=$name"\ - -e "POSTGRES_USER=$name"\ - -e "POSTGRES_DB=$name"\ - -e "POSTGRES_HOST=hat-postgres-$name"\ - -e "HAT_OWNER=$name"\ - -e "HAT_OWNER_NAME=$name"\ - -e "HAT_OWNER_PASSWORD=$name"\ - -d --name hat-postgres-$name hubofallthings/hat-postgres - - docker run\ - -e "DATABASE=$name"\ - -e "DBUSER=$name"\ - -e "DBPASS=$name"\ - -e "POSTGRES_PASSWORD=$name"\ - -e "POSTGRES_USER=$name"\ - -e "POSTGRES_DB=$name"\ - -e "POSTGRES_HOST=hat-postgres-$name"\ - -e "HAT_NAME=$name"\ - -e "HAT_OWNER=$name"\ - -e "HAT_OWNER_NAME=$name"\ - -e "HAT_OWNER_PASSWORD=$name"\ - -d --name hat-$name --link hat-postgres-$name -p $port:8080 hubofallthings/hat - - echo -n "The hat-$name is linked to:" - docker inspect -f "{{ .HostConfig.Links }}" hat-$name - - echo -n "The hat-$name IP is:" - ip=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' hat-$name) - echo "$ip:$port" - #echo "The hat-postgres-$name IP is:" - #pg=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' hat-postgres-$name) - #echo $pg - - port=$((port+1)) -done - -echo "Running processes:" -docker ps diff --git a/deployment/docker/archive/hat-postgres/Dockerfile b/deployment/docker/archive/hat-postgres/Dockerfile deleted file mode 100644 index e36055e5f..000000000 --- a/deployment/docker/archive/hat-postgres/Dockerfile +++ /dev/null @@ -1,85 +0,0 @@ -# vim:set ft=dockerfile: -FROM debian:jessie -#FROM gliderlabs/alpine:3.1 - -# explicitly set user/group IDs -RUN groupadd -r postgres --gid=999 && useradd -r -g postgres --uid=999 postgres - -# grab gosu for easy step-down from root -RUN gpg --keyserver pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates wget && rm -rf /var/lib/apt/lists/* \ - && wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/1.2/gosu-$(dpkg --print-architecture)" \ - && wget -O /usr/local/bin/gosu.asc "https://github.com/tianon/gosu/releases/download/1.2/gosu-$(dpkg --print-architecture).asc" \ - && gpg --verify /usr/local/bin/gosu.asc \ - && rm /usr/local/bin/gosu.asc \ - && chmod +x /usr/local/bin/gosu \ - && apt-get purge -y --auto-remove ca-certificates wget - -# make the "en_US.UTF-8" locale so postgres will be utf-8 enabled by default -RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \ - && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 -ENV LANG en_US.utf8 - -RUN mkdir 
/docker-entrypoint-initdb.d - -RUN apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 - -ENV PG_MAJOR 9.4 -ENV PG_VERSION 9.4.9-0+deb8u1 - -RUN echo 'deb http://apt.postgresql.org/pub/repos/apt/ jessie-pgdg main' $PG_MAJOR > /etc/apt/sources.list.d/pgdg.list - -RUN apt-get update \ - && apt-get install -y postgresql-common \ - && sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf \ - && apt-get install -y \ - postgresql-$PG_MAJOR=$PG_VERSION \ - postgresql-contrib-$PG_MAJOR=$PG_VERSION \ - && rm -rf /var/lib/apt/lists/* - -RUN mkdir -p /var/run/postgresql && chown -R postgres /var/run/postgresql - -ENV PATH /usr/lib/postgresql/$PG_MAJOR/bin:$PATH -ENV PGDATA /var/lib/postgresql/data -VOLUME /var/lib/postgresql/data - -ADD init/docker-entrypoint.sh / - -ENTRYPOINT ["/docker-entrypoint.sh"] - -#EXPOSE 5432 -CMD ["postgres"] - -#------------------------HAT SPECIFIC------------------------------- - -#Setup environment variables used by docker-deploy-db.sh in docker -# Here just the default values get set, can be overriden by providing --env parameters -ENV HAT_HOME=/opt/hat -ENV DATABASE=hat20 -ENV DBUSER=hat20 -ENV DBPASS=hat20 - -ENV HAT_OWNER=bob@gmail.com -ENV HAT_OWNER_ID=5974832d-2dc1-4f49-adf1-c6d8bc790274 -ENV HAT_OWNER_NAME=bob -ENV HAT_OWNER_PASSWORD=pa55word - -ENV HAT_PLATFORM=hatdex.org -ENV HAT_PLATFORM_ID=47dffdfd-55e8-4575-836c-151e30bb5a50 -ENV HAT_PLATFORM_NAME=hatdex -ENV HAT_PLATFORM_PASSWORD_HASH='$2a$04$oL2CXTHzB..OekL1z8Vijus3RkHQjSsbkAYOiA5Rj.7.6GA7a4qAq' - -#Required by the postgres container (docker-entrypoint.sh) -#Also sets up authMethod=md5 -#Check : https://github.com/docker-library/postgres/blob/ed23320582f4ec5b0e5e35c99d98966dacbc6ed8/9.4/docker-entrypoint.sh -ENV POSTGRES_PASSWORD=$DBPASS -ENV POSTGRES_USER=$DBUSER -ENV POSTGRES_DB=$DBPASS - -#Get HAT sql files sql. -RUN mkdir -p /opt/hat -ADD required/ /opt/hat/ - -#This will initialize the database -ADD init/docker-deploy-db.sh /docker-entrypoint-initdb.d/ - diff --git a/deployment/docker/archive/hat-postgres/docker-build.sh b/deployment/docker/archive/hat-postgres/docker-build.sh deleted file mode 100755 index 3f4aa0a8a..000000000 --- a/deployment/docker/archive/hat-postgres/docker-build.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -set -e - -DATABASE=${DATABASE:-"hat20"} -DBUSER=${DBUSER:-$DATABASE} -DBPASS=${DBPASS:-"hat20"} -#tipically docker folder is in HAT2.0/deployment/docker -HAT_HOME=${HAT_HOME:-"$PWD"} #if executing from deployment/ : "$PWD/../.." 
-DOCKER=${DOCKER:-"$PWD/deployment/docker/hat-postgres"} -DOCKER_DEPLOY=${DOCKER}/docker-deploy - -echo "Creating $DOCKER_DEPLOY" -mkdir ${DOCKER_DEPLOY} -mkdir ${DOCKER_DEPLOY}/required - -echo "Copying required files" -cp -r ${HAT_HOME}/hat-database-schema/*.sql ${DOCKER_DEPLOY}/required/ -cp -r ${HAT_HOME}/hat-database-schema/*.sql.template ${DOCKER_DEPLOY}/required/ -cp -r ${HAT_HOME}/hat-database-schema/setupAccess.sh ${DOCKER_DEPLOY}/required/ - -cp -r ${DOCKER}/init ${DOCKER_DEPLOY}/init -cp ${DOCKER}/Dockerfile ${DOCKER_DEPLOY}/Dockerfile - -#echo "Building db docker image: docker-hat-postgres" -docker build -t hubofallthings/hat-postgres ${DOCKER_DEPLOY} - -echo "Cleaning up" -rm -r ${DOCKER_DEPLOY} diff --git a/deployment/docker/archive/hat-postgres/init/docker-deploy-db.sh b/deployment/docker/archive/hat-postgres/init/docker-deploy-db.sh deleted file mode 100644 index c81697550..000000000 --- a/deployment/docker/archive/hat-postgres/init/docker-deploy-db.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash - -#THIS SCRIPT SHOULD EXECUTE WITHIN THE DOCKER POSTGRES IMAGE! - -DATABASE=${DATABASE:-"hat20"} -DBUSER=${DBUSER:-$DATABASE} -DBPASS=${DBPASS:-"hat20"} -#In case we are not executing the deploy from the repo (e.g., in container) -HAT_HOME=${HAT_HOME:-"/opt/hat"} - -export POSTGRES_PASSWORD=$DBPASS -export POSTGRES_USER=$DBUSER -export POSTGRES_DB=$DATABASE -export PGUSER=postgres - -#DBUSER wouldnt have required permissions to drop/create public schema otherwise -echo "Setting up database" -psql ${DATABASE} < ${HAT_HOME}/01_init.sql - -echo "Setting up main schema" -psql ${DATABASE} -U$DBUSER < ${HAT_HOME}/11_hat.sql - -echo "Setting up evolutions (without running the evolutions engine)" -psql ${DATABASE} -U$DBUSER < ${HAT_HOME}/12_hatEvolutions.sql -psql ${DATABASE} -U$DBUSER < ${HAT_HOME}/13_liveEvolutions.sql - - -# Setup HAT access -echo "Setting up HAT access" -HAT_OWNER=${HAT_OWNER:-'bob@gmail.com'} -HAT_OWNER_ID=${HAT_OWNER_ID:-5974832d-2dc1-4f49-adf1-c6d8bc790274} -HAT_OWNER_NAME=${HAT_OWNER_NAME:-'Bob'} -HAT_OWNER_PASSWORD=${HAT_OWNER_PASSWORD:-'pa55w0rd'} - -HAT_PLATFORM=${HAT_PLATFORM:-'hatdex.org'} -HAT_PLATFORM_ID=${HAT_PLATFORM_ID:-47dffdfd-55e8-4575-836c-151e30bb5a50} -HAT_PLATFORM_NAME=${HAT_PLATFORM_NAME:-'hatdex'} -HAT_PLATFORM_PASSWORD_HASH=${HAT_PLATFORM_PASSWORD_HASH:-'$2a$04$oL2CXTHzB..OekL1z8Vijus3RkHQjSsbkAYOiA5Rj.7.6GA7a4qAq'} - -cd ${HAT_HOME} -bash ./setupAccess.sh - -# Execute the sql script -psql $DATABASE -U$DBUSER < ${HAT_HOME}/41_authentication.sql - -# Remove the sql file with sensitive credentials -rm ${HAT_HOME}/41_authentication.sql - -echo "Boilerplate setup" -psql $DATABASE -U$DBUSER < ${HAT_HOME}/31_properties.sql -psql $DATABASE -U$DBUSER < ${HAT_HOME}/32_relationships.sql -psql $DATABASE -U$DBUSER < ${HAT_HOME}/33_staticData.sql -psql $DATABASE -U$DBUSER < ${HAT_HOME}/35_sampleCollections.sql - -env diff --git a/deployment/docker/archive/hat-postgres/init/docker-entrypoint.sh b/deployment/docker/archive/hat-postgres/init/docker-entrypoint.sh deleted file mode 100755 index 87d7e3b87..000000000 --- a/deployment/docker/archive/hat-postgres/init/docker-entrypoint.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash -set -e - -set_listen_addresses() { - sedEscapedValue="$(echo "$1" | sed 's/[\/&]/\\&/g')" - sed -ri "s/^#?(listen_addresses\s*=\s*)\S+/\1'$sedEscapedValue'/" "$PGDATA/postgresql.conf" -} - -if [ "$1" = 'postgres' ]; then - mkdir -p "$PGDATA" - chown -R postgres "$PGDATA" - - chmod g+s /run/postgresql - chown -R postgres 
/run/postgresql - - # look specifically for PG_VERSION, as it is expected in the DB dir - if [ ! -s "$PGDATA/PG_VERSION" ]; then - gosu postgres initdb - - # check password first so we can output the warning before postgres - # messes it up - if [ "$POSTGRES_PASSWORD" ]; then - pass="PASSWORD '$POSTGRES_PASSWORD'" - authMethod=md5 - else - # The - option suppresses leading tabs but *not* spaces. :) - cat >&2 <<-'EOWARN' - **************************************************** - WARNING: No password has been set for the database. - This will allow anyone with access to the - Postgres port to access your database. In - Docker's default configuration, this is - effectively any other container on the same - system. - - Use "-e POSTGRES_PASSWORD=password" to set - it in "docker run". - **************************************************** - EOWARN - - pass= - authMethod=trust - fi - - { echo; echo "host all all 0.0.0.0/0 $authMethod"; } >> "$PGDATA/pg_hba.conf" - - # internal start of server in order to allow set-up using psql-client - # does not listen on TCP/IP and waits until start finishes - gosu postgres pg_ctl -D "$PGDATA" \ - -o "-c listen_addresses=''" \ - -w start - - : ${POSTGRES_USER:=postgres} - : ${POSTGRES_DB:=$POSTGRES_USER} - export POSTGRES_USER POSTGRES_DB - - if [ "$POSTGRES_DB" != 'postgres' ]; then - psql --username postgres <<-EOSQL - CREATE DATABASE "$POSTGRES_DB" ; - EOSQL - echo - fi - - if [ "$POSTGRES_USER" = 'postgres' ]; then - op='ALTER' - else - op='CREATE' - fi - - psql --username postgres <<-EOSQL - $op USER "$POSTGRES_USER" WITH SUPERUSER $pass ; - EOSQL - echo - - echo - for f in /docker-entrypoint-initdb.d/*; do - case "$f" in - *.sh) echo "$0: running $f"; . "$f" ;; - *.sql) echo "$0: running $f"; psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < "$f" && echo ;; - *) echo "$0: ignoring $f" ;; - esac - echo - done - - gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop - set_listen_addresses '*' - - echo - echo 'PostgreSQL init process complete; ready for start up.' - echo - fi - - exec gosu postgres "$@" -fi - -exec "$@" diff --git a/deployment/docker/archive/hat/Dockerfile b/deployment/docker/archive/hat/Dockerfile deleted file mode 100644 index 078a07996..000000000 --- a/deployment/docker/archive/hat/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM openjdk:8-jre -WORKDIR /opt/docker -ADD opt /opt -COPY conf/keystore.jks /opt -RUN ["chown", "-R", "daemon:daemon", "."] -USER daemon -EXPOSE 8080 -ENV JAVA_OPTS="-Xmx500m -Xms100m" -ENTRYPOINT ["bin/hat", "-Dhttp.port=80", "-Dhttps.port=8080"] -CMD [] diff --git a/deployment/docker/archive/hat/docker-build.sh b/deployment/docker/archive/hat/docker-build.sh deleted file mode 100755 index 5e5c481c2..000000000 --- a/deployment/docker/archive/hat/docker-build.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -set -e - -#typically Docker HAT container folder is in HAT2.0/deployment/docker/hat -HAT_HOME=${HAT_HOME:-"$PWD/hat"} #if executing from deployment/ : "$PWD/../.." -DOCKER=${DOCKER:-"$PWD/deployment/docker/hat"} -DOCKER_DEPLOY=$DOCKER/docker-deploy -APP=${APPLICATION_NAME:-hat} - -echo "Creating $DOCKER_DEPLOY" -mkdir $DOCKER_DEPLOY - -echo "Building ${APP} : sbt docker:stage" -sbt "project hat" docker:stage - -if [ ! -f "$HAT_HOME/target/docker/Dockerfile" ]; then - echo "Missing $HAT_HOME/target/docker/Dockerfile" - echo "The docker-hat container was not created." - echo "Please run 'sbt docker:stage' on main folder and re-run this script to generate it." 
- exit -fi - -cp -r $HAT_HOME/target/docker/stage/opt $DOCKER_DEPLOY/ - -cp $DOCKER/Dockerfile $DOCKER_DEPLOY/Dockerfile - -echo "Building hat docker image: docker-hat hubofallthings/${APP}" -docker build -t hubofallthings/${APP} $DOCKER_DEPLOY - -echo "Cleaning up" -rm -r $DOCKER_DEPLOY diff --git a/deployment/docker/docker-compose.yml b/deployment/docker/docker-compose.yml deleted file mode 100644 index 0cc4e8f9b..000000000 --- a/deployment/docker/docker-compose.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: "3" - -services: - - database: - image: hubofallthings/hat-postgres:latest - expose: ["5432/tcp"] - networks: ["sandbox"] - environment: - - POSTGRES_USER=master - - POSTGRES_PASSWORD=pa55w0rd - - hat_server: - image: hubofallthings/hat:v2.6.6 - ports: ["9000:9000", "9001:9001"] - networks: ["sandbox"] - volumes: - - .:/code - depends_on: - - database - environment: - - DB_USER=master - - DB_PASSWORD=pa55w0rd - - DB_HOST=database - -networks: - sandbox: - driver: bridge diff --git a/deployment/docker/hat-postgres/001-create-db-user.sql b/deployment/docker/hat-postgres/001-create-db-user.sql deleted file mode 100644 index 18a665c36..000000000 --- a/deployment/docker/hat-postgres/001-create-db-user.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE DATABASE testhatdb1; -CREATE USER testhatdb1 WITH PASSWORD 'testing'; -GRANT CREATE ON DATABASE testhatdb1 TO testhatdb1; diff --git a/deployment/docker/hat-postgres/Dockerfile b/deployment/docker/hat-postgres/Dockerfile deleted file mode 100644 index 8bf2735b2..000000000 --- a/deployment/docker/hat-postgres/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM postgres:9.6-alpine - -COPY 001-create-db-user.sql /docker-entrypoint-initdb.d/001-create-db-user.sql diff --git a/deployment/docker/hat-postgres/docker-db-deploy.sh b/deployment/docker/hat-postgres/docker-db-deploy.sh deleted file mode 100755 index 5d4675025..000000000 --- a/deployment/docker/hat-postgres/docker-db-deploy.sh +++ /dev/null @@ -1,26 +0,0 @@ -##!/usr/bin/env bash -#set -e -# -#DATABASE=${DATABASE:-"hat20"} -#DBUSER=${DBUSER:-$DATABASE} -#DBPASS=${DBPASS:-"hat20"} -##tipically docker folder is in HAT2.0/deployment/docker -#HAT_HOME=${HAT_HOME:-"$PWD"} #if executing from deployment/ : "$PWD/../.." -#DOCKER=${DOCKER:-"$PWD/deployment/docker/hat-postgres"} -#DOCKER_DEPLOY=${DOCKER}/docker-deploy -# -#echo "Creating $DOCKER_DEPLOY" -#mkdir ${DOCKER_DEPLOY} -#mkdir ${DOCKER_DEPLOY}/required -# -#echo "Copying required files" -#cp -r ${HAT_HOME}/hat-database-schema/*.sql ${DOCKER_DEPLOY}/required/ -#cp -r ${HAT_HOME}/hat-database-schema/*.sql.template ${DOCKER_DEPLOY}/required/ -#cp -r ${HAT_HOME}/hat-database-schema/setupAccess.sh ${DOCKER_DEPLOY}/required/ -# -#cp -r ${DOCKER}/init ${DOCKER_DEPLOY}/init -#cp ${DOCKER}/Dockerfile ${DOCKER_DEPLOY}/Dockerfile - -echo "Building DB docker image: hat-postgres" -docker build -t hubofallthings/hat-postgres:latest . 
-docker push hubofallthings/hat-postgres:latest diff --git a/deployment/docker/hat/docker-public-deploy.sh b/deployment/docker/hat/docker-public-deploy.sh deleted file mode 100755 index cd9868a0d..000000000 --- a/deployment/docker/hat/docker-public-deploy.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -e - -REPOSITORY_NAME=${REPOSITORY_NAME:-hubofallthings} - -VERSION=${HAT_VERSION:-$(git log --format="%H" -n 1)} -APPLICATION_NAME="hat" - -echo "Build ${APPLICATION_NAME}:${VERSION}" -sbt "project ${APPLICATION_NAME}" docker:stage - -echo "Create package" -cd ${APPLICATION_NAME}/target/docker/stage -docker build -t ${REPOSITORY_NAME}/${APPLICATION_NAME}:${VERSION} . -docker push ${REPOSITORY_NAME}/${APPLICATION_NAME}:${VERSION} diff --git a/deployment/ecs/docker-aws-deploy.sh b/deployment/ecs/docker-aws-deploy.sh deleted file mode 100755 index 9c53f1cdd..000000000 --- a/deployment/ecs/docker-aws-deploy.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -e - -REPOSITORY_NAME=${REPOSITORY_NAME:-hubofallthings} - -VERSION=`git log --format="%H" -n 1` -APPLICATION_NAME="hat" - -echo "Build ${APPLICATION_NAME} ${VERSION}" -sbt "project ${APPLICATION_NAME}" docker:stage - -echo "Create package" -cd ${APPLICATION_NAME}/target/docker/stage -docker build -t ${REPOSITORY_NAME}/${APPLICATION_NAME}:${VERSION} . -cd - diff --git a/deployment/elasticBeanstalk/.ebextensions/00_nginx_https_rw.config b/deployment/elasticBeanstalk/.ebextensions/00_nginx_https_rw.config deleted file mode 100644 index 7bb4514ce..000000000 --- a/deployment/elasticBeanstalk/.ebextensions/00_nginx_https_rw.config +++ /dev/null @@ -1,34 +0,0 @@ -files: - "/tmp/45_nginx_https_rw.sh" : - owner: root - group: root - mode: "000644" - content: | - #! /bin/bash - echo "Starting redirect setup!" >> /var/log/customlog.out - logger -t nginx_rw "Setting up HTTP redirection" - - CONFIGURED=`grep -c "return 301 https" /etc/nginx/sites-enabled/elasticbeanstalk-nginx-docker-proxy.conf` - - logger -t nginx_rws "Already configured? 
$CONFIGURED" - - if [ $CONFIGURED = 0 ] - then - sed -i '/listen 80;/a \ if ($http_x_forwarded_proto = "http") { return 301 https://$host$request_uri; }\n' /etc/nginx/sites-enabled/elasticbeanstalk-nginx-docker-proxy.conf - logger -t nginx_rw "https rewrite rules added" - /etc/init.d/nginx reload - exit 0 - else - logger -t nginx_rw "https rewrite rules already set" - exit 0 - fi - -container_commands: - 00_appdeploy_rewrite_hook: - command: cp -v /tmp/45_nginx_https_rw.sh /opt/elasticbeanstalk/hooks/appdeploy/enact - 01_configdeploy_rewrite_hook: - command: cp -v /tmp/45_nginx_https_rw.sh /opt/elasticbeanstalk/hooks/configdeploy/enact - 02_rewrite_hook_perms: - command: chmod 755 /opt/elasticbeanstalk/hooks/appdeploy/enact/45_nginx_https_rw.sh /opt/elasticbeanstalk/hooks/configdeploy/enact/45_nginx_https_rw.sh - 03_rewrite_hook_ownership: - command: chown root:users /opt/elasticbeanstalk/hooks/appdeploy/enact/45_nginx_https_rw.sh /opt/elasticbeanstalk/hooks/configdeploy/enact/45_nginx_https_rw.sh \ No newline at end of file diff --git a/deployment/elasticBeanstalk/.ebextensions/01_https_certificate.config b/deployment/elasticBeanstalk/.ebextensions/01_https_certificate.config deleted file mode 100644 index 6455e7afc..000000000 --- a/deployment/elasticBeanstalk/.ebextensions/01_https_certificate.config +++ /dev/null @@ -1,12 +0,0 @@ -option_settings: - aws:elb:listener:443: - ListenerProtocol: HTTPS - SSLCertificateId: "arn:aws:acm:eu-west-1:974966015544:certificate/5c37c175-ba39-4668-bf80-6a4bff361593" - InstancePort: 80 - InstanceProtocol: HTTP - ListenerEnabled: true - aws:elb:listener:80: - ListenerProtocol: HTTP - InstancePort: 80 - InstanceProtocol: HTTP - ListenerEnabled: true diff --git a/deployment/elasticBeanstalk/Dockerrun.aws.json b/deployment/elasticBeanstalk/Dockerrun.aws.json deleted file mode 100644 index 6a50cc9fb..000000000 --- a/deployment/elasticBeanstalk/Dockerrun.aws.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "AWSEBDockerrunVersion": "1", - "Ports": [ - { - "ContainerPort": "9000" - } - ] -} \ No newline at end of file diff --git a/deployment/elasticBeanstalk/eb-deploy.sh b/deployment/elasticBeanstalk/eb-deploy.sh deleted file mode 100755 index 480dffffd..000000000 --- a/deployment/elasticBeanstalk/eb-deploy.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -e - -BUCKET=${AWS_BUCKET:-hat-cloud-formation-eu} -VERSION=`git log --format="%H" -n 1` -APPLICATION_NAME="hat" - -echo "Build version ${APPLICATION_NAME} ${VERSION}" -sbt "project hat" docker:stage - -echo "Create package" -cp -r deployment/elasticBeanstalk/Dockerrun.aws.json deployment/elasticBeanstalk/.ebextensions hat/target/docker/stage -cd hat/target/docker/stage -zip -q -r ${APPLICATION_NAME}-${VERSION}.zip * .ebextensions - -echo "Upload package" -aws s3 cp ${APPLICATION_NAME}-${VERSION}.zip s3://$BUCKET/apps/${APPLICATION_NAME}-${VERSION} - -echo "Cleanup" -rm ${APPLICATION_NAME}-${VERSION}.zip diff --git a/hat/conf/cache.conf b/hat/conf/cache.conf index 6474e52fd..ccac4d83d 100644 --- a/hat/conf/cache.conf +++ b/hat/conf/cache.conf @@ -13,6 +13,7 @@ akka { } serialization-bindings { "io.dataswift.models.hat.applications.HatApplication" = kryo + "io.dataswift.models.hat.applications.Application" = kryo "org.hatdex.hat.authentication.models.HatUser" = kryo "org.hatdex.hat.resourceManagement.models.HatSignup" = kryo "play.api.cache.SerializableResult" = kryo diff --git a/hat/conf/docker.conf b/hat/conf/docker.conf index d8852f653..7703e14a3 100644 --- a/hat/conf/docker.conf +++ b/hat/conf/docker.conf @@ 
-9,11 +9,9 @@ play { filters { enabled = [] - enabled += "play.filters.hosts.AllowedHostsFilter" enabled += "play.filters.cors.CORSFilter" enabled += "play.filters.gzip.GzipFilter" enabled += "org.hatdex.hat.utils.LoggingFilter" - enabled += "org.hatdex.hat.utils.TLSFilter" } } } diff --git a/hat/conf/play.conf b/hat/conf/play.conf index 78feac2e2..63f09c702 100644 --- a/hat/conf/play.conf +++ b/hat/conf/play.conf @@ -86,10 +86,8 @@ play { enabled = [ "com.github.stijndehaes.playprometheusfilters.filters.StatusAndRouteLatencyAndCounterFilter", "org.hatdex.hat.utils.LoggingFilter", - "play.filters.hosts.AllowedHostsFilter", "play.filters.cors.CORSFilter", - "play.filters.gzip.GzipFilter", - "org.hatdex.hat.utils.TLSFilter" + "play.filters.gzip.GzipFilter" ] } } diff --git a/project/BasicSettings.scala b/project/BasicSettings.scala index bfdc9ad39..10586322c 100644 --- a/project/BasicSettings.scala +++ b/project/BasicSettings.scala @@ -31,15 +31,6 @@ import sbt._ object BasicSettings extends AutoPlugin { override def trigger = allRequirements - object autoImport { - object BuildEnv extends Enumeration { - val Production, Stage, Test, Developement = Value - } - - val buildEnv = settingKey[BuildEnv.Value]("the current build environment") - } - import autoImport._ - override def projectSettings = Seq( organization := "org.hatdex", @@ -79,27 +70,6 @@ object BasicSettings extends AutoPlugin { // in Travis with `sudo: false`. // See https://github.com/sbt/sbt/issues/653 // and https://github.com/travis-ci/travis-ci/issues/3775 - javaOptions += "-Xmx1G", - buildEnv := { - sys.props - .get("env") - .orElse(sys.env.get("BUILD_ENV")) - .flatMap { - case "prod" => Some(BuildEnv.Production) - case "stage" => Some(BuildEnv.Stage) - case "test" => Some(BuildEnv.Test) - case "dev" => Some(BuildEnv.Developement) - case unknown => None - } - .getOrElse(BuildEnv.Developement) - }, - // give feed back - onLoadMessage := { - // depend on the old message as well - val defaultMessage = onLoadMessage.value - val env = buildEnv.value - s"""|$defaultMessage - |Running in build environment: $env""".stripMargin - } + javaOptions += "-Xmx1G" ) } diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 8b93ab66d..41adb8075 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -26,97 +26,44 @@ import sbt._ object Dependencies { - private object Version { - val Play: String = play.core.PlayVersion.current - val PlayJson = "2.9.2" - val Silhouette = "5.2.0" - val AtlassianJwt = "3.2.0" - val AwsSdk = "1.11.1003" - val AlpakkaAwsLambda = "1.1.2" - val CommonsLang3 = "3.11" - val BouncyCastle = "1.68" - val PlayPrometheusFilters = "0.6.1" - val PlayGuard = "2.5.0" - val PrettyTime = "5.0.0.Final" - val Nbvcxz = "1.5.0" - - val Adjudicator = "0.1.0-SNAPSHOT" - val DexClient = "3.2.2" - val DsBackend = "2.3.1" - val DsTestTools = "0.2.5" - } - val resolvers = Seq( "Atlassian" at "https://maven.atlassian.com/public/", - "HAT Library Artifacts Releases" at "https://s3-eu-west-1.amazonaws.com/library-artifacts-releases.hubofallthings.com", - "HAT Library Artifacts Snapshots" at "https://s3-eu-west-1.amazonaws.com/library-artifacts-snapshots.hubofallthings.com" + "HAT Library Artifacts Releases" at "https://s3-eu-west-1.amazonaws.com/library-artifacts-releases.hubofallthings.com" ) - object Library { - - object Play { - val ws = "com.typesafe.play" %% "play-ws" % Version.Play - val cache = "com.typesafe.play" %% "play-cache" % Version.Play - val test = "com.typesafe.play" %% "play-test" % 
Version.Play - val jdbc = "com.typesafe.play" %% "play-jdbc" % Version.Play - val json = "com.typesafe.play" %% "play-json" % Version.PlayJson - val jsonJoda = "com.typesafe.play" %% "play-json-joda" % Version.PlayJson - val playGuard = "com.digitaltangible" %% "play-guard" % Version.PlayGuard - - object Jwt { - val bouncyCastle = "org.bouncycastle" % "bcprov-jdk15on" % Version.BouncyCastle - val bouncyCastlePkix = "org.bouncycastle" % "bcpkix-jdk15on" % Version.BouncyCastle - val atlassianJwtCore = "com.atlassian.jwt" % "jwt-core" % Version.AtlassianJwt - } - - object Silhouette { - val passwordBcrypt = "com.mohiva" %% "dataswift-play-silhouette-password-bcrypt" % Version.Silhouette - val persistence = "com.mohiva" %% "dataswift-play-silhouette-persistence" % Version.Silhouette - val cryptoJca = "com.mohiva" %% "dataswift-play-silhouette-crypto-jca" % Version.Silhouette - val silhouette = "com.mohiva" %% "dataswift-play-silhouette" % Version.Silhouette - val silhouetteTestkit = "com.mohiva" %% "dataswift-play-silhouette-testkit" % Version.Silhouette % Test - } - } - - object Utils { - val awsJavaS3Sdk = "com.amazonaws" % "aws-java-sdk-s3" % Version.AwsSdk - val awsJavaSesSdk = "com.amazonaws" % "aws-java-sdk-ses" % Version.AwsSdk - val awsJavaLambdaSdk = "com.amazonaws" % "aws-java-sdk-lambda" % Version.AwsSdk - val prettyTime = "org.ocpsoft.prettytime" % "prettytime" % Version.PrettyTime - val nbvcxz = "me.gosimple" % "nbvcxz" % Version.Nbvcxz - val alpakkaAwsLambda = "com.lightbend.akka" %% "akka-stream-alpakka-awslambda" % Version.AlpakkaAwsLambda - val apacheCommonLang = "org.apache.commons" % "commons-lang3" % Version.CommonsLang3 + object DsLib { + private object Version { + val DsAdjudicator = "0.2.0" + val DsBackend = "2.5.0" + val DsDexClient = "3.3.1" + val DsSilhouette = "5.3.0" + val DsSlickPostgresDriver = "0.1.2" } - object Backend { - val logPlay = "io.dataswift" %% "log-play" % Version.DsBackend - val redisCache = "io.dataswift" %% "redis-cache" % Version.DsBackend - } - - object HATDeX { - val dexClient = "org.hatdex" %% "dex-client-scala" % Version.DexClient - val codegen = "org.hatdex" %% "slick-postgres-driver" % "0.1.2" - } - - val scalaGuice = "net.codingwell" %% "scala-guice" % "4.2.11" - val circeConfig = "io.circe" %% "circe-config" % "0.8.0" - val janino = "org.codehaus.janino" % "janino" % "3.1.3" - - object ContractLibrary { - val adjudicator = "io.dataswift" %% "adjudicatorlib" % Version.Adjudicator - } - - object Prometheus { - val filters = "io.github.jyllands-posten" %% "play-prometheus-filters" % Version.PlayPrometheusFilters - } + val Adjudicator = "io.dataswift" %% "adjudicatorlib" % Version.DsAdjudicator + val DexClient = "org.hatdex" %% "dex-client-scala" % Version.DsDexClient + val IntegrationTestCommon = "io.dataswift" %% "integration-test-common" % Version.DsBackend + val PlayCommon = "io.dataswift" %% "play-common" % Version.DsBackend + val RedisCache = "io.dataswift" %% "redis-cache" % Version.DsBackend + val SilhouetteCryptoJca = "com.mohiva" %% "dataswift-play-silhouette-crypto-jca" % Version.DsSilhouette + val SilhouettePasswordBcrypt = "com.mohiva" %% "dataswift-play-silhouette-password-bcrypt" % Version.DsSilhouette + val SilhouettePersistence = "com.mohiva" %% "dataswift-play-silhouette-persistence" % Version.DsSilhouette + val SilhouetteTestkit = "com.mohiva" %% "dataswift-play-silhouette-testkit" % Version.DsSilhouette + val SlickPostgresDriver = "org.hatdex" %% "slick-postgres-driver" % Version.DsSlickPostgresDriver + } - object ScalaTest 
{ - val scalaplaytestmock = "org.scalatestplus" %% "mockito-3-4" % "3.2.7.0" % Test - val mockitoCore = "org.mockito" % "mockito-core" % "3.4.6" % Test - } + private object Version { + val AlpakkaAwsLambda = "1.1.2" + val CirceConfig = "0.8.0" + val PrettyTime = "5.0.0.Final" + val ScalaTestplusMockito = "3.2.9.0" + } - object Dataswift { - val integrationTestCommon = "io.dataswift" %% "integration-test-common" % Version.DsTestTools % Test - } + object LocalThirdParty { + val AlpakkaAwsLambda = "com.lightbend.akka" %% "akka-stream-alpakka-awslambda" % Version.AlpakkaAwsLambda + val CirceConfig = "io.circe" %% "circe-config" % Version.CirceConfig + val PrettyTime = "org.ocpsoft.prettytime" % "prettytime" % Version.PrettyTime + val ScalaTestplusMockito = "org.scalatestplus" %% "mockito-3-4" % Version.ScalaTestplusMockito } + } diff --git a/project/build.properties b/project/build.properties index 89c0bdc94..3b1d9b118 100644 --- a/project/build.properties +++ b/project/build.properties @@ -22,4 +22,4 @@ # 2 / 2017 # -sbt.version=1.5.2 +sbt.version=1.5.3 diff --git a/project/plugins.sbt b/project/plugins.sbt index 008e2394b..d4d6fb834 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,25 +1,9 @@ resolvers += "HAT Library Artifacts Releases" at "https://s3-eu-west-1.amazonaws.com/library-artifacts-releases.hubofallthings.com" - -addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.8.1") -addSbtPlugin("com.typesafe.play" % "sbt-plugin" % "2.8.8") - -// Code Quality -addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.6.1") -addSbtPlugin("org.scoverage" % "sbt-coveralls" % "1.2.7") - -// web plugins -addSbtPlugin("com.typesafe.sbt" % "sbt-web" % "1.4.4") -addSbtPlugin("com.typesafe.sbt" % "sbt-digest" % "1.1.4") -addSbtPlugin("com.typesafe.sbt" % "sbt-gzip" % "1.0.2") -addSbtPlugin("org.irundaia.sbt" % "sbt-sassify" % "1.5.1") - -addSbtPlugin("org.hatdex" % "sbt-slick-postgres-generator" % "0.1.2") - -// run "sbt dependencyUpdates" to check maven for updates or "sbt ";dependencyUpdates; reload plugins; dependencyUpdates" for sbt plugins -addSbtPlugin("com.timushev.sbt" % "sbt-updates" % "0.5.2") - -// ScalaFMT, ScalaFIX and Tools Common -addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.2") -addSbtPlugin("ch.epfl.scala" % "sbt-scalafix" % "0.9.27") -addSbtPlugin("io.dataswift" % "sbt-scalatools-common" % "0.3.1") -addSbtPlugin("com.dwijnand" % "sbt-dynver" % "4.1.1") +addSbtPlugin("com.typesafe.sbt" % "sbt-digest" % "1.1.4") +addSbtPlugin("com.typesafe.sbt" % "sbt-gzip" % "1.0.2") +addSbtPlugin("com.typesafe.sbt" % "sbt-web" % "1.4.4") +addSbtPlugin("io.dataswift" % "sbt-scalatools-common" % "0.5.2") +addSbtPlugin("org.hatdex" % "sbt-slick-postgres-generator" % "0.1.2") +addSbtPlugin("org.irundaia.sbt" % "sbt-sassify" % "1.5.1") +addSbtPlugin("org.scoverage" % "sbt-coveralls" % "1.2.7") +addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.8.2")