diff --git a/handout/devops/kubernetes/index.html b/handout/devops/kubernetes/index.html index bea10b0..a89b876 100644 --- a/handout/devops/kubernetes/index.html +++ b/handout/devops/kubernetes/index.html @@ -2459,6 +2459,8 @@
Configuração de conexão do banco
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: postgres-configmap
- labels:
- app: postgres
-data:
- POSTGRES_HOST: postgres
- POSTGRES_DB: store
+configmap.yamlapiVersion: v1
+kind: ConfigMap
+metadata:
+ name: postgres-configmap
+ labels:
+ app: postgres
+data:
+ POSTGRES_HOST: postgres
+ POSTGRES_DB: store
-
Configuração de acesso ao banco
-credentials.yamlapiVersion: v1
-kind: Secret
-metadata:
- name: postgres-credentials
-data:
- POSTGRES_USER: c3RvcmU=
- POSTGRES_PASSWORD: c3RvcmU=
+credentials.yamlapiVersion: v1
+kind: Secret
+metadata:
+ name: postgres-credentials
+data:
+ POSTGRES_USER: c3RvcmU=
+ POSTGRES_PASSWORD: c3RvcmU=
-kubectl apply -f ./k8s/credentials.yaml
-kubectl get secrets
+
Use encode base64 para ofuscar a senha. Vide: Base64Encode.
Persistent Volume: espaço alocado no cluster
-pv.yamlapiVersion: v1
-kind: PersistentVolume
-metadata:
- name: postgres-volume
- labels:
- type: local
- app: postgres
-spec:
- storageClassName: manual
- capacity:
- storage: 10Gi
- accessModes:
- - ReadWriteMany
- hostPath:
- path: /data/postgresql
+pv.yamlapiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: postgres-volume
+ labels:
+ type: local
+ app: postgres
+spec:
+ storageClassName: manual
+ capacity:
+ storage: 10Gi
+ accessModes:
+ - ReadWriteMany
+ hostPath:
+ path: /data/postgresql
-
Persistent Volume Claim: espaço alocado do cluster para os pods.
-pvc.yamlapiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: postgres-volume-claim
- labels:
- app: postgres
-spec:
- storageClassName: manual
- accessModes:
- - ReadWriteMany
- resources:
- requests:
- storage: 10Gi
+pvc.yamlapiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: postgres-volume-claim
+ labels:
+ app: postgres
+spec:
+ storageClassName: manual
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 10Gi
-
-deployment.yamlapiVersion: apps/v1
-kind: Deployment
-metadata:
- name: postgres
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: postgres
- template:
- metadata:
- labels:
- app: postgres
- spec:
- containers:
- - name: postgres
- image: 'postgres:latest'
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 5432
- env:
-
- - name: POSTGRES_DB
- valueFrom:
- configMapKeyRef:
- name: postgres-configmap
- key: POSTGRES_DB
-
- - name: POSTGRES_USER
- valueFrom:
- secretKeyRef:
- name: postgres-credentials
- key: POSTGRES_USER
-
- - name: POSTGRES_PASSWORD
- valueFrom:
- secretKeyRef:
- name: postgres-credentials
- key: POSTGRES_PASSWORD
-
- volumeMounts:
- - mountPath: /var/lib/postgresql/data
- name: postgresdata
- volumes:
- - name: postgresdata
- persistentVolumeClaim:
- claimName: postgres-volume-claim
+deployment.yamlapiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: postgres
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: postgres
+ template:
+ metadata:
+ labels:
+ app: postgres
+ spec:
+ containers:
+ - name: postgres
+ image: 'postgres:latest'
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 5432
+ env:
+
+ - name: POSTGRES_DB
+ valueFrom:
+ configMapKeyRef:
+ name: postgres-configmap
+ key: POSTGRES_DB
+
+ - name: POSTGRES_USER
+ valueFrom:
+ secretKeyRef:
+ name: postgres-credentials
+ key: POSTGRES_USER
+
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: postgres-credentials
+ key: POSTGRES_PASSWORD
+
+ volumeMounts:
+ - mountPath: /var/lib/postgresql/data
+ name: postgresdata
+ volumes:
+ - name: postgresdata
+ persistentVolumeClaim:
+ claimName: postgres-volume-claim
-
-service.yamlapiVersion: v1
-kind: Service
-metadata:
- name: postgres
- labels:
- app: postgres
-spec:
- type: ClusterIP
- ports:
- - port: 5432
- selector:
- app: postgres
+
-
Acessando o pod do Postgres:
-kubectl exec -it postgres-<pod-id> -- psql -h localhost -U store --password -p 5432 store
+
Redirecionando porta:
-
kubectl port-forward <pod> 5432:5432
+
Deploying the Discovery Microservice
discovery📁 store.discovery-resource
@@ -2655,55 +2657,55 @@ Deploying the Discovery Microservi
-
-
-
-configmap.yamlapiVersion: apps/v1
-kind: Deployment
+configmap.yamlapiVersion: v1
+kind: ConfigMap
metadata:
- name: discovery
+ name: discovery-configmap
labels:
app: discovery
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: discovery
- template:
- metadata:
- labels:
- app: discovery
- spec:
- containers:
- - name: discovery
- image: humbertosandmann/discovery:latest
- ports:
- - containerPort: 8761
+data:
+ DISCOVERY_HOST: discovery
-service.yamlapiVersion: v1
-kind: Service
+configmap.yamlapiVersion: apps/v1
+kind: Deployment
metadata:
name: discovery
labels:
app: discovery
spec:
- type: ClusterIP
- ports:
- - port: 8761
- targetPort: 8761
- protocol: TCP
- selector:
- app: discovery
+ replicas: 1
+ selector:
+ matchLabels:
+ app: discovery
+ template:
+ metadata:
+ labels:
+ app: discovery
+ spec:
+ containers:
+ - name: discovery
+ image: humbertosandmann/discovery:latest
+ ports:
+ - containerPort: 8761
+
+
+
@@ -2724,216 +2726,216 @@ Deploying a Microservice
-application.yamlserver:
- port: 8080
-
-spring:
- application:
- name: account
- datasource:
- url: jdbc:postgresql://${POSTGRES_HOST}:5432/${POSTGRES_DB}
- username: ${POSTGRES_USER:postgres}
- password: ${POSTGRES_PASSWORD:Post123321}
- driver-class-name: org.postgresql.Driver
- flyway:
- baseline-on-migrate: true
- schemas: account
- jpa:
- properties:
- hibernate:
- default_schema: account
-
-management:
- endpoints:
- web:
- base-path: /account/actuator
- exposure:
- include: [ 'prometheus' ]
-
-eureka:
- client:
- register-with-eureka: true
- fetch-registry: true
- service-url:
- defaultZone: http://${DISCOVERY_HOST}:8761/eureka/
+application.yamlserver:
+ port: 8080
+
+spring:
+ application:
+ name: account
+ datasource:
+ url: jdbc:postgresql://${POSTGRES_HOST}:5432/${POSTGRES_DB}
+ username: ${POSTGRES_USER:postgres}
+ password: ${POSTGRES_PASSWORD:Post123321}
+ driver-class-name: org.postgresql.Driver
+ flyway:
+ baseline-on-migrate: true
+ schemas: account
+ jpa:
+ properties:
+ hibernate:
+ default_schema: account
+
+management:
+ endpoints:
+ web:
+ base-path: /account/actuator
+ exposure:
+ include: [ 'prometheus' ]
+
+eureka:
+ client:
+ register-with-eureka: true
+ fetch-registry: true
+ service-url:
+ defaultZone: http://${DISCOVERY_HOST}:8761/eureka/
Subir no Git e rodar o Jenkins.
-deployment.yamlapiVersion: apps/v1
-kind: Deployment
-metadata:
- name: account
-spec:
- selector:
- matchLabels:
- app: account
- replicas: 1
- template:
- metadata:
- labels:
- app: account
- spec:
- containers:
- - name: account
- image: humbertosandmann/account:latest
- ports:
- - containerPort: 8080
- env:
-
- - name: DISCOVERY_HOST
- valueFrom:
- configMapKeyRef:
- name: discovery-configmap
- key: DISCOVERY_HOST
-
- - name: POSTGRES_HOST
- valueFrom:
- configMapKeyRef:
- name: postgres-configmap
- key: POSTGRES_HOST
-
- - name: POSTGRES_DB
- valueFrom:
- configMapKeyRef:
- name: postgres-configmap
- key: POSTGRES_DB
-
- - name: POSTGRES_USER
- valueFrom:
- secretKeyRef:
- name: postgres-credentials
- key: POSTGRES_USER
-
- - name: POSTGRES_PASSWORD
- valueFrom:
- secretKeyRef:
- name: postgres-credentials
- key: POSTGRES_PASSWORD
+deployment.yamlapiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: account
+spec:
+ selector:
+ matchLabels:
+ app: account
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: account
+ spec:
+ containers:
+ - name: account
+ image: humbertosandmann/account:latest
+ ports:
+ - containerPort: 8080
+ env:
+
+ - name: DISCOVERY_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: discovery-configmap
+ key: DISCOVERY_HOST
+
+ - name: POSTGRES_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: postgres-configmap
+ key: POSTGRES_HOST
+
+ - name: POSTGRES_DB
+ valueFrom:
+ configMapKeyRef:
+ name: postgres-configmap
+ key: POSTGRES_DB
+
+ - name: POSTGRES_USER
+ valueFrom:
+ secretKeyRef:
+ name: postgres-credentials
+ key: POSTGRES_USER
+
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: postgres-credentials
+ key: POSTGRES_PASSWORD
-service.yamlapiVersion: v1
-kind: Service
-metadata:
- name: account
- labels:
- name: account
-spec:
- type: NodePort
- ports:
- - port: 8080
- targetPort: 8080
- protocol: TCP
- selector:
- app: account
+
-
-kubectl apply -f k8s/deployment.yaml
-kubectl apply -f k8s/service.yaml
+
Deploying using Jenkins
Creating credentials for Jenkins to K8s
Criar credentials no Kubernetes para que o Jenkins possa conectar.
-jenkins.yaml---
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: jenkins
- namespace: default
----
-
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: jenkins
- namespace: default
-rules:
-- apiGroups: [""]
- resources: ["pods","services"]
- verbs: ["create","delete","get","list","patch","update","watch"]
-- apiGroups: ["apps"]
- resources: ["deployments"]
- verbs: ["create","delete","get","list","patch","update","watch"]
-- apiGroups: [""]
- resources: ["pods/exec"]
- verbs: ["create","delete","get","list","patch","update","watch"]
-- apiGroups: [""]
- resources: ["pods/log"]
- verbs: ["get","list","watch"]
-- apiGroups: [""]
- resources: ["secrets"]
- verbs: ["get","create"]
-- apiGroups: [""]
- resources: ["configmaps"]
- verbs: ["create","get","update"]
-- apiGroups: [""]
- resources: ["persistentvolumeclaims"]
- verbs: ["create","delete","get","list","patch","update","watch"]
-
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: jenkins-token
- annotations:
- kubernetes.io/service-account.name: jenkins
-type: kubernetes.io/service-account-token
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: jenkins
- namespace: default
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: jenkins
-subjects:
-- kind: ServiceAccount
- name: jenkins
----
-# Allows jenkins to create persistent volumes
-# This cluster role binding allows anyone in the "manager" group to read secrets in any namespace.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: jenkins-crb
-subjects:
-- kind: ServiceAccount
- namespace: default
- name: jenkins
-roleRef:
- kind: ClusterRole
- name: jenkinsclusterrole
- apiGroup: rbac.authorization.k8s.io
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- # "namespace" omitted since ClusterRoles are not namespaced
- name: jenkinsclusterrole
-rules:
-- apiGroups: [""]
- resources: ["persistentvolumes"]
- verbs: ["create","delete","get","list","patch","update","watch"]
+jenkins.yaml---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: jenkins
+ namespace: default
+---
+
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: jenkins
+ namespace: default
+rules:
+- apiGroups: [""]
+ resources: ["pods","services"]
+ verbs: ["create","delete","get","list","patch","update","watch"]
+- apiGroups: ["apps"]
+ resources: ["deployments"]
+ verbs: ["create","delete","get","list","patch","update","watch"]
+- apiGroups: [""]
+ resources: ["pods/exec"]
+ verbs: ["create","delete","get","list","patch","update","watch"]
+- apiGroups: [""]
+ resources: ["pods/log"]
+ verbs: ["get","list","watch"]
+- apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get","create"]
+- apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["create","get","update"]
+- apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["create","delete","get","list","patch","update","watch"]
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: jenkins-token
+ annotations:
+ kubernetes.io/service-account.name: jenkins
+type: kubernetes.io/service-account-token
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: jenkins
+ namespace: default
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: jenkins
+subjects:
+- kind: ServiceAccount
+ name: jenkins
+---
+# Allows jenkins to create persistent volumes
+# This cluster role binding grants the jenkins service account cluster-wide permissions on persistent volumes.
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: jenkins-crb
+subjects:
+- kind: ServiceAccount
+ namespace: default
+ name: jenkins
+roleRef:
+ kind: ClusterRole
+ name: jenkinsclusterrole
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ # "namespace" omitted since ClusterRoles are not namespaced
+ name: jenkinsclusterrole
+rules:
+- apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["create","delete","get","list","patch","update","watch"]
Executar a declaração:
-
kubectl apply -f jenkins.yaml
+
Recovering the Jenkins' Token
-kubectl get secrets
+
kubectl get secretsNAME TYPE DATA AGE
jenkins-token kubernetes.io/service-account-token 3 21s
Abrindo o objeto com o token.
-kubectl describe secrets/jenkins-token
+
kubectl describe secrets/jenkins-tokenName: jenkins-token
Namespace: default
Labels: <none>
Annotations: kubernetes.io/service-account.name: jenkins
kubernetes.io/service-account.uid: 0d06d343-fd34-4aff-8396-5dfec5a9e5b6
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1111 bytes
namespace: 7 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IklqTkZXdEVKcW1iclBrNHBnQzJSX1F6QjFIWDFMX0FvNGV
kNGd2aWFKd00ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ
2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZW
NyZXQubmFtZSI6ImplbmtpbnMtdG9rZW4iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtY
WNjb3VudC5uYW1lIjoiamVua2lucyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2Nv
dW50LnVpZCI6IjBkMDZkMzQzLWZkMzQtNGFmZi04Mzk2LTVkZmVjNWE5ZTViNiIsInN1YiI6InN5c3RlbTpzZXJ
2aWNlYWNjb3VudDpkZWZhdWx0OmplbmtpbnMifQ.XkwD5vwC7CJNDv44PxCAIpLEfVlQbLE6VDNmTOEpFkaoe_x
4ehU8QS8fnTgUz0a_vjUKuXum-PD2vF8Fx_WBsWVAG8BNhXJv79MMbEe7axYT7W91fjsnT0rMqSqzajNjTTDFvP
DQu0KkLzC-UUnlG3RdNHhzxGVnUIA9lIeJuVKnlCXAexPQr6HeX5ggbe-CZO_uMjFZjwBnjLC-IJsIKKaz8I4Cb
Fxz10vAl5SpJ7PadA1iZZEvr_VYhhG42qMqRFLzkrXtWUG0NX8aSitJT0Wk9c54ME13WDZb6MfRXwUWbARu-TLN
56KrPaqtL2dBtRG2EFOn5nVXARI7jPzhjg
@@ -2950,16 +2952,16 @@ Set up the credential to Jenkins
Updating the Jenkinsfile
Adding the Deploy on k8s
stage:
-Jenkinsfile...
- stage('Deploy on local k8s') {
- steps {
- withCredentials([ string(credentialsId: 'minikube-credentials', variable: 'api_token') ]) {
- sh 'kubectl --token $api_token --server https://host.docker.internal:55529 --insecure-skip-tls-verify=true apply -f ./k8s/deployment.yaml '
- sh 'kubectl --token $api_token --server https://host.docker.internal:55529 --insecure-skip-tls-verify=true apply -f ./k8s/service.yaml '
- }
- }
- }
-...
+Jenkinsfile...
+ stage('Deploy on local k8s') {
+ steps {
+ withCredentials([ string(credentialsId: 'minikube-credentials', variable: 'api_token') ]) {
+ sh 'kubectl --token $api_token --server https://host.docker.internal:55529 --insecure-skip-tls-verify=true apply -f ./k8s/deployment.yaml '
+ sh 'kubectl --token $api_token --server https://host.docker.internal:55529 --insecure-skip-tls-verify=true apply -f ./k8s/service.yaml '
+ }
+ }
+ }
+...
kubectl config
@@ -2991,7 +2993,7 @@ References:
- June 17, 2024
+ June 19, 2024
diff --git a/search/search_index.json b/search/search_index.json
index 67cf7c4..e9e7c9b 100644
--- a/search/search_index.json
+++ b/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Plataformas, Microsservi\u00e7os e APIs","text":"Info Carga Hor\u00e1ria: 80
Semestre: 5\u00ba per\u00edodo
"},{"location":"#ementa","title":"Ementa","text":"Conceitos de Aplica\u00e7\u00f5es em Mon\u00f3l\u00edto e Microservi\u00e7os; Conteineriza\u00e7\u00e3o; Padr\u00f5es de Constru\u00e7\u00e3o de Softwares (Design Patterns); Monitoramento e Rastreabilidade de Aplica\u00e7\u00f5es; Seguran\u00e7a (autentica\u00e7\u00e3o e autoriza\u00e7\u00e3o); Mensageria; Dados Distribu\u00eddos; Orquestra\u00e7\u00e3o de computa\u00e7\u00e3o em nuvem, sistemas de gerenciamento, monitoramento e configura\u00e7\u00e3o de recursos virtualizados; Integra\u00e7\u00e3o entre Desenvolvimento e Opera\u00e7\u00e3o; Utiliza\u00e7\u00e3o de Plataformas em Nuvem para Produ\u00e7\u00e3o (Cloud Computing); Aspectos de automa\u00e7\u00e3o de gest\u00e3o de sistema em cloud \u2013 DevOps. Serverless computing \u2013 FaaS - function as a service; Utiliza\u00e7\u00e3o da Plataforma como Produto para Neg\u00f3cios: Infraestrutura como Servi\u00e7o (IaaS), Plataforma como Servi\u00e7o (PaaS) e Software como Servi\u00e7o (SaaS). Gest\u00e3o de n\u00edveis de servi\u00e7o (SLA - Service Level Agreement). Custos de projeto e de opera\u00e7\u00e3o de sistemas em cloud.
"},{"location":"#objetivos","title":"Objetivos","text":"Ao final da disciplina o aluno ser\u00e1 capaz de:
- Tomar decis\u00f5es a respeito da escolha de estrat\u00e9gias de arquiteturas para o emprego de problemas computacionais;
- Implementar e interconectar aplica\u00e7\u00f5es computacionais para a constru\u00e7\u00e3o de plataformas de alto desempenho: escalabilidade por meio do uso de t\u00e9cnicas de computa\u00e7\u00e3o em nuvem;
- Administrar um sistema de gerenciamento de nuvem, provisionando a infraestrutura necess\u00e1ria como um servi\u00e7o;
- Construir, com o aux\u00edlio de frameworks, solu\u00e7\u00f5es de plataformas completas e integradas de forma profissional;
- Arquitetar e implementar linhas de produ\u00e7\u00f5es de softwares robustos (CI/CD);
- Analisar, projetar e especificar uma solu\u00e7\u00e3o de computa\u00e7\u00e3o em nuvem mista baseada em hardware, software e redes para atender aos requisitos de determinado pacto de n\u00edvel de servi\u00e7o (SLA);
- Planejar e analisar o uso de plataformas empresariais como subs\u00eddio para cria\u00e7\u00e3o de novos neg\u00f3cios (PaaS).
"},{"location":"#conteudo-programatico","title":"Conte\u00fado Program\u00e1tico","text":" - Conceitos de Arquitetura e Microsservi\u00e7os;
- Microsservi\u00e7os com Interface API - RESTful;
- Introdu\u00e7\u00e3o a Cont\u00eaineres;
- Introdu\u00e7\u00e3o e Implementa\u00e7\u00e3o de Design Patterns;
- Apresenta\u00e7\u00e3o de Design Patterns mais Complexos: Seguran\u00e7a, Mensageria, Cache, etc;
- Fundamentos de Computa\u00e7\u00e3o em Nuvem.
- Orquestra\u00e7\u00e3o, Implementa\u00e7\u00e3o e Monitoramento de Ambientes Virtualizados e Distribu\u00eddos;
- Infraestrutura como um Servi\u00e7o.
- Redes Definidas por Software;
- Software como um Servi\u00e7o;
- Gest\u00e3o de N\u00edveis de Servi\u00e7o.
"},{"location":"#bibliografia-basica","title":"Bibliografia B\u00e1sica","text":"Livros:
-
ROMAN, Ed; AMBLER, Scott W.; JEWELL, Tyler. Dominando Enterprise Javabeans. Porto Alegre: Bookman, 2004. E-book. ISBN 9788577804061. Dispon\u00edvel em: https://integrada.minhabiblioteca.com.br/#/books/9788577804061. Acesso em: 30 de maio de 2023.
-
ALVES, William Pereira. Java para Web - Desenvolvimento de Aplica\u00e7\u00f5es. S\u00e3o Paulo: \u00c9rica, 2015. E-book. ISBN 9788536519357. Dispon\u00edvel em: https://integrada.minhabiblioteca.com.br/#/books/9788536519357. Acesso em: 30 de maio de 2023.
-
FREEMAN, Emily. DevOps Para Leigos. Rio de Janeiro: Editora Alta Books, 2021. E-book. ISBN 9788550816661. Dispon\u00edvel em: https://integrada.minhabiblioteca.com.br/#/books/9788550816661. Acesso em: 30 de maio de 2023.
"},{"location":"#bibliografia-complementar","title":"Bibliografia Complementar","text":"Livros:
-
XU, A., System Design Interview - An insider's guide, 1\u00aa ed., Independently Published, 2020.
-
MARTIN, R. C., Arquitetura Limpa: o guia do artes\u00e3o para estrutura e design de software, 1\u00aa ed., Alta Books, 2018.
-
PARKER, G. G.; VAN ALSTYNE, M. W.; CHOUDARY, S. P., Plataforma: a revolu\u00e7\u00e3o da estrat\u00e9gia, 1\u00aa ed., Alta Books, 2018.
-
SEHGAL, N. K.; BHATT, P. C. P.; ACKEN J. M., Cloud Computing with Security and Scalability.: Concepts and Practices, 3\u00aa ed., Springer, 2023.
-
KRIEF, M., Learning DevOps: A comprehensive guide to accelerating DevOps culture adoption with Terraform, Azure DevOps, Kubernetes, and Jenkins, 2\u00aa ed., Packt Publishing, 2022.
-
GAMMA, E.; HELM, R.; JOHNSON, R., VLISSIDES, J., Design Patterns: Elements of Reusable Object-Oriented Software, 1\u00aa ed., Addison-Wesley Professional, 1994.
-
SANTANA, E. F. Z., Back-end Java: Microsservi\u00e7os, Spring Boot e Kubernetes, Casa do C\u00f3digo, 2021. Material.
-
SANTANA, E. F. Z., Apache Kafka e Spring Boot: Comunica\u00e7\u00e3o ass\u00edncrona entre microsservi\u00e7os, Casa do C\u00f3digo, 2022. Material.
Artigos:
-
XU, A. et al.. ByteByteGo - System Design 101. Dispon\u00edvel em: https://github.com/ByteByteGoHq/system-design-101. Acesso em: 19 dezembro 2023.
-
Spring. Spring Cloud. Dispon\u00edvel em: https://spring.io/projects/spring-cloud. Acesso em: 19 dezembro 2023.
-
CHOI, K., Software Engineering Blogs. Dispon\u00edvel em: https://github.com/kilimchoi/engineering-blogs. Acesso em: 20 dezembro 2023.
-
Ghemawat, S. et al.. Towards Modern Development of Cloud Applications. Proceedings of the 19th Workshop on Hot Topics in Operating Systems, 2023 - p. 110-117. Association for Computing Machinery, Providence, RI, USA. Dispon\u00edvel em: doi:10.1145/3593856.3595909. Acesso em: 05 fevereiro de 2024.
"},{"location":"disclaimer/","title":"Disclaimer","text":""},{"location":"disclaimer/#contributors","title":"Contributors","text":"Name Humberto Sandmann Fabio Roberto de Miranda Raul Ikeda Maciel Calebe Vidal Eduardo Felipe Zambom Santana"},{"location":"disclaimer/#source","title":"Source","text":"Circa of 70% of the whole conceptual texts were generated by ChatGPT nonetheless all of them were revised by the editor. The sections of handout was produced by the contributors.
"},{"location":"api/documentation/","title":"Documentation","text":""},{"location":"api/documentation/#swagger","title":"Swagger","text":"mavengradle <dependency>\n</dependency>\n
\n
```javascript I'm A tab console.log('Code Tab A');
```javascript I'm tab B\nconsole.log('Code Tab B');\n
CC++ #include <stdio.h>\n\nint main(void) {\n printf(\"Hello world!\\n\");\n return 0;\n}\n
#include <iostream>\n\nint main(void) {\n std::cout << \"Hello world!\" << std::endl;\n return 0;\n}\n
https://www.baeldung.com/spring-rest-openapi-documentation
"},{"location":"api/spring-boot-cloud/","title":"Spring Boot Cloud","text":"Containering:
https://spring.io/projects/spring-cloud/
https://github.com/spring-cloud/spring-cloud-release/wiki/Supported-Versions
"},{"location":"appendix/ohmyzsh/","title":"Oh My Zsh","text":"Install:
sudo apt install zsh\nchsh -s $(which zsh)\nsh -c \"$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)\"\n
Plugins:
git clone https://github.com/zsh-users/zsh-syntax-highlighting ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting\ngit clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions\ngit clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf && ~/.fzf/install\n
Edit the file ~/.zshrc
at home's folder:
nano ~/.zshrc\n
~/.zshrcZSH_THEME=\"afowler\"\nplugins=(\n git\n zsh-syntax-highlighting\n zsh-autosuggestions\n fzf\n)\n
Reference:
- Oh My Zsh
"},{"location":"appendix/others/","title":"Others","text":" Clojure and Datomic Studies with Docker and Kafka by Pelichero, F.
Inside a Google data center
"},{"location":"appendix/rest-vs-graphql/","title":"REST vs GraphQL","text":"Source: System Design 101 - REST API vs. GraphQL -
XU, A., System Design 101.\u00a0\u21a9
-
REST.\u00a0\u21a9
-
GraphQL.\u00a0\u21a9
"},{"location":"appendix/rsa/","title":"RSA Algorithm","text":""},{"location":"appendix/rsa/#pkcs","title":"PKCS","text":""},{"location":"appendix/rsa/#the-rsa-encryption-algorithm","title":"The RSA Encryption Algorithm","text":" -
The RSA Encryption Algorithm (1 of 2: Generating the Keys)
-
The RSA Encryption Algorithm (2 of 2: Generating the Keys)
"},{"location":"appendix/tls-for-microservices/","title":"TLS for microservices","text":"The HTTPS-Only Standard
Let's Encrypt
"},{"location":"appendix/versioning-rest-apis/","title":"Versioning REST API","text":" -
Jacky, Versioning RESTful APIs with Spring Boot: A Step-by-Step Guide in 5 minutes.\u00a0\u21a9
"},{"location":"cloud/gitactions/","title":"GitActions","text":""},{"location":"cloud/gitactions/#github-actions","title":"GitHub Actions","text":"GitHub Actions is a feature of GitHub that allows you to automate, customize, and execute your software development workflows right in your repository.
With GitHub Actions, you can build, test, and deploy your code directly from GitHub. It provides world-class support for Continuous Integration/Continuous Deployment (CI/CD).
In addition, GitHub Actions allows you to automate other aspects of your development workflow such as assigning code reviews, managing branches, and triaging issues.
"},{"location":"cloud/terraform/","title":"Terraform","text":""},{"location":"cloud/terraform/#infrastructure-as-code-iac","title":"Infrastructure as Code (IaC)","text":"Infrastructure as Code (IaC) is a method of managing and provisioning computing infrastructure through machine-readable definition files, rather than physical hardware configuration or interactive configuration tools.
The IT infrastructure managed by this comprises both physical equipment, such as bare-metal servers, as well as virtual machines, and associated configuration resources. The definitions may be in a version control system. It can use either scripts or declarative definitions, rather than manual processes, but the term is more often used to promote declarative approaches.
"},{"location":"cloud/terraform/#pros","title":"Pros","text":" - Automatization of creation of an infrastructure;
- Standardization of platforms;
- Replication of infrastructure.
|- .github\n| |- workflows\n|- s3-bucket-static\n |- main.tf\n
main.tfprovider \"aws\" {\n region = \"us-east-1\"\n}\n\nvariable \"bucket_name\" {\n type = string\n}\n\nresource \"aws_s3_bucket\" \"static_site_bucket\" {\n bucket = \"static-site-${var.bucket_name}\"\n\n website {\n index_document = \"index.html\"\n error_document = \"404.html\n }\n\n tags = {\n Name = \"Static Site Bucket\"\n Environment = \"Production\"\n }\n}\n\nresource \"aws_s3_public_access_block\" \"static_site_bucket\" {\n bucket aws_s3_bucket.static_site_bucket.id\n\n block_public_acls = false\n block_public_policy = false\n ignore_public_acls = false\n restrict_public_buckets = false\n}\n
"},{"location":"cloud/terraform/#alternatives","title":"Alternatives","text":" - AWS CloudFormation
- Ansible
- Vagrant
"},{"location":"cloud/terraform/#additional-material","title":"Additional Material","text":" -
Criando Infra na AWS com Terraform (IaC) by Fernanda Kipper
"},{"location":"devops/concepts/","title":"Concepts","text":"DevOps is a set of practices that combines software development (Dev) and IT operations (Ops). It aims to shorten the system development life cycle and provide continuous delivery with high software quality. DevOps is complementary with Agile software development; several DevOps aspects came from Agile methodology.
Key concepts of DevOps include:
- Continuous Integration (CI): Developers regularly merge their code changes into a central repository, after which automated builds and tests are run.
- Continuous Delivery (CD): The combined practices of continuous integration and automated testing allow for the continuous delivery of code changes to a staging or production system.
- Infrastructure as Code (IaC): Infrastructure is defined and managed using code and software development techniques, such as version control and continuous integration.
- Monitoring and Logging: Keeping track of how applications and systems are performing in real-time to understand ongoing IT infrastructure status.
- Communication and Collaboration: Increased communication and collaboration in an organization is one of the key cultural aspects of DevOps. The use of DevOps tooling and automation of the software delivery process tends to increase collaboration between the teams.
Source: Wikipedia - Devops"},{"location":"devops/concepts/#cicd","title":"CI/CD","text":""},{"location":"devops/concepts/#pipeline","title":"Pipeline","text":""},{"location":"devops/concepts/#service-level-agreement-sla","title":"Service-level agreement - SLA","text":"Service-level agreement, well-known as SLA, is
"},{"location":"devops/concepts/#other-approaches","title":"Other Approaches","text":""},{"location":"devops/concepts/#noops","title":"NoOps","text":"NoOps, short for \"No Operations\", is a concept in software development where the software is designed in such a way that it requires minimal or even no IT operations support. This is often achieved through the use of fully automated processes and systems, which eliminate the need for manual intervention in tasks such as deployment, scaling, and systems management.
The goal of NoOps is to allow the software developers to focus on writing new features for the application, rather than spending time on operational concerns. This is often achieved through the use of Platform as a Service (PaaS) providers, which handle many of the operational tasks automatically.
https://www.jenkins.io/doc/tutorials/build-a-java-app-with-maven/
Jenkins
Install plugins: - Blue Ocean - Docker - Docker Pipeline - Kubernetes Cli
https://www.jenkins.io/doc/tutorials/build-a-java-app-with-maven/
https://www.jenkins.io/blog/2017/02/07/declarative-maven-project/
-
Wiki Service-level Agreement \u21a9
"},{"location":"devops/docker/","title":"Docker","text":"How to avoid the classical..
?
The answer is: CONTAINERIZATION.
Docker is a platform and tool that enables developers to automate the deployment of applications inside lightweight, portable containers. Containers are a form of virtualization that packages an application and its dependencies together, ensuring consistency across different environments, from development to testing and production.
Here are some key concepts and components of Docker:
- Containerization: Containers are lightweight, standalone, and executable packages that include everything needed to run a piece of software, including the code, runtime, libraries, and system tools. Containers isolate applications from their environment, making them portable and consistent across various systems.
- Docker Engine: This is the core component of Docker. It is a lightweight and portable runtime that can run containers on various operating systems, including Linux and Windows. The Docker Engine consists of a server, a REST API, and a command-line interface.
- Docker Image: An image is a lightweight, standalone, and executable package that includes everything needed to run a piece of software, including the code, a runtime, libraries, environment variables, and config files. Images are used to create containers.
- Dockerfile: A Dockerfile is a text file that contains instructions for building a Docker image. It specifies the base image, sets up the environment, installs dependencies, and configures the application.
- Registry: Docker images can be stored in registries, which are repositories for sharing and distributing container images. Docker Hub is a popular public registry, and organizations often use private registries to store and manage their proprietary images.
- Container Orchestration: Docker can be used in conjunction with container orchestration tools like Kubernetes or Docker Swarm to manage the deployment, scaling, and orchestration of containerized applications in production environments.
- Portability: One of Docker's key advantages is its portability. Since containers encapsulate everything an application needs to run, they can run consistently across different environments, reducing the \"it works on my machine\" problem often encountered in software development.
Docker has become a widely adopted technology in the software development and deployment space due to its ease of use, portability, and the efficiency it brings to the development and deployment lifecycle. It has revolutionized the way applications are packaged, shipped, and deployed, making it easier for developers to build, test, and deploy applications in a more reliable and consistent manner.
"},{"location":"devops/docker/#differences-between-docker-and-virtual-machines","title":"Differences between Docker and Virtual Machines","text":"Docker containers and virtual machines (VMs) are both technologies used for virtualization, but they operate at different levels and have distinct characteristics. Here are the key differences between Docker containers and virtual machines:
Aspect Docker Containers Virtual Machines Architecture Containers share the host operating system's kernel and isolate the application processes from each other. Each container runs in its own user space but uses the host's kernel. VMs, on the other hand, run a complete operating system, including its own kernel, on top of a hypervisor. Each VM is essentially a full-fledged virtualized computer with its own resources. Resource Efficiency Containers are more lightweight and share the host OS kernel, which makes them more resource-efficient compared to VMs. Containers can start up quickly and consume fewer system resources. VMs have more overhead because each VM requires a full operating system and has its own kernel. This makes VMs less resource-efficient than containers. Isolation Containers provide process-level isolation, meaning that each container runs in its own process space, but they share the same OS kernel. This isolation is generally sufficient for most applications. VMs provide stronger isolation since each VM runs its own operating system and has its own kernel. This makes VMs a better choice in situations where strong isolation is a critical requirement. Portability Containers are highly portable because they encapsulate the application and its dependencies, ensuring consistency across different environments. VMs are less portable due to the larger size and complexity associated with bundling a full operating system with the application. Startup Time Containers can start up very quickly, typically in seconds, making them well-suited for microservices architectures and dynamic scaling. VMs generally have longer startup times, often measured in minutes, due to the time required to boot a full operating system. Resource Utilization Containers share the host OS resources, which can lead to higher density and more efficient resource utilization. 
VMs have a higher resource overhead because each VM requires its own set of resources, including memory, disk space, and CPU. Use Cases Containers are well-suited for microservices architectures, continuous integration/continuous deployment (CI/CD) pipelines, and scenarios where rapid deployment and scalability are crucial. VMs are suitable for scenarios that require strong isolation, compatibility with various operating systems, and where applications rely on specific OS configurations.
Source: Docker vs. Virtual Machines: Differences You Should Know In summary, Docker containers and virtual machines have different levels of abstraction and are suitable for different use cases. Containers are lightweight, portable, and efficient, making them popular for modern application development and deployment practices. Virtual machines provide stronger isolation and are more suitable for scenarios where running multiple instances of different operating systems is necessary. The choice between Docker containers and virtual machines depends on the specific requirements of the application and the environment in which it will be deployed. To install Docker Engine, see Install Docker Engine.
"},{"location":"devops/docker/#creating-a-simple-docker","title":"Creating a Simple Docker","text":"Command Description docker run <image>
Runs a Docker container from an image. docker ps
Lists running Docker containers. docker ps -a
Lists all Docker containers, both running and stopped. docker stop <container>
Stops a running Docker container. docker rm <container>
Removes a Docker container. docker images
Lists Docker images. docker rmi <image>
Removes a Docker image. docker pull <image>
Pulls a Docker image from a Docker registry. docker build -t <tag> .
Builds a Docker image from a Dockerfile in the current directory. docker exec -it <container> <command>
Executes a command in a running Docker container. docker logs <container>
Fetches the logs of a Docker container. Hello Markdown!
pip install termynalInstalled FROM openjdk:17-alpine\nVOLUME /tmp\nARG JAR_FILE=target/gateway-0.0.1-SNAPSHOT.jar\nCOPY ${JAR_FILE} app.jar\nENTRYPOINT [\"java\",\"-jar\",\"/app.jar\"]\n
https://docs.docker.com/engine/install/
https://www.docker.com/blog/how-to-use-your-own-registry-2/
-
Docker vs. Virtual Machines: Differences You Should Know \u21a9
"},{"location":"devops/kubernetes/","title":"Kubernetes","text":""},{"location":"devops/kubernetes/#kubernetes","title":"Kubernetes","text":"Kubernetes, also known as K8s, is an open-source platform designed to automate deploying, scaling, and operating application containers. It was originally designed by Google and is now maintained by the Cloud Native Computing Foundation.
Key features of Kubernetes include:
- Service discovery and load balancing: Kubernetes can expose a container using the DNS name or its own IP address. If traffic to a container is high, Kubernetes is able to load balance and distribute the network traffic so that the deployment is stable.
- Storage orchestration: Kubernetes allows you to automatically mount a storage system of your choice, such as local storages, public cloud providers, and more.
- Automated rollouts and rollbacks: You can describe the desired state for your deployed containers using Kubernetes, and it can change the actual state to the desired state at a controlled rate. For example, you can automate Kubernetes to create new containers for your deployment, remove existing containers and adopt all their resources to the new container.
- Automatic bin packing: You provide Kubernetes with a cluster of nodes that it can use to run containerized tasks. You tell Kubernetes how much CPU and memory (RAM) each container needs. Kubernetes can fit containers onto your nodes to make the best use of your resources.
- Self-healing: Kubernetes restarts containers that fail, replaces and reschedules containers when nodes die, kills containers that don\u2019t respond to your user-defined health check, and doesn\u2019t advertise them to clients until they are ready to serve.
- Secret and configuration management: Kubernetes lets you store and manage sensitive information, such as passwords, OAuth tokens, and SSH keys. You can deploy and update secrets and application configuration without rebuilding your container images, and without exposing secrets in your stack configuration.
MiniKube
https://cloud.google.com/learn/what-is-kubernetes?hl=pt-br#section-4
https://serverlessland.com/
"},{"location":"devops/packaging/","title":"Packaging","text":""},{"location":"devops/packaging/#maven","title":"Maven","text":"Maven uses an XML file to describe the software project being built, its dependencies on other external modules and components, the build order, directories, and required plugins. It comes with pre-defined targets for performing certain well-defined tasks such as compilation of code and its packaging.
Key Features: - Simple project setup that follows best practices. - Dependency management including automatic updating, dependency closures (also known as transitive dependencies) - Able to easily work with multiple projects at the same time. - Large and mature community with a large ecosystem of plugins and integrations.
mvn clean package\n
mvn clean install\n
mvn clean package spring-boot:run\n
mvn versions:display-dependency-updates\n
mvn dependency:analyze\n
more about Maven dependency plugin
"},{"location":"devops/packaging/#gradle","title":"Gradle","text":"Gradle is another build automation tool that builds upon the concepts of Apache Ant and Apache Maven and introduces a Groovy-based domain-specific language (DSL) instead of the XML form used by Apache Maven for declaring the project configuration. Gradle provides a platform to support the entire development lifecycle of a software project.
Key Features: - Declarative builds and build-by-convention. - Language for dependency-based programming. - Structure your build. - Deep API. - Multi-project builds. - Many ways to manage dependencies. - Integration with existing structures. - Ease of migration.
"},{"location":"devops/release/","title":"Release","text":""},{"location":"devops/release/#infrastructure-as-code-and-automation-iac","title":"Infrastructure as Code and Automation (IaC)","text":"Infrastructure as Code (IaC) is a method of managing and provisioning computing infrastructure through machine-readable definition files, rather than physical hardware configuration or interactive configuration tools.
In other words, IaC is the process of managing and provisioning computer data centers through machine-readable definition files, rather than physical hardware configuration or interactive configuration tools.
The IT infrastructure managed by this comprises both physical equipment such as bare-metal servers as well as virtual machines and associated configuration resources. The definitions may be in a version control system. It can use either scripts or declarative definitions, rather than manual processes, but the term is more often used to promote declarative approaches.
IaC approaches are promoted for cloud computing, which is sometimes marketed as Infrastructure as a Service (IaaS). IaC supports IaaS, but should not be confused with it.
Jenkins DSL (Domain Specific Language)
"},{"location":"devops/release/#jenkins","title":"Jenkins","text":"Installing Jenkins
SDLC - Software Development LifeCycle
"},{"location":"devops/release/#service-level-agreement-sla","title":"Service-level agreement - SLA","text":"Service-level agreement, also known as SLA, is a commitment between a service provider and a client that defines measurable targets for the service, such as availability, response time, and responsibilities when those targets are not met.
References:
- [Wiki Service-level Agreement](https://en.wikipedia.org/wiki/Service-level_agreement)\n
"},{"location":"devops/version-control-system/","title":"Version Control","text":""},{"location":"devops/version-control-system/#git","title":"Git","text":"Git is a distributed version control system designed to handle everything from small to very large projects with speed and efficiency. It was created by Linus Torvalds in 2005 for development of the Linux kernel.
Key features of Git include:
- Distributed Version Control: This means that every user has a complete copy of the project repository on their local machine. This allows for operations to be performed offline and provides a backup in case the central repository fails.
- Branching and Merging: Git's branching model allows developers to work on different features or bugs in isolation, without affecting the main codebase. These branches can then be merged back into the main codebase when the work is complete.
- Speed: Git is designed to be fast and efficient, even for large projects.
- Data Integrity: Git uses a data model that ensures the cryptographic integrity of every bit of your project. Every file and commit is checksummed and retrieved by its checksum when checked back out.
- Staging Area: Git provides a staging area or \"index\" that allows you to format and review your commits before completing the commit.
SCM - Source Code Management (also known as Software Configuration Management)
https://twitter.com/milan_milanovic/status/1745435542127349899
"},{"location":"handout/architecture/","title":"Architecture","text":""},{"location":"handout/architecture/#clean-architecture","title":"Clean Architecture","text":"Total desacoplamento das regras de neg\u00f3cios das camadas de interface:
Source: The Clean Code Blog Em nossa arquitetura:
flowchart LR\n subgraph Controller\n direction TB\n Interface:::adapter\n RecordIn:::adapter\n RecordOut:::adapter\n end\n subgraph Case\n direction TB\n Service:::case\n DTO:::case\n end\n subgraph Entity\n direction TB\n Repository:::entity\n Table:::entity\n end\n\n Interface --> RecordIn\n Interface --> RecordOut\n\n Controller <--> parser[\"Parser\"] <--> Case\n\n Service --> DTO\n\n Case <--> mapper[\"Mapper\"] <--> Entity\n\n Repository --> Table\n\n classDef adapter fill:#6f6\n classDef case fill:#f99\n classDef entity fill:#ff9\n
"},{"location":"handout/architecture/#referencias","title":"Refer\u00eancias:","text":" -
Criando um projeto Spring Boot com Arquitetura Limpa by Giuliana Silva Bezerra
\u21a9
-
Clean Architecture: A Craftsman's Guide to Software Structure and Design \u21a9
-
Como se faz DevOps: Organizando pessoas, dos silos aos times de plataforma \u21a9
"},{"location":"handout/business/","title":"Business","text":""},{"location":"handout/business/#compromissos-e-contratos","title":"Compromissos e Contratos","text":"SLI significa Service Level Indicator, ou Indicador de N\u00edvel de Servi\u00e7o. S\u00e3o m\u00e9tricas quantitativas que medem a qualidade de um servi\u00e7o. Por exemplo, se o SLA especificar que os sistemas v\u00e3o estar dispon\u00edveis 99,95% do tempo, o SLI \u00e9 a medi\u00e7\u00e3o real da disponibilidade.
SLO significa Service Level Objective, ou Objetivo de N\u00edvel de Servi\u00e7o. S\u00e3o metas espec\u00edficas de desempenho que uma equipe de SRE define para cumprir os requisitos do SLA.
SLA significa Service Level Agreement, ou Acordo de N\u00edvel de Servi\u00e7o. \u00c9 um acordo entre a empresa e o cliente acerca do servi\u00e7o contratado. Por exemplo, se assinamos com o cliente que vamos manter ativo o seu ecommerce durante pelo menos 99,99% do tempo do m\u00eas, isso quer dizer que o m\u00e1ximo de tempo que a p\u00e1gina pode estar inacess\u00edvel durante o m\u00eas ser\u00e1 4 minutos e 19 segundos.
\nflowchart LR\nsubgraph \"SLI\"\n a(\"M\u00e9tricas\")\nend\nsubgraph \"SLO\"\n b(\"Objetivos\")\nend\nsubgraph \"SLA\"\n c(\"Promessas\")\nend\n\n\nSLI --> SLO --> SLA --> SLI
"},{"location":"handout/business/#cicd-continuous-integration-and-continuous-delivery","title":"CI/CD - Continuous Integration and Continuous Delivery","text":"CI/CD \u00e9 uma abordagem pr\u00e1tica e \u00e1gil para o desenvolvimento de software que combina duas pr\u00e1ticas: Integra\u00e7\u00e3o Cont\u00ednua (CI) e Entrega Cont\u00ednua/Implanta\u00e7\u00e3o Cont\u00ednua (CD). Esses processos automatizam a constru\u00e7\u00e3o, teste e implanta\u00e7\u00e3o de aplica\u00e7\u00f5es, facilitando um ciclo de desenvolvimento mais r\u00e1pido e confi\u00e1vel.
"},{"location":"handout/business/#conceito-de-cicd","title":"Conceito de CI/CD","text":" - Integra\u00e7\u00e3o Cont\u00ednua (CI):
- Objetivo: Automatizar a integra\u00e7\u00e3o de c\u00f3digo de m\u00faltiplos desenvolvedores em um reposit\u00f3rio central v\u00e1rias vezes ao dia.
- Processo: Sempre que um desenvolvedor faz commit de c\u00f3digo em um reposit\u00f3rio, um servidor de CI automaticamente verifica e testa o novo c\u00f3digo para detectar problemas rapidamente.
-
Ferramentas Comuns: Jenkins, Travis CI, CircleCI, GitLab CI/CD.
-
Entrega Cont\u00ednua (CD - Continuous Delivery):
- Objetivo: Automatizar a entrega de c\u00f3digo para um ambiente de produ\u00e7\u00e3o de maneira segura e r\u00e1pida.
- Processo: Ap\u00f3s a fase de integra\u00e7\u00e3o cont\u00ednua, o c\u00f3digo \u00e9 preparado para a produ\u00e7\u00e3o atrav\u00e9s de uma s\u00e9rie de testes automatizados. O c\u00f3digo est\u00e1 sempre pronto para ser implantado com um simples clique ou comando.
-
Ferramentas Comuns: Jenkins, GitLab CI/CD, Bamboo.
-
Implanta\u00e7\u00e3o Cont\u00ednua (CD - Continuous Deployment):
- Objetivo: Automatizar a implanta\u00e7\u00e3o de c\u00f3digo diretamente em produ\u00e7\u00e3o sem interven\u00e7\u00e3o manual.
- Processo: Ap\u00f3s passar por todos os testes, o c\u00f3digo \u00e9 automaticamente implantado em produ\u00e7\u00e3o. Isso requer um alto n\u00edvel de confian\u00e7a nos testes automatizados.
- Ferramentas Comuns: Jenkins, GitLab CI/CD, Spinnaker.
"},{"location":"handout/business/#vantagens-do-cicd","title":"Vantagens do CI/CD","text":" - Detec\u00e7\u00e3o Precoce de Problemas: Integra\u00e7\u00e3o cont\u00ednua ajuda a detectar e corrigir problemas rapidamente.
- Entrega R\u00e1pida: Automatiza\u00e7\u00e3o da entrega permite que novas funcionalidades e corre\u00e7\u00f5es cheguem aos usu\u00e1rios mais rapidamente.
- Qualidade e Confiabilidade: Testes automatizados garantem que o c\u00f3digo est\u00e1 funcionando conforme esperado antes de ser implantado.
- Feedback R\u00e1pido: Desenvolvedores recebem feedback r\u00e1pido sobre o estado do c\u00f3digo, facilitando um desenvolvimento mais \u00e1gil e iterativo.
- Automa\u00e7\u00e3o: Reduz o trabalho manual, minimizando erros humanos e aumentando a efici\u00eancia.
"},{"location":"handout/business/#conclusao","title":"Conclus\u00e3o","text":"CI/CD \u00e9 uma pr\u00e1tica essencial no desenvolvimento moderno de software, promovendo automa\u00e7\u00e3o, rapidez e confiabilidade nos processos de integra\u00e7\u00e3o, teste e implanta\u00e7\u00e3o de aplica\u00e7\u00f5es. Utilizando ferramentas como Jenkins, GitLab CI/CD e outras, equipes de desenvolvimento podem entregar software de alta qualidade de forma cont\u00ednua e eficiente.
Source: Wikipedia - Devops"},{"location":"handout/business/#iac-infrastructure-as-code","title":"IaC - Infrastructure as Code","text":"IaC, ou \"Infrastructure as Code\" (Infraestrutura como C\u00f3digo), \u00e9 uma abordagem para gerenciar e provisionar a infraestrutura de TI atrav\u00e9s de arquivos de configura\u00e7\u00e3o leg\u00edveis por humanos, em vez de processos manuais. Esta pr\u00e1tica permite automatizar a configura\u00e7\u00e3o de infraestrutura, tornando-a mais eficiente, replic\u00e1vel e gerenci\u00e1vel.
"},{"location":"handout/business/#conceito-de-iac","title":"Conceito de IaC","text":"Em vez de configurar manualmente servidores, redes, e outros componentes de infraestrutura, voc\u00ea escreve c\u00f3digo para definir e gerenciar essas configura\u00e7\u00f5es. Esse c\u00f3digo pode ser armazenado em sistemas de controle de vers\u00e3o, revisado, testado e aplicado de maneira consistente.
"},{"location":"handout/business/#ferramentas-comuns-de-iac","title":"Ferramentas Comuns de IaC","text":" - Terraform: Uma ferramenta de c\u00f3digo aberto que permite definir a infraestrutura em um arquivo de configura\u00e7\u00e3o usando o HashiCorp Configuration Language (HCL) ou JSON.
- AWS CloudFormation: Um servi\u00e7o da Amazon Web Services que permite modelar e configurar recursos da AWS.
- Ansible: Uma ferramenta que pode automatizar o provisionamento de infraestrutura, al\u00e9m de gerenciamento de configura\u00e7\u00e3o e implanta\u00e7\u00e3o de aplica\u00e7\u00f5es.
"},{"location":"handout/business/#vantagens-do-iac","title":"Vantagens do IaC","text":" - Consist\u00eancia: A infraestrutura \u00e9 provisionada de forma consistente cada vez que o c\u00f3digo \u00e9 executado.
- Reprodutibilidade: F\u00e1cil de replicar ambientes, como desenvolvimento, teste e produ\u00e7\u00e3o.
- Controle de Vers\u00e3o: As configura\u00e7\u00f5es de infraestrutura podem ser versionadas e auditadas, assim como o c\u00f3digo de aplica\u00e7\u00e3o.
- Automa\u00e7\u00e3o: Reduz o erro humano e aumenta a velocidade ao automatizar tarefas repetitivas.
- Documenta\u00e7\u00e3o: O pr\u00f3prio c\u00f3digo serve como documenta\u00e7\u00e3o da infraestrutura.
"},{"location":"handout/business/#conclusao_1","title":"Conclus\u00e3o","text":"IaC transforma a gest\u00e3o de infraestrutura, permitindo uma abordagem mais \u00e1gil, escal\u00e1vel e segura. Usando ferramentas como Terraform, CloudFormation ou Ansible, equipes podem definir, gerenciar e versionar a infraestrutura de maneira eficiente e confi\u00e1vel.
"},{"location":"handout/business/#iaas-infrastructure-as-a-service","title":"IaaS - Infrastructure as a Service","text":"IaaS, ou \"Infrastructure as a Service\" (Infraestrutura como Servi\u00e7o), \u00e9 um modelo de servi\u00e7o de computa\u00e7\u00e3o em nuvem que oferece recursos computacionais fundamentais como servidores virtuais, armazenamento, e redes, sob demanda, na internet. Esses recursos s\u00e3o escal\u00e1veis e gerenciados por um provedor de servi\u00e7os, permitindo que as empresas evitem o custo e a complexidade de comprar e gerenciar a pr\u00f3pria infraestrutura f\u00edsica.
"},{"location":"handout/business/#conceito-de-iaas","title":"Conceito de IaaS","text":"Com IaaS, os usu\u00e1rios podem alugar recursos de computa\u00e7\u00e3o, como m\u00e1quinas virtuais, armazenamento, e redes, e pagar somente pelo que utilizam. Esse modelo oferece flexibilidade, escalabilidade e efici\u00eancia, permitindo que as empresas foquem em suas aplica\u00e7\u00f5es e servi\u00e7os em vez de gerenciar a infraestrutura subjacente.
"},{"location":"handout/business/#provedores-comuns-de-iaas","title":"Provedores Comuns de IaaS","text":" - Amazon Web Services (AWS): Oferece servi\u00e7os como EC2 (Elastic Compute Cloud), S3 (Simple Storage Service), e VPC (Virtual Private Cloud).
- Microsoft Azure: Oferece servi\u00e7os como Azure Virtual Machines, Azure Blob Storage, e Virtual Networks.
- Google Cloud Platform (GCP): Oferece servi\u00e7os como Compute Engine, Cloud Storage, e Virtual Private Cloud.
"},{"location":"handout/business/#vantagens-do-iaas","title":"Vantagens do IaaS","text":" - Escalabilidade: Capacidade de aumentar ou diminuir recursos rapidamente conforme a demanda.
- Custo-Efetivo: Pague apenas pelos recursos que utiliza, sem necessidade de grandes investimentos iniciais em hardware.
- Flexibilidade: Escolha e configure recursos conforme suas necessidades espec\u00edficas.
- Redu\u00e7\u00e3o de Tempo: Rapidamente provisiona e deprovisiona recursos, acelerando a implementa\u00e7\u00e3o de novos projetos.
- Gerenciamento: O provedor de IaaS gerencia a infraestrutura f\u00edsica, enquanto voc\u00ea gerencia apenas os recursos alocados.
"},{"location":"handout/business/#conclusao_2","title":"Conclus\u00e3o","text":"IaaS oferece uma solu\u00e7\u00e3o poderosa e flex\u00edvel para organiza\u00e7\u00f5es que precisam de infraestrutura computacional robusta sem o \u00f4nus de gerenciar hardware f\u00edsico. Provedores como AWS, Azure, e GCP facilitam o provisionamento e gerenciamento de servidores, armazenamento e redes, permitindo que as empresas se concentrem no desenvolvimento e opera\u00e7\u00e3o de suas aplica\u00e7\u00f5es e servi\u00e7os.
"},{"location":"handout/business/#paas-platform-as-a-service","title":"PaaS - Platform as a Service","text":"PaaS, ou \"Platform as a Service\" (Plataforma como Servi\u00e7o), \u00e9 um modelo de servi\u00e7o de computa\u00e7\u00e3o em nuvem que fornece uma plataforma permitindo que os clientes desenvolvam, executem e gerenciem aplica\u00e7\u00f5es sem a complexidade de construir e manter a infraestrutura normalmente associada ao desenvolvimento e ao lan\u00e7amento de uma aplica\u00e7\u00e3o.
"},{"location":"handout/business/#exemplo-de-paas","title":"Exemplo de PaaS","text":"Imagine que voc\u00ea \u00e9 um desenvolvedor de software e deseja criar um aplicativo web.
Sem PaaS
- Configura\u00e7\u00e3o do Servidor: Voc\u00ea precisaria comprar servidores f\u00edsicos ou m\u00e1quinas virtuais para hospedar sua aplica\u00e7\u00e3o.
- Instala\u00e7\u00e3o do Sistema Operacional: Configurar o sistema operacional nos servidores.
- Configura\u00e7\u00e3o de Redes e Seguran\u00e7a: Configurar redes, firewalls, e garantir a seguran\u00e7a da aplica\u00e7\u00e3o.
- Banco de Dados: Instalar e gerenciar o banco de dados.
- Manuten\u00e7\u00e3o: Monitorar e manter o sistema, aplicando patches de seguran\u00e7a e atualiza\u00e7\u00f5es.
Com PaaS
- Escolha da Plataforma: Voc\u00ea escolhe uma plataforma PaaS, como Google App Engine, Microsoft Azure, ou Heroku.
- Desenvolvimento da Aplica\u00e7\u00e3o: Foca apenas no desenvolvimento do c\u00f3digo da aplica\u00e7\u00e3o.
- Desdobramento: Sobe (deploy) o c\u00f3digo para a plataforma PaaS.
- Gest\u00e3o e Escalabilidade: A plataforma cuida automaticamente da hospedagem, seguran\u00e7a, balanceamento de carga, escalabilidade, e manuten\u00e7\u00e3o.
"},{"location":"handout/business/#vantagens-do-paas","title":"Vantagens do PaaS","text":" - Redu\u00e7\u00e3o de Complexidade: Voc\u00ea n\u00e3o precisa se preocupar com a infraestrutura subjacente.
- Escalabilidade: F\u00e1cil de escalar sua aplica\u00e7\u00e3o conforme a demanda.
- Foco no Desenvolvimento: Permite focar mais no desenvolvimento da aplica\u00e7\u00e3o e menos na gest\u00e3o de servidores.
- Custo-Efetivo: Geralmente paga-se apenas pelos recursos usados, evitando grandes investimentos iniciais em hardware.
"},{"location":"handout/business/#conclusao_3","title":"Conclus\u00e3o","text":"Em resumo, PaaS permite que desenvolvedores se concentrem em criar e melhorar suas aplica\u00e7\u00f5es sem se preocupar com a infraestrutura necess\u00e1ria para suport\u00e1-las.
"},{"location":"handout/business/#paap-platform-as-a-product","title":"PaaP - Platform as a Product","text":"\"PaaP\" significa \"Plataforma como Produto\", um conceito que v\u00ea uma plataforma n\u00e3o apenas como um conjunto de ferramentas ou servi\u00e7os, mas como um produto completo e coeso que fornece uma solu\u00e7\u00e3o abrangente para seus usu\u00e1rios. \u00c9 diferente de Plataforma como Servi\u00e7o (PaaS), que geralmente foca em fornecer a infraestrutura e o ambiente para desenvolver, executar e gerenciar aplica\u00e7\u00f5es. PaaP enfatiza a experi\u00eancia do usu\u00e1rio, a integra\u00e7\u00e3o e o valor entregue ao usu\u00e1rio como um produto unificado.
"},{"location":"handout/business/#conceitos-chave-do-paap","title":"Conceitos-Chave do PaaP","text":" -
Solu\u00e7\u00e3o de Ponta a Ponta: PaaP fornece uma solu\u00e7\u00e3o completa que cobre todos os aspectos das necessidades do usu\u00e1rio, desde o desenvolvimento e implanta\u00e7\u00e3o at\u00e9 o gerenciamento e escalabilidade. Ele integra v\u00e1rias ferramentas e servi\u00e7os em uma experi\u00eancia cont\u00ednua.
-
Design Centrado no Usu\u00e1rio: A plataforma \u00e9 projetada com foco na experi\u00eancia do usu\u00e1rio. Prioriza a facilidade de uso, interfaces intuitivas e fluxos de trabalho simplificados para garantir que os usu\u00e1rios possam atingir seus objetivos de forma eficiente.
-
Integra\u00e7\u00e3o e Interoperabilidade: Plataformas PaaP frequentemente integram m\u00faltiplos servi\u00e7os e ferramentas, garantindo que eles funcionem juntos de forma harmoniosa. Essa integra\u00e7\u00e3o reduz a complexidade para os usu\u00e1rios, que n\u00e3o precisam gerenciar sistemas diferentes.
-
Entrega de Valor: A plataforma \u00e9 empacotada e comercializada como um produto que entrega proposi\u00e7\u00f5es de valor espec\u00edficas aos seus usu\u00e1rios. \u00c9 projetada para resolver problemas espec\u00edficos ou atender necessidades espec\u00edficas de maneira abrangente.
-
Melhoria Cont\u00ednua: Produtos PaaP s\u00e3o continuamente melhorados com base no feedback dos usu\u00e1rios e nas demandas do mercado. Atualiza\u00e7\u00f5es e aprimoramentos regulares garantem que a plataforma permane\u00e7a relevante e eficaz.
"},{"location":"handout/business/#exemplo-salesforce","title":"Exemplo: Salesforce","text":"O Salesforce \u00e9 um exemplo not\u00e1vel de Plataforma como Produto. Ele oferece uma su\u00edte abrangente de ferramentas para gerenciamento de relacionamento com clientes (CRM), mas vai al\u00e9m de apenas fornecer infraestrutura.
-
Solu\u00e7\u00e3o de CRM de Ponta a Ponta: O Salesforce fornece ferramentas para vendas, atendimento ao cliente, automa\u00e7\u00e3o de marketing, an\u00e1lises e mais, tudo integrado em uma \u00fanica plataforma.
-
Design Centrado no Usu\u00e1rio: O Salesforce \u00e9 projetado para ser f\u00e1cil de usar, com pain\u00e9is personaliz\u00e1veis, interfaces intuitivas e amplos recursos de suporte.
-
Integra\u00e7\u00e3o e Interoperabilidade: Ele integra com uma ampla gama de aplica\u00e7\u00f5es e servi\u00e7os de terceiros, permitindo que os usu\u00e1rios conectem seu CRM com outras ferramentas que utilizam em seus neg\u00f3cios.
-
Entrega de Valor: O Salesforce \u00e9 comercializado como um produto que ajuda as empresas a gerenciar seus relacionamentos com clientes de forma mais eficaz, melhorar as vendas e aprimorar o atendimento ao cliente.
-
Melhoria Cont\u00ednua: O Salesforce lan\u00e7a regularmente atualiza\u00e7\u00f5es e novos recursos com base no feedback dos usu\u00e1rios e nos avan\u00e7os tecnol\u00f3gicos, garantindo que a plataforma evolua com as necessidades dos usu\u00e1rios.
"},{"location":"handout/business/#beneficios-do-paap","title":"Benef\u00edcios do PaaP","text":" -
Experi\u00eancia do Usu\u00e1rio Simplificada: os usu\u00e1rios interagem com uma \u00fanica plataforma unificada, simplificando seu fluxo de trabalho e reduzindo a necessidade de gerenciar m\u00faltiplas ferramentas.
-
Aumento da Produtividade: ferramentas e servi\u00e7os integrados simplificam os processos, levando a uma maior efici\u00eancia e produtividade.
-
Escalabilidade: solu\u00e7\u00f5es PaaP s\u00e3o projetadas para escalar com as necessidades do usu\u00e1rio, facilitando o crescimento sem a necessidade de trocar de plataformas ou ferramentas.
-
Maior Valor: ao fornecer uma solu\u00e7\u00e3o abrangente, PaaP entrega maior valor aos usu\u00e1rios, atendendo suas necessidades de forma mais eficaz do que ferramentas dispersas.
-
Adapta\u00e7\u00e3o Cont\u00ednua: atualiza\u00e7\u00f5es e melhorias regulares garantem que a plataforma permane\u00e7a relevante e \u00fatil \u00e0 medida que as necessidades dos usu\u00e1rios evoluem.
"},{"location":"handout/business/#conclusao_4","title":"Conclus\u00e3o","text":"Plataforma como Produto (PaaP) representa uma abordagem hol\u00edstica para a entrega de solu\u00e7\u00f5es tecnol\u00f3gicas, focando em fornecer produtos completos, integrados e centrados no usu\u00e1rio. Ao combinar as for\u00e7as de v\u00e1rias ferramentas e servi\u00e7os em uma plataforma coesa, PaaP oferece maior valor, simplicidade e efici\u00eancia aos seus usu\u00e1rios. Salesforce \u00e9 um exemplo not\u00e1vel, mas os princ\u00edpios de PaaP podem ser aplicados em diversas ind\u00fastrias e solu\u00e7\u00f5es tecnol\u00f3gicas para criar plataformas mais eficazes e amig\u00e1veis.
-
Platform Revolution: How Networked Markets Are Transforming the Economy and How to Make Them Work for You \u21a9
"},{"location":"handout/cloud/aws/cli/","title":"cli","text":""},{"location":"handout/cloud/aws/cli/#setting-up-the-aws-cli","title":"Setting up the AWS Cli","text":"aws configure\n
aws configureAWS Access Key ID: ****************5DMGAWS Secret Access Key]: *********************************fhwQtDefault region name [None]: Default output format [None]: aws sts get-session-token\n
aws sts get-session-token{ \"Credentials\": { \"AccessKeyId\": \"ASIA4MTWJ5HP4RFKVFX2\", \"SecretAccessKey\": \"RWfqFn9NZRZYEy1a5sFpdUPSd5i03YRer/9+PZ6V\", \"SessionToken\": \"FwoGZXIvYXdzEJX//////////wEaDIRJrTOKnJTZ/ ZpZGiKCAYnnc+16sxQl/eGYvj998q9u2eFb3VziCgpvNzKAuI/YcthL2XLp2VUXZswaOb5C3BikDENEKVbeH4va32ltJ/1Bm+F/ qkHNE9dTRMOxshV9iwkCe3/4+Sl9O6dZJguglcCq2Yfh+9HDzJxo6WtAd7UiCL6C/ hlcWgRS24IhvbdUDsgoy47qsQYyKNwLwW9ki4w5bmYRM9MVMinufs4LEkVRJGpEmc8 RG3gNaGvnRB0d840=\", \"Expiration\": \"2024-05-08T07:55:55+00:00\" }} aws sts get-caller-identity\n
aws sts get-caller-identity{ \"UserId\": \"AIDA4MTWJ5HPRUU7R22VG\", \"Account\": \"851725380063\", \"Arn\": \"arn:aws:iam::851725380063:user/root\"}"},{"location":"handout/cloud/aws/cli/#reference","title":"Reference","text":" -
AWS Command Line Interface Documentation - https://docs.aws.amazon.com/cli/
-
User Guide - Install AWS Cli
"},{"location":"handout/cloud/aws/eks/","title":"eks","text":""},{"location":"handout/cloud/aws/eks/#elastic-kubernetes-service","title":"Elastic Kubernetes Service","text":"Never spend your money before you have it, Jefferson T.
EKS n\u00e3o tem cota gr\u00e1tis, sempre \u00e9 muito bem cobrado.
"},{"location":"handout/cloud/aws/eks/#rise-up-an-eks","title":"Rise up an EKS","text":""},{"location":"handout/cloud/aws/eks/#1-creating-a-role","title":"1. Creating a role","text":"IAM - Identity and Access Management: gerencia usu\u00e1rios e acessos.
Role \u00e9 um grupo de policies que est\u00e3o vinculadas a servi\u00e7os AWS, assim, o EKS precisa de permissionamento para acessar os recursos da AWS.
"},{"location":"handout/cloud/aws/eks/#2-creating-a-vpc","title":"2. Creating a VPC","text":"Virtual Private Cloud
Organiza\u00e7\u00e3o do Kubernetes
Kubernetes Components 2 \u00c9 necess\u00e1rio criar uma estrutura de rede para suportar o Kubernetes, para isso, \u00e9 aconselh\u00e1vel utilizar um template do Cloud Formation. Abaixe o arquivo amazon-eks-vpc-private-subnets.yaml e d\u00ea um upload na cria\u00e7\u00e3o da VPC.
https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-vpc-private-subnets.yaml\n
flowchart TB\n subgraph Region\n direction LR\n subgraph Zone A\n direction LR\n subgraph subpri1[\"Subnet Private\"]\n direction TB\n poda1[\"pod 1\"]\n poda2[\"pod 2\"]\n poda3[\"pod 3\"]\n end\n subgraph subpub1[\"Subnet Public\"]\n loadbalancea[\"Load Balance\"]\n end\n end\n subgraph Zone B\n direction LR\n subgraph subpri2[\"Subnet Private\"]\n direction TB\n podb1[\"pod 1\"]\n podb2[\"pod 2\"]\n podb3[\"pod 3\"]\n end\n subgraph subpub2[\"Subnet Public\"]\n loadbalanceb[\"Load Balance\"]\n end\n end\n User --> loadbalancea\n loadbalancea --> poda1\n loadbalancea --> poda2\n loadbalancea --> poda3\n User --> loadbalanceb\n loadbalanceb --> podb1\n loadbalanceb --> podb2\n loadbalanceb --> podb3\n end
gateway --> auth gateway --> discovery
"},{"location":"handout/cloud/aws/eks/#3-building-an-eks","title":"3. Building an EKS","text":""},{"location":"handout/cloud/aws/eks/#4-accessing-the-eks","title":"4. Accessing the EKS","text":"On the terminal, after the AWS CLI has been set up.
aws configure\n
See the configuration that was done.
aws configure list\n
aws configure list Name Value Type Location ---- ----- ---- -------- profile <not set> None Noneaccess_key ****************TTNI shared-credentials-file secret_key ****************zAJ1 shared-credentials-file region us-east-2 config-file ~/.aws/config Set up the kube-config to point to the remote aws eks cluster.
aws eks update-kubeconfig --name eks-store\n
aws eks update-kubeconfig --name eks-storeAdded new context arn:aws:eks:us-east-2:058264361068:cluster/eks-store to /Users/sandmann/.kube/config>>kubectl get podsNo resources found in default namespace.>>kubectl get nodesNo resources found> Come back to AWS EKS > compute:
Notice that there are no nodes on the cluster yet, because only the Control Plane has been created; no worker nodes exist so far.
Attach roles to node group, it is exclusive for the worker nodes.
IAM > Roles
Add Permissions
- AmazonEKS_CNI_Policy (Configuration Network Interface)
- AmazonEKSWorkerNodePolicy
- AmazonEC2ContainerRegistryReadOnly
Review
Group Node Group
Only private subnets:
kubectl get nodes\n
kubectl get nodesNAME STATUS ROLES AGE VERSIONip-192-168-179-174.us-east-2.compute.internal Ready <none> 54s v1.29.3-eks-ae9a62aip-192-168-204-234.us-east-2.compute.internal Ready <none> 54s v1.29.3-eks-ae9a62a Now, deploy the microservice.
kubectl apply -f ./k8s/deployment.yamldeployment.apps/gateway createdkubectl apply -f ./k8s/service.yamlservice/gateway created>>>kubectl get allNAME READY STATUS RESTARTS AGEpod/gateway-7894679df8-lbngj 1/1 Running 0 81sNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEservice/gateway LoadBalancer 10.100.245.4 a3a5cc62ba81e466e9746f64f83fc349-1127848642.us-east-2.elb.amazonaws.com 8080:32681/TCP 25mservice/kubernetes ClusterIP 10.100.0.1 <none> 443/TCP 87mNAME READY UP-TO-DATE AVAILABLE AGEdeployment.apps/gateway 1/1 1 1 82sNAME DESIRED CURRENT READY AGEreplicaset.apps/gateway-7894679df8 1 1 1 82s> Jenkins update
Jenkins precisa instalar o awscli (adicionar ao docker-compose.yaml
)
RUN apt-get install -y awscli\n
Dentro da inst\u00e2ncia, configurar:
> aws configure\n> aws eks update-kubeconfig --name eks-store\n
Scale
> kubectl scale --replicas=3 deployment/gateway\n
kubectl scale --replicas=3 deployment/gatewaydeployment.apps/gateway scaledkubectl get podsNAME READY STATUS RESTARTS AGEgateway-7894679df8-62m7z 1/1 Running 0 12sgateway-7894679df8-r2kp2 1/1 Running 0 12sgateway-7894679df8-v6xhs 1/1 Running 0 5m58s
"},{"location":"handout/cloud/aws/eks/#references","title":"References:","text":" -
Setting up to use Amazon EKS \u21a9
-
Kubernetes Components \u21a9
-
Como criar um cluster Kubernetes na AWS com EKS by Fabricio Veronez
\u21a9
-
Creating a VPC for your Amazon EKS cluster \u21a9
-
AWS Pricing Calculator - EKS \u21a9
-
Getting started with Amazon EKS \u2013 AWS Management Console and AWS CLI \u21a9
-
kubectl scale \u21a9
"},{"location":"handout/devops/jenkins/","title":"Jenkins","text":"Docker ComposeEnvironment Variables docker-compose.yaml# docker compose up -d --build --force-recreate\nversion: '3.8'\nname: ops\n\nservices:\n\njenkins:\n container_name: jenkins\n build:\n dockerfile_inline: |\n FROM jenkins/jenkins:jdk21\n USER root\n RUN apt-get update && apt-get install -y lsb-release\n RUN curl -fsSLo /usr/share/keyrings/docker-archive-keyring.asc \\\n https://download.docker.com/linux/debian/gpg\n RUN echo \"deb [arch=$(dpkg --print-architecture) \\\n signed-by=/usr/share/keyrings/docker-archive-keyring.asc] \\\n https://download.docker.com/linux/debian \\\n $(lsb_release -cs) stable\" > /etc/apt/sources.list.d/docker.list\n\n RUN apt-get update && apt-get install -y docker-ce maven\n\n RUN apt-get install -y apt-transport-https ca-certificates curl\n RUN curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg\n RUN chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg\n RUN echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list\n RUN chmod 644 /etc/apt/sources.list.d/kubernetes.list\n RUN apt-get update\n RUN apt-get install -y kubectl\n\n RUN apt-get install -y awscli\n\n RUN usermod -aG docker jenkins\n ports:\n - 9080:8080\n - 50000:50000 \n volumes:\n - $CONFIG/jenkins:/var/jenkins_home\n - /var/run/docker.sock:/var/run/docker.sock\n restart: always\n networks:\n - infra\n\nnetworks:\n infra:\n driver: bridge\n
.envCONFIG=./config\n
To run this container:
docker compose up -d --build\n
The Jenkins will be available at:
http://localhost:9080\n
"},{"location":"handout/devops/jenkins/#pipeline","title":"Pipeline","text":""},{"location":"handout/devops/jenkins/#checkout-scm","title":"Checkout SCM","text":"Jenkinsfilepipeline {\n agent any\n\n stages {\n stage('Build') {\n steps {\n sh 'mvn -B -DskipTests clean install'\n }\n }\n }\n}\n
Definindo o n\u00famero m\u00e1ximo de executores.
Instalando o plugin para executar o Docker dentro do Jenkins container.
BASED ARTICLE:
Getting \u201cPermission Denied\u201d error when pulling a docker image in Jenkins docker container on Mac
"},{"location":"handout/devops/kubernetes/","title":"Kubernetes","text":""},{"location":"handout/devops/kubernetes/#minikube","title":"Minikube","text":"Vers\u00e3o light do kubernetes, para rodar em m\u00e1quinas locais. Instala\u00e7\u00e3o do Kubernetes.
Para Inicializar o Minikube ap\u00f3s a instala\u00e7\u00e3o, utilize:
minikube start --driver=docker --profile=store\n
minikube profile list\n
minikube delete --all\n
minikube delete --all --purge\n
Dashboard
minikube dashboard\n
"},{"location":"handout/devops/kubernetes/#kubectl","title":"Kubectl","text":"Comando cliente de gerenciamento do Kubernetes.
kubectl apply -f <filename>\n
kubectl get deployments\n
kubectl get svc\n
kubectl get pods\n
kubectl port-forward <pod> 8080:8080\n
kubectl exec -it <pod> -- bash\n
kubectl delete --all\n
kubectl api-resources\n
kubectl logs <pod>\n
kubectl describe pod <pod>\n
"},{"location":"handout/devops/kubernetes/#services","title":"Services","text":" -
ClusterIP: apenas dentro do cluster.
-
NodePort: permite exposi\u00e7\u00e3o de porta para fora do cluster.
-
LoadBalancer: uma porta para diversas inst\u00e2ncias no cluster.
"},{"location":"handout/devops/kubernetes/#deploying-a-postgres","title":"Deploying a Postgres","text":"Crie um novo reposit\u00f3rio para armazenar as configura\u00e7\u00f5es do banco de dados: platform.241.store.db.
estrutura de diret\u00f3rio sugerida\ud83d\udcc4 store.account\n\ud83d\udcc1 store.db\n\u2514\u2500\u2500 \ud83d\udcc1 k8s\n \u251c\u2500\u2500 \ud83d\udcc4 configmap.yaml\n \u251c\u2500\u2500 \ud83d\udcc4 credentials.yaml\n \u251c\u2500\u2500 \ud83d\udcc4 pv.yaml\n \u251c\u2500\u2500 \ud83d\udcc4 pvc.yaml\n \u251c\u2500\u2500 \ud83d\udcc4 deployment.yaml\n \u2514\u2500\u2500 \ud83d\udcc4 service.yaml\n
configmap.yamlcredentials.yamlpv.yamlpvc.yamldeployment.yamlservice.yaml Configura\u00e7\u00e3o de conex\u00e3o do banco
configmap.yamlapiVersion: v1\nkind: ConfigMap\nmetadata:\n name: postgres-configmap\n labels:\n app: postgres\ndata:\n POSTGRES_HOST: postgres\n POSTGRES_DB: store\n
kubectl apply -f ./k8s/configmap.yaml\nkubectl get configmap\n
Configura\u00e7\u00e3o de acesso ao banco
credentials.yamlapiVersion: v1\nkind: Secret\nmetadata:\n name: postgres-credentials\ndata:\n POSTGRES_USER: c3RvcmU=\n POSTGRES_PASSWORD: c3RvcmU=\n
kubectl apply -f ./k8s/credentials.yaml\nkubectl get secrets\n
Use encode base64 para ofuscar a senha. Vide: Base64Encode.
Persistence Volume: espa\u00e7o alocado no cluster
pv.yamlapiVersion: v1\nkind: PersistentVolume\nmetadata:\n name: postgres-volume\n labels:\n type: local\n app: postgres\nspec:\n storageClassName: manual\n capacity:\n storage: 10Gi\n accessModes:\n - ReadWriteMany\n hostPath:\n path: /data/postgresql\n
kubectl apply -f ./k8s/pv.yaml\nkubectl get pv\n
Persistence Volume Claim: espa\u00e7o alocado do cluster para o pods.
pvc.yamlapiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: postgres-volume-claim\n labels:\n app: postgres\nspec:\n storageClassName: manual\n accessModes:\n - ReadWriteMany\n resources:\n requests:\n storage: 10Gi\n
kubectl apply -f ./k8s/pvc.yaml\nkubectl get pvc\n
deployment.yamlapiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: postgres\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: postgres\n template:\n metadata:\n labels:\n app: postgres\n spec:\n containers:\n - name: postgres\n image: 'postgres:latest'\n imagePullPolicy: IfNotPresent\n ports:\n - containerPort: 5432\n env:\n\n - name: POSTGRES_DB\n valueFrom:\n configMapKeyRef:\n name: postgres-configmap\n key: POSTGRES_DB\n\n - name: POSTGRES_USER\n valueFrom:\n secretKeyRef:\n name: postgres-credentials\n key: POSTGRES_USER\n\n - name: POSTGRES_PASSWORD\n valueFrom:\n secretKeyRef:\n name: postgres-credentials\n key: POSTGRES_PASSWORD\n\n volumeMounts:\n - mountPath: /var/lib/postgresql/data\n name: postgresdata\n volumes:\n - name: postgresdata\n persistentVolumeClaim:\n claimName: postgres-volume-claim\n
kubectl apply -f ./k8s/deployment.yaml\nkubectl get deployments\nkubectl get pods\n
service.yamlapiVersion: v1\nkind: Service\nmetadata:\n name: postgres\n labels:\n app: postgres\nspec:\n type: ClusterIP\n ports:\n - port: 5432\n selector:\n app: postgres\n
kubectl apply -f ./k8s/service.yaml\nkubectl get services\n
Acessando o pod do Postgres:
kubectl exec -it postgres-<pod-id> -- psql -h localhost -U store --password -p 5432 store\n
Redirecionando porta:
kubectl port-forward <pod> 5432:5432\n
"},{"location":"handout/devops/kubernetes/#deploying-the-discovery-microservice","title":"Deploying the Discovery Microservice","text":"discovery\ud83d\udcc1 store.discovery-resource\n\u251c\u2500\u2500 \ud83d\udcc1 src\n\u2502 \u2514\u2500\u2500 \ud83d\udcc1 main\n\u2502 \u2514\u2500\u2500 \ud83d\udcc1 resources\n\u2502 \u2514\u2500\u2500 \ud83d\udcc4 application.yaml\n\u251c\u2500\u2500 \ud83d\udcc1 k8s\n\u2502 \u251c\u2500\u2500 \ud83d\udcc4 configmap.yaml\n\u2502 \u251c\u2500\u2500 \ud83d\udcc4 deployment.yaml\n\u2502 \u2514\u2500\u2500 \ud83d\udcc4 service.yaml\n\u251c\u2500\u2500 \ud83d\udcc4 Dockerfile\n\u251c\u2500\u2500 \ud83d\udcc4 Jenkins\n\u2514\u2500\u2500 \ud83d\udcc4 pom.xml\n
configmap.yamldeployment.yamlservice.yaml configmap.yamlapiVersion: v1\nkind: ConfigMap\nmetadata:\n name: discovery-configmap\n labels:\n app: discovery\ndata:\n DISCOVERY_HOST: discovery \n
deployment.yamlapiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: discovery\n labels:\n app: discovery\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: discovery\n template:\n metadata:\n labels:\n app: discovery\n spec:\n containers:\n - name: discovery\n image: humbertosandmann/discovery:latest\n ports:\n - containerPort: 8761\n
service.yamlapiVersion: v1\nkind: Service\nmetadata:\n name: discovery\n labels:\n app: discovery\nspec:\n type: ClusterIP\n ports:\n - port: 8761\n targetPort: 8761\n protocol: TCP\n selector:\n app: discovery\n
"},{"location":"handout/devops/kubernetes/#deploying-a-microservice","title":"Deploying a Microservice","text":"account\ud83d\udcc1 store.account-resource\n\u251c\u2500\u2500 \ud83d\udcc1 src\n\u2502 \u2514\u2500\u2500 \ud83d\udcc1 main\n\u2502 \u2514\u2500\u2500 \ud83d\udcc1 resources\n\u2502 \u2514\u2500\u2500 \ud83d\udcc4 application.yaml\n\u251c\u2500\u2500 \ud83d\udcc1 k8s\n\u2502 \u251c\u2500\u2500 \ud83d\udcc4 deployment.yaml\n\u2502 \u2514\u2500\u2500 \ud83d\udcc4 service.yaml\n\u251c\u2500\u2500 \ud83d\udcc4 Dockerfile\n\u251c\u2500\u2500 \ud83d\udcc4 Jenkins\n\u2514\u2500\u2500 \ud83d\udcc4 pom.xml\n
application.yamldeployment.yamlservice.yaml application.yamlserver:\n port: 8080\n\nspring:\n application:\n name: account\n datasource:\n url: jdbc:postgresql://${POSTGRES_HOST}:5432/${POSTGRES_DB}\n username: ${POSTGRES_USER:postgres}\n password: ${POSTGRES_PASSWORD:Post123321}\n driver-class-name: org.postgresql.Driver\n flyway:\n baseline-on-migrate: true\n schemas: account\n jpa:\n properties:\n hibernate:\n default_schema: account\n\nmanagement:\n endpoints:\n web:\n base-path: /account/actuator\n exposure:\n include: [ 'prometheus' ]\n\neureka:\n client:\n register-with-eureka: true\n fetch-registry: true\n service-url:\n defaultZone: http://${DISCOVERY_HOST}:8761/eureka/\n
Subir no Git e rodar o Jenkins.
deployment.yamlapiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: account\nspec:\n selector:\n matchLabels:\n app: account\n replicas: 1\n template:\n metadata:\n labels:\n app: account\n spec:\n containers:\n - name: account\n image: humbertosandmann/account:latest\n ports:\n - containerPort: 8080\n env:\n\n - name: DISCOVERY_HOST\n valueFrom:\n configMapKeyRef:\n name: discovery-configmap\n key: DISCOVERY_HOST\n\n - name: POSTGRES_HOST\n valueFrom:\n configMapKeyRef:\n name: postgres-configmap\n key: POSTGRES_HOST\n\n - name: POSTGRES_DB\n valueFrom:\n configMapKeyRef:\n name: postgres-configmap\n key: POSTGRES_DB\n\n - name: POSTGRES_USER\n valueFrom:\n secretKeyRef:\n name: postgres-credentials\n key: POSTGRES_USER\n\n - name: POSTGRES_PASSWORD\n valueFrom:\n secretKeyRef:\n name: postgres-credentials\n key: POSTGRES_PASSWORD\n
service.yamlapiVersion: v1\nkind: Service\nmetadata:\n name: account\n labels:\n name: account\nspec:\n type: NodePort\n ports:\n - port: 8080\n targetPort: 8080\n protocol: TCP\n selector:\n app: account\n
kubectl apply -f ./k8s/service.yaml\nkubectl get services\n
kubectl apply -f k8s/deployment.yaml\nkubectl apply -f k8s/service.yaml \n
"},{"location":"handout/devops/kubernetes/#deploying-using-jenkins","title":"Deploying using Jenkins","text":""},{"location":"handout/devops/kubernetes/#creating-crendentials-for-jenkins-to-k8s","title":"Creating credentials for Jenkins to K8s","text":"Criar credentials no Kubernetes para que o Jenkins possa conectar.
jenkins.yaml---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: jenkins\n namespace: default\n---\n\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: jenkins\n namespace: default\nrules:\n- apiGroups: [\"\"]\n resources: [\"pods\",\"services\"]\n verbs: [\"create\",\"delete\",\"get\",\"list\",\"patch\",\"update\",\"watch\"]\n- apiGroups: [\"apps\"]\n resources: [\"deployments\"]\n verbs: [\"create\",\"delete\",\"get\",\"list\",\"patch\",\"update\",\"watch\"]\n- apiGroups: [\"\"]\n resources: [\"pods/exec\"]\n verbs: [\"create\",\"delete\",\"get\",\"list\",\"patch\",\"update\",\"watch\"]\n- apiGroups: [\"\"]\n resources: [\"pods/log\"]\n verbs: [\"get\",\"list\",\"watch\"]\n- apiGroups: [\"\"]\n resources: [\"secrets\"]\n verbs: [\"get\",\"create\"]\n- apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"create\",\"get\",\"update\"]\n- apiGroups: [\"\"]\n resources: [\"persistentvolumeclaims\"]\n verbs: [\"create\",\"delete\",\"get\",\"list\",\"patch\",\"update\",\"watch\"]\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: jenkins-token\n annotations:\n kubernetes.io/service-account.name: jenkins\ntype: kubernetes.io/service-account-token\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: jenkins\n namespace: default\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: jenkins\nsubjects:\n- kind: ServiceAccount\n name: jenkins\n---\n# Allows jenkins to create persistent volumes\n# This cluster role binding allows anyone in the \"manager\" group to read secrets in any namespace.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: jenkins-crb\nsubjects:\n- kind: ServiceAccount\n namespace: default\n name: jenkins\nroleRef:\n kind: ClusterRole\n name: jenkinsclusterrole\n apiGroup: rbac.authorization.k8s.io\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n # \"namespace\" omitted since ClusterRoles are not 
namespaced\n name: jenkinsclusterrole\nrules:\n- apiGroups: [\"\"]\n resources: [\"persistentvolumes\"]\n verbs: [\"create\",\"delete\",\"get\",\"list\",\"patch\",\"update\",\"watch\"]\n
Executar a declara\u00e7\u00e3o:
kubectl apply -f jenkins.yaml\n
"},{"location":"handout/devops/kubernetes/#recovering-the-jenkins-token","title":"Recovering the Jenkins' Token","text":"kubectl get secrets\n
kubectl get secretsNAME TYPE DATA AGEjenkins-token kubernetes.io/service-account-token 3 21s Abrindo o objeto com o token.
kubectl describe secrets/jenkins-token\n
kubectl describe secrets/jenkins-tokenName: jenkins-tokenNamespace: defaultLabels: <none>Annotations: kubernetes.io/service-account.name: jenkins kubernetes.io/service-account.uid: 0d06d343-fd34-4aff-8396-5dfec5a9e5b6Type: kubernetes.io/service-account-tokenData====ca.crt: 1111 bytesnamespace: 7 bytestoken: eyJhbGciOiJSUzI1NiIsImtpZCI6IklqTkZXdEVKcW1iclBrNHBnQzJSX1F6QjFIWDFMX0FvNGVkNGd2aWFKd00ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImplbmtpbnMtdG9rZW4iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiamVua2lucyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjBkMDZkMzQzLWZkMzQtNGFmZi04Mzk2LTVkZmVjNWE5ZTViNiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmplbmtpbnMifQ.XkwD5vwC7CJNDv44PxCAIpLEfVlQbLE6VDNmTOEpFkaoe_x4ehU8QS8fnTgUz0a_vjUKuXum-PD2vF8Fx_WBsWVAG8BNhXJv79MMbEe7axYT7W91fjsnT0rMqSqzajNjTTDFvPDQu0KkLzC-UUnlG3RdNHhzxGVnUIA9lIeJuVKnlCXAexPQr6HeX5ggbe-CZO_uMjFZjwBnjLC-IJsIKKaz8I4CbFxz10vAl5SpJ7PadA1iZZEvr_VYhhG42qMqRFLzkrXtWUG0NX8aSitJT0Wk9c54ME13WDZb6MfRXwUWbARu-TLN56KrPaqtL2dBtRG2EFOn5nVXARI7jPzhjg Try it!!! Abra o token no site jwt.io e verifique seu conte\u00fado.
"},{"location":"handout/devops/kubernetes/#set-up-the-credential-to-jenkins","title":"Set up the credential to Jenkins","text":"Before to go ahead
Instale os plugins: Kubernetes Cli e Kubernetes pipeline.
Manage Jenkins > Credentials
"},{"location":"handout/devops/kubernetes/#updating-the-jenkinsfile","title":"Updating the Jenkinsfile","text":"Adding the Deploy on k8s
stage:
Jenkinsfile...\n stage('Deploy on local k8s') {\n steps {\n withCredentials([ string(credentialsId: 'minikube-credentials', variable: 'api_token') ]) {\n sh 'kubectl --token $api_token --server https://host.docker.internal:55529 --insecure-skip-tls-verify=true apply -f ./k8s/deployment.yaml '\n sh 'kubectl --token $api_token --server https://host.docker.internal:55529 --insecure-skip-tls-verify=true apply -f ./k8s/service.yaml '\n }\n }\n }\n...\n
"},{"location":"handout/devops/kubernetes/#kubectl-config","title":"kubectl config","text":"kubectl config get-contexts
"},{"location":"handout/devops/kubernetes/#references","title":"References:","text":"[1^]: Using a Service to Expose Your App
[2^]: Install Kubernetes's Tools
[3^]: How to Deploy Postgres to Kubernetes Cluster
[4^]: Spring boot, PostgreSQL and Kubernetes
[5^]: Deploy nodejs App in a Minikube Kubernetes using Jenkins CI/CD pipeline
[6^]: Horizontal Pod Autoscaling
"},{"location":"handout/devops/observability/","title":"Observability","text":" - Logging
- Monitoring
- Tracing
"},{"location":"handout/devops/observability/#microservice","title":"Microservice","text":"pom.xml<!-- metricas de uso -->\n<dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-actuator</artifactId>\n</dependency>\n\n<!-- exporta no formato prometheus -->\n<dependency>\n <groupId>io.micrometer</groupId>\n <artifactId>micrometer-registry-prometheus</artifactId>\n <scope>runtime</scope>\n</dependency>\n
application.yamlmanagement:\n endpoints:\n web:\n base-path: /gateway/actuator\n exposure:\n include: [ 'prometheus' ]\n
"},{"location":"handout/devops/observability/#docker","title":"Docker","text":"docker-compose.yaml prometheus:\n image: prom/prometheus:latest\n container_name: store-prometheus\n ports:\n - 9090:9090\n volumes:\n - $VOLUME/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml\n networks:\n - private-network\n\n grafana:\n container_name: store-grafana\n image: grafana/grafana-enterprise\n ports:\n - 3000:3000\n environment:\n - GF_SECURITY_ADMIN_PASSWORD=admin\n volumes:\n - $VOLUME/grafana:/var/lib/grafana\n - $VOLUME/grafana/provisioning/datasources:/etc/grafana/provisioning/datasources \n restart: always\n networks:\n - private-network\n
"},{"location":"handout/devops/observability/#prometheus","title":"Prometheus","text":"$VOLUME/prometheus/prometheus.ymlscrape_configs:\n\n - job_name: 'GatewayMetrics'\n metrics_path: '/gateway/actuator/prometheus'\n scrape_interval: 1s\n static_configs:\n - targets:\n - gateway:8080\n labels:\n application: 'Gateway Application'\n\n - job_name: 'AuthMetrics'\n metrics_path: '/auth/actuator/prometheus'\n scrape_interval: 1s\n static_configs:\n - targets:\n - auth:8080\n labels:\n application: 'Auth Application'\n\n - job_name: 'AccountMetrics'\n metrics_path: '/accounts/actuator/prometheus'\n scrape_interval: 1s\n static_configs:\n - targets:\n - account:8080\n labels:\n application: 'Account Application'\n
http://localhost:9090/
"},{"location":"handout/devops/observability/#grafana","title":"Grafana","text":"$VOLUME/grafana/provisioning/datasources/datasources.ymlapiVersion: 1\ndatasources:\n - name: Prometheus\n type: prometheus\n access: proxy\n url: http://prometheus:9090\n isDefault: true\n
http://localhost:3000/
- Dashboard MarketPlace
"},{"location":"handout/microservices/account/","title":"Account","text":"Esse microservi\u00e7o \u00e9 respons\u00e1vel por gerenciar as contas dos usu\u00e1rios do sistema que est\u00e1 sendo desenvolvido. Ele tamb\u00e9m pode ser utilizado como template para o desenvolvimento de outros microservi\u00e7os que se utilizem de recuros semelhantes em seu funcionamento.
- Endpoints
- Modulariza\u00e7\u00e3o
- Interface
- Resource
- Persist\u00eancia
- Documenta\u00e7\u00e3o
- Integra\u00e7\u00e3o
- Docker
"},{"location":"handout/microservices/account/#endpoints","title":"Endpoints","text":"Create Account POST /accounts\n
Request
{\n \"name\": \"Antonio do Estudo\",\n \"email\": \"acme@insper.edu.br\",\n \"password\": \"123@\"\n}\n
Responses: codebody 201
{\n \"id\": \"45d16201-12a4-48bf-8c84-df768fdc4878\",\n \"name\": \"Antonio do Estudo\",\n \"email\": \"acme@insper.edu.br\"\n}\n
401 Login :: find by email and password Get Account GET /accounts/{uuid}\n
Responses:
codebody 200
{\n \"id\": \"45d16201-12a4-48bf-8c84-df768fdc4878\",\n \"name\": \"Antonio do Estudo\",\n \"email\": \"acme@insper.edu.br\"\n}\n
401 "},{"location":"handout/microservices/account/#modularizacao","title":"Modulariza\u00e7\u00e3o","text":"Class Diagram
Exemplo para o microsservi\u00e7o Account.
classDiagram\n namespace Interface {\n class AccountController {\n <<interface>>\n create(AccountIn)\n read(String id): AccountOut\n update(String id, AccountIn)\n delete(String id)\n findByEmailAndPassword(AccountIn)\n }\n class AccountIn {\n <<record>>\n String name\n String email\n String password\n }\n class AccountOut {\n <<record>>\n String id\n String name\n String email\n }\n }\n namespace Resource {\n class AccountResource {\n <<REST API>>\n -accountService\n }\n class AccountService {\n <<service>>\n -accountRepository\n create(Account)\n }\n class AccountRepository {\n <<nterface>>\n findByEmailAndHash(String, String)\n }\n class AccountModel {\n <<entity>>\n String id\n String name\n String email\n String hash\n }\n class Account {\n <<dto>>\n String id\n String name\n String email\n String password\n }\n }\n AccountController <|-- AccountResource\n AccountResource o-- AccountService\n AccountService o-- AccountRepository
"},{"location":"handout/microservices/account/#pom-dependecy","title":"POM dependecy","text":"Note que esse microsservi\u00e7o possui depend\u00eancia da interface, o Account. Logo, se torna necess\u00e1rio explicitar essa depend\u00eancia no pom.xml
do microsservi\u00e7o Account.
<dependency>\n <groupId>insper.store</groupId>\n <artifactId>account</artifactId>\n <version>${project.version}</version>\n</dependency>\n
Outras depend\u00eancias relevantes para adicionar no pom.xml
s\u00e3o o suporte ao registro no discovery.
<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>\n</dependency>\n
Quando adicionado o acesso ao Discovery, \u00e9 necess\u00e1rio definir no application.yalm
o nome com o qual o servi\u00e7o ser\u00e1 invocado, assim bem como, o endere\u00e7o de acesso do discovery ao qual o servi\u00e7o ir\u00e1 conectar:
spring:\n application:\n name: store-account\n\neureka:\n client:\n register-with-eureka: true\n fetch-registry: true\n service-url:\n defaultZone: ${EUREKA_URI:http://localhost:8761/eureka/}\n
J\u00e1 para disponibilizar o uso ao OpenFeign
.
<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-openfeign</artifactId>\n</dependency>\n
"},{"location":"handout/microservices/auth/","title":"Auth","text":"A fim do sistema possuir um controle de acesso, \u00e9 conveniente a cria\u00e7\u00e3o de um microsservi\u00e7o Auth, que ser\u00e1 respons\u00e1vel pelo cadastro de usu\u00e1rios do sistema.
- Endpoints
- Modulariza\u00e7\u00e3o
- Interface
- Resource
- Documenta\u00e7\u00e3o
- Integra\u00e7\u00e3o
- Token
- Docker
"},{"location":"handout/microservices/auth/#endpoints","title":"Endpoints","text":"Register POST /auth/register\n
Autentica\u00e7\u00e3o :: Login POST /auth/login\n
"},{"location":"handout/microservices/auth/#request","title":"Request","text":"{\n \"name\": \"Antonio do Estudo\",\n \"email\": \"acme@insper.edu.br\",\n \"password\": \"123@321\"\n}\n
"},{"location":"handout/microservices/auth/#response","title":"Response","text":"code body 201"},{"location":"handout/microservices/auth/#sequence-diagram","title":"Sequence Diagram","text":"sequenceDiagram\nautonumber\nactor User\nUser->>+Auth: register(RegisterIn)\nAuth->>+Account: create(AccountIn)\nAccount->>-Auth: returns the new account (AccountOut)\nAuth->>-User: returns 201
"},{"location":"handout/microservices/auth/#request_1","title":"Request","text":"{\n \"email\": \"acme@insper.edu.br\",\n \"password\": \"123@321\"\n}\n
"},{"location":"handout/microservices/auth/#response_1","title":"Response","text":"code body 201 { \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiI0NWQxNjIwMS0xMmE0LTQ4YmYtOGM4NC1kZjc2OGZkYzQ4NzgiLCJuYW1lIjoiQW50b25pbyBkbyBFc3R1ZG8iLCJpYXQiOjE1MTYyMzkwMjIsInJvbGUiOiJyZWd1bGFyIn0.8eiTZjXGUFrseBP5J91UdDctw-Flp7HP-PAp1eO8f1M\" }
403"},{"location":"handout/microservices/auth/#sequence-diagram_1","title":"Sequence Diagram","text":"sequenceDiagram\nautonumber\nactor User\nUser->>+Auth: authenticate(CredentiaIn)\nAuth->>+Account: login(LoginIn)\ncritical validated\n Account->>-Auth: returns the account\noption denied\n Auth-->>User: unauthorized message\nend \nAuth->>Auth: generates a token\nAuth->>-User: returns LoginOut\nUser->>User: stores the token to use for the next requests
"},{"location":"handout/microservices/auth/#modularizacao","title":"Modulariza\u00e7\u00e3o","text":"Exemplo para o microsservi\u00e7o Auth.
classDiagram\n namespace Interface {\n class AuthController {\n <<interface>>\n register(RegisterIn)\n authenticate(CredentialIn): LoginOut\n solve(SolveIn): SolveOut\n }\n class RegisterIn {\n <<record>>\n String name\n String email\n String password\n }\n class CredentialIn {\n <<record>>\n String email\n String password\n }\n class LoginOut {\n <<Record>>\n String token\n }\n class SolveIn {\n <<Record>>\n String token\n }\n class SolveOut {\n <<Record>>\n String id\n String name\n String role\n }\n }\n namespace Resource {\n class AuthResource {\n <<REST API>>\n -authService\n }\n class AuthService {\n <<service>>\n JwtService jwtService\n register(RegisterIn)\n authenticate(CredentialIn)\n }\n class JwtService {\n <<service>>\n String secretKey\n String issuer\n long duration\n SecretKey key\n JwtParser parser\n init()\n create(String id, String name, String role): String\n getToken(String token): Token\n getRole(String token): String\n }\n class Token {\n <<record>>\n String id\n String name\n String role\n }\n }\n AuthController <|-- AuthResource\n AuthResource o-- AuthService\n AuthService o-- JwtService
Exemplo de uma implementa\u00e7\u00e3o da interface AuthController.
AuthController.javapackage store.auth;\n\nimport org.springframework.cloud.openfeign.FeignClient;\nimport org.springframework.http.ResponseEntity;\nimport org.springframework.web.bind.annotation.PostMapping;\nimport org.springframework.web.bind.annotation.RequestBody;\n\n@FeignClient(\"store-auth\")\npublic interface AuthController {\n\n @PostMapping(\"/auth/register\")\n ResponseEntity<?> create (\n @RequestBody(required = true) RegisterIn in\n );\n\n @PostMapping(\"/auth/login\")\n ResponseEntity<LoginOut> authenticate (\n @RequestBody(required = true) Credential in\n );\n}\n
Repare que h\u00e1 a publica\u00e7\u00e3o da interface como sendo um servi\u00e7o a ser registrado no Discovery.
"},{"location":"handout/microservices/auth/#documentacao","title":"Documenta\u00e7\u00e3o","text":"Para fazer a documenta\u00e7\u00e3o dos APIs, de forma automatizada, \u00e9 aconselh\u00e1vel a utiliza\u00e7\u00e3o da biblioteca SpringDoc OpenAPI
.
pom.xml<!-- https://mvnrepository.com/artifact/org.springdoc/springdoc-openapi-ui -->\n<dependency>\n <groupId>org.springdoc</groupId>\n <artifactId>springdoc-openapi-starter-webmvc-ui</artifactId>\n <version>[2.3.0,)</version>\n</dependency>\n
"},{"location":"handout/microservices/auth/#integracao","title":"Integra\u00e7\u00e3o","text":"A integra\u00e7\u00e3o entre os microsservi\u00e7os \u00e9 feita via OpenFeign. Esse framework precisa saber, quando a aplica\u00e7\u00e3o sobe, em quais pacotes ir\u00e1 procurar os servi\u00e7os. Para isso, se torna necess\u00e1rio anotar a classe AuthApplication
com a lista de pacotes, assim bem como, anotar que esse microsservi\u00e7o ir\u00e1 trabalhar com o sistema de descoberta de microsservi\u00e7os habilitado.
AuthApplication.javapackage store.auth;\n\nimport org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\nimport org.springframework.cloud.client.discovery.EnableDiscoveryClient;\nimport org.springframework.cloud.openfeign.EnableFeignClients;\n\n@EnableFeignClients(basePackages = {\n \"insper.store.account\"\n})\n@EnableDiscoveryClient\n@SpringBootApplication\npublic class AuthApplication {\n\n public static void main(String[] args) {\n SpringApplication.run(AuthApplication.class, args);\n }\n\n}\n
Necess\u00e1rio tamb\u00e9m atualizar o pom.xml
para que o microsservi\u00e7o possa enxergar o outro microsservi\u00e7o.
Note que esse microsservi\u00e7o possui depend\u00eancia de outro, o Account, al\u00e9m da depend\u00eancia da interface do pr\u00f3prio microsservi\u00e7o. Logo, se torna necess\u00e1rio explicitar essa depend\u00eancia no pom.xml
do microsservi\u00e7o Auth.
pom.xml<dependency>\n <groupId>insper.store</groupId>\n <artifactId>auth</artifactId>\n <version>${project.version}</version>\n</dependency>\n<dependency>\n <groupId>insper.store</groupId>\n <artifactId>account</artifactId>\n <version>${project.version}</version>\n</dependency>\n\n<!-- https://mvnrepository.com/artifact/io.jsonwebtoken/jjwt-api -->\n<dependency>\n <groupId>io.jsonwebtoken</groupId>\n <artifactId>jjwt-api</artifactId>\n <version>0.12.3</version>\n</dependency>\n<dependency>\n <groupId>io.jsonwebtoken</groupId>\n <artifactId>jjwt-impl</artifactId>\n <version>0.12.3</version>\n <scope>runtime</scope>\n</dependency>\n<dependency>\n <groupId>io.jsonwebtoken</groupId>\n <artifactId>jjwt-jackson</artifactId> <!-- or jjwt-gson if Gson is preferred -->\n <version>0.12.3</version>\n <scope>runtime</scope>\n</dependency>\n
Aproveitando esse ponto, vale a pena j\u00e1 incluir tamb\u00e9m no pom.xml
.
"},{"location":"handout/microservices/auth/#token","title":"Token","text":"Para gerar o token de acesso, no caso JWT, um servi\u00e7o foi criado, JwtService.java
.
Para gerar o JWT, alguns atributos s\u00e3o adicionados no application.yaml
.
application.yamlstore:\n jwt:\n issuer: \"In5pEr\"\n secretKey: \"\"\n duration: 31536000000 # 365 days in milliseconds\n
JwtService.javapackage store.auth;\n\nimport java.util.Date;\n\nimport javax.crypto.SecretKey;\n\nimport org.springframework.beans.factory.annotation.Value;\nimport org.springframework.stereotype.Service;\n\nimport io.jsonwebtoken.Claims;\nimport io.jsonwebtoken.ExpiredJwtException;\nimport io.jsonwebtoken.JwtParser;\nimport io.jsonwebtoken.Jwts;\nimport io.jsonwebtoken.io.Decoders;\nimport io.jsonwebtoken.security.Keys;\nimport jakarta.annotation.PostConstruct;\n\n@Service\npublic class JwtService {\n\n    @Value(\"${store.jwt.secret-key}\")\n    private String secretKey;\n\n    @Value(\"${store.jwt.issuer}\")\n    private String issuer;\n\n    @Value(\"${store.jwt.duration}\")\n    private long duration = 1l;\n\n    private SecretKey key;\n    private JwtParser parser;\n\n    @PostConstruct\n    public void init() {\n        this.key = Keys.hmacShaKeyFor(Decoders.BASE64.decode(secretKey));\n        this.parser = Jwts.parser().verifyWith(key).build();\n    }\n\n    public String create(String id, String name, String role) {\n        String jwt = Jwts.builder()\n            .header()\n            .and()\n            .id(id)\n            .issuer(issuer)\n            .subject(name)\n            .signWith(key)\n            .claim(\"role\", role)\n            .notBefore(new Date())\n            .expiration(new Date(new Date().getTime() + duration))\n            .compact();\n        return jwt;\n    }\n\n    public Token getToken(String token) {\n        final Claims claims = resolveClaims(token);\n        return Token.builder()\n            .id(claims.getId())\n            .role(claims.get(\"role\", String.class))\n            .build();\n    }\n\n    private Claims resolveClaims(String token) {\n        if (token == null) throw new io.jsonwebtoken.MalformedJwtException(\"token is null\");\n        return validateClaims(parser.parseSignedClaims(token).getPayload());\n    }\n\n    private Claims validateClaims(Claims claims) throws ExpiredJwtException {\n        if (claims.getExpiration().before(new Date())) throw new ExpiredJwtException(null, claims, issuer);\n        if (claims.getNotBefore().after(new Date())) throw new ExpiredJwtException(null, claims, issuer);\n        return claims;\n    }\n\n}\n
"},{"location":"handout/microservices/auth/#docker","title":"Docker","text":"Adicione no docker-compose.yaml
o registro desse novo microsservi\u00e7o:
docker-compose.yaml auth:\n build:\n context: ../store.auth-resource/\n dockerfile: Dockerfile\n container_name: store-auth\n image: store-auth:latest\n # ports:\n # - 8080:8080\n environment:\n - eureka.client.service-url.defaultZone=http://store-discovery:8761/eureka/\n deploy:\n mode: replicated\n replicas: 1\n restart: always\n networks:\n - private-network\n depends_on:\n - discovery\n - account\n
NICE TO HAVE O projeto da disciplina pode ter um microsservi\u00e7o de registro que valide email ou SMS para criar a conta.
"},{"location":"handout/microservices/gateway/","title":"Gateway","text":"O gateway tem como fun\u00e7\u00e3o ser o \u00fanico ponto de entrada de todo o sistema, ele \u00e9 respons\u00e1vel por redirecionar todas as requisi\u00e7\u00f5es aos respectivos microsservi\u00e7os. Assim bem como, de autorizar ou negar acesso ao sistema baseando-se no token de seguran\u00e7a passado pela requisi\u00e7\u00e3o.
flowchart LR\n subgraph Client\n direction LR\n Web\n Mobile\n Desktop\n end\n subgraph Microservices\n direction LR\n gateway[\"Gateway\"]\n subgraph Essentials\n direction TB\n discovery[\"Discovery\"]\n auth[\"Auth\"]\n config[\"Configuration\"]\n end\n subgraph Businesses\n direction TB\n ms1[\"Service 1\"]\n ms2[\"Service 2\"]\n ms3[\"Service 3\"]\n end\n end\n Client --> lb[\"Load Balance\"] --> gateway --> Businesses\n gateway --> auth\n gateway --> discovery
"},{"location":"handout/microservices/gateway/#sequence-diagram","title":"Sequence Diagram","text":"sequenceDiagram\n autonumber\n actor User\n User->>Gateway: route(ServerHttpRequest)\n Gateway->>+AuthenticationFilter: filter(ServerWebExchange, GatewayFilterChain)\n AuthenticationFilter->>RouteValidator: isSecured.test(ServerHttpRequest)\n RouteValidator-->>AuthenticationFilter: True | False\n critical notSecured\n AuthenticationFilter->>Gateway: follow the flux\n end\n AuthenticationFilter->>AuthenticationFilter: isAuthMissing(ServerHttpRequest)\n critical isAuthMissing\n AuthenticationFilter->>User: unauthorized message\n end\n AuthenticationFilter->>AuthenticationFilter: validateAuthorizationHeader()\n critical isInvalidAuthorizationHeader\n AuthenticationFilter->>User: unauthorized message\n end\n AuthenticationFilter->>Auth: solve(Token)\n critical isInvalidToken\n Auth->>User: unauthorized message\n end\n Auth->>AuthenticationFilter: returns token claims\n AuthenticationFilter->>AuthenticationFilter: updateRequestHeader(ServerHttpRequest)\n AuthenticationFilter->>Gateway: follow the flux
pom.xml<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-gateway</artifactId>\n</dependency>\n<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>\n</dependency>\n<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-loadbalancer</artifactId>\n</dependency>\n<dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-webflux</artifactId>\n</dependency>\n<!-- https://mvnrepository.com/artifact/com.github.ben-manes.caffeine/caffeine -->\n<dependency>\n <groupId>com.github.ben-manes.caffeine</groupId>\n <artifactId>caffeine</artifactId>\n <version>3.1.8</version>\n</dependency>\n<dependency>\n <groupId>insper.store</groupId>\n <artifactId>auth</artifactId>\n <version>0.0.1-SNAPSHOT</version>\n</dependency>\n
application.yamlspring:\n application:\n name: store-gateway\n cloud:\n discovery:\n locator:\n enabled: true\n gateway:\n routes:\n\n - id: auth\n uri: lb://store-auth\n predicates:\n - Path=/auth/**\n\n # - id: product\n # uri: lb://store-product\n # predicates:\n # - Path=/product/**\n\n default-filters:\n - DedupeResponseHeader=Access-Control-Allow-Credentials Access-Control-Allow-Origin\n globalcors:\n corsConfigurations:\n '[/**]':\n allowedOrigins: \"http://localhost\"\n allowedHeaders: \"*\"\n allowedMethods:\n - GET\n - POST\n\napi:\n endpoints:\n open: >\n POST /auth/register/,\n POST /auth/login/\n
GatewayConfiguration.javapackage insper.store.gateway;\n\nimport org.springframework.cloud.client.loadbalancer.LoadBalanced;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.web.reactive.function.client.WebClient;\n\n@Configuration\npublic class GatewayConfiguration {\n\n @Bean\n @LoadBalanced\n public WebClient.Builder webClient() {\n return WebClient.builder();\n }\n\n}\n
RouterValidator.javapackage insper.store.gateway.security;\n\nimport java.util.List;\nimport java.util.function.Predicate;\n\nimport org.springframework.beans.factory.annotation.Value;\nimport org.springframework.http.server.reactive.ServerHttpRequest;\nimport org.springframework.stereotype.Component;\n\n@Component\npublic class RouterValidator {\n\n    @Value(\"${api.endpoints.open}\") \n    private List<String> openApiEndpoints;\n\n    public Predicate<ServerHttpRequest> isSecured =\n        request -> openApiEndpoints\n            .stream()\n            .noneMatch(uri -> {\n                String[] parts = uri.replaceAll(\"[^a-zA-Z0-9// ]\", \"\").split(\" \");\n                return request.getMethod().toString().equalsIgnoreCase(parts[0])\n                    && request.getURI().getPath().equals(parts[1]);\n            });\n\n}\n
AuthenticationFilter.javapackage insper.store.gateway.security;\n\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.cloud.gateway.filter.GatewayFilterChain;\nimport org.springframework.cloud.gateway.filter.GlobalFilter;\nimport org.springframework.http.HttpHeaders;\nimport org.springframework.http.HttpStatus;\nimport org.springframework.http.MediaType;\nimport org.springframework.http.server.reactive.ServerHttpRequest;\nimport org.springframework.stereotype.Component;\nimport org.springframework.web.reactive.function.client.WebClient;\nimport org.springframework.web.server.ResponseStatusException;\nimport org.springframework.web.server.ServerWebExchange;\n\nimport reactor.core.publisher.Mono;\nimport store.auth.IdIn;\nimport store.auth.IdOut;\n\n@Component\npublic class AuthenticationFilter implements GlobalFilter {\n\n private static final String HEADER_AUTHORIZATION = \"Authorization\";\n private static final String HEADER_BEARER = \"Bearer\";\n\n @Autowired\n private RouterValidator routerValidator;\n\n @Autowired\n private WebClient.Builder webClient;\n\n @Override\n public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {\n ServerHttpRequest request = exchange.getRequest();\n if (!routerValidator.isSecured.test(request)) {\n return chain.filter(exchange);\n }\n if (!isAuthMissing(request)) {\n final String[] parts = this.getAuthHeader(request).split(\" \");\n if (parts.length != 2 || !parts[0].equals(HEADER_BEARER)) {\n throw new ResponseStatusException(HttpStatus.FORBIDDEN, \"Authorization header format must be Bearer {token}\");\n }\n final String token = parts[1];\n return webClient\n .defaultHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE)\n .build()\n .post()\n .uri(\"http://store-auth/auth/token/\")\n .bodyValue(new IdIn(token))\n .retrieve()\n .toEntity(IdOut.class)\n .flatMap(response -> {\n if (response != null && response.getBody() != null) {\n 
this.updateRequest(exchange, response.getBody().id());\n return chain.filter(exchange);\n } else {\n throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, \"Invalid token\");\n }\n });\n }\n throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, \"Missing authorization header\");\n }\n\n private String getAuthHeader(ServerHttpRequest request) {\n return request.getHeaders().getOrEmpty(HEADER_AUTHORIZATION).get(0);\n }\n\n private boolean isAuthMissing(ServerHttpRequest request) {\n return !request.getHeaders().containsKey(\"Authorization\");\n } \n\n private void updateRequest(ServerWebExchange exchange, String id) {\n exchange.getRequest().mutate()\n .header(\"id-user\", id)\n .build();\n }\n\n}\n
"},{"location":"handout/microservices/roadmap/","title":"Roadmap","text":""},{"location":"handout/microservices/roadmap/#microsservico","title":"Microsservi\u00e7o","text":"A fim de implementar microsservi\u00e7os em Spring Boot, aqui, \u00e9 proposto uma abordagem de modulariza\u00e7\u00e3o de cada microsservi\u00e7o, de forma que exista uma interface de comunica\u00e7\u00e3o Java a ser consumida por outros microsservi\u00e7os, tamb\u00e9m em Java, e tamb\u00e9m um compromisso de implementa\u00e7\u00e3o. Essa estrat\u00e9gia visa aumentar a produtividade do ambiente de desenvolvimento em Java, j\u00e1 que para o consumo da API por outros frameworks sempre ser\u00e1 necess\u00e1rio reescrever as assinaturas de cada endpoint.
"},{"location":"handout/microservices/roadmap/#modularizacao","title":"Modulariza\u00e7\u00e3o","text":"Crie dois projetos Maven:
- um de interface, e;
- outro para o microsservi\u00e7o.
A vantagem dessa abordagem \u00e9 que a interface pode ser utilizada em outros projetos como uma biblioteca a ser consumida.
Exemplo de uso dessa abordagem no microsservi\u00e7o Account:
classDiagram\n namespace Interface {\n class AccountController {\n <<interface>>\n create(AccountIn)\n read(String id): AccountOut\n update(String id, AccountIn)\n delete(String id)\n findByEmailAndPassword(AccountIn)\n }\n class AccountIn {\n <<record>>\n String name\n String email\n String password\n }\n class AccountOut {\n <<record>>\n String id\n String name\n String email\n }\n }\n namespace Resource {\n class AccountResource {\n <<REST API>>\n -accountService\n }\n class AccountService {\n <<service>>\n -accountRepository\n create(Account)\n }\n class AccountRepository {\n <<interface>>\n findByEmailAndHash(String, String)\n }\n class AccountModel {\n <<entity>>\n String id\n String name\n String email\n String hash\n }\n class Account {\n <<dto>>\n String id\n String name\n String email\n String password\n }\n }\n AccountController <|-- AccountResource\n AccountResource o-- AccountService\n AccountService o-- AccountRepository
"},{"location":"handout/microservices/roadmap/#interface","title":"Interface","text":"Para compilar e instalar a interface do microsservi\u00e7o, crie um pom.xml
espec\u00edfico para essa interface e seus dtos (AccountIn e AccountOut).
Installing the microservice interfacemvn clean install\n
"},{"location":"handout/microservices/roadmap/#implementacao","title":"Implementa\u00e7\u00e3o","text":"A implementa\u00e7\u00e3o n\u00e3o precisa ser instalada como biblioteca do reposit\u00f3rio Maven, pois \u00e9 apenas para execu\u00e7\u00e3o do microsservi\u00e7o. Por\u00e9m, o microsservi\u00e7o deve ter explic\u00edta a chamada da biblioteca de interface no seu pom.xml
.
<dependency>\n <groupId>insper.store</groupId>\n <artifactId>account</artifactId>\n <version>${project.version}</version>\n</dependency>\n
O comando para empacotar o microsservi\u00e7o \u00e9:
Packaging the microservicemvn clean package\n
Adicionalmente, para executar o microsservi\u00e7o:
Packaging and running the microservicemvn clean package spring-boot:run\n
"},{"location":"handout/microservices/roadmap/#banco-de-dados","title":"Banco de dados","text":"Muitos microsservi\u00e7os podem persistir seus dados em banco de dados. Cada microsservi\u00e7o \u00e9 respons\u00e1vel pelo acesso e grava\u00e7\u00e3o de seus dados de forma aut\u00f4noma.
Isso aumenta de forma significativa a complexidade do gerenciamento do microsservi\u00e7o, pois se torna necess\u00e1rio manter o gerenciamento da base de dados tais como: altera\u00e7\u00f5es, vers\u00f5es e roteiros de retornos.
O Flyway \u00e9 uma biblioteca que pode ser acoplada ao framework Spring Boot a fim de ajudar na tarefa de gerenciamento e cria\u00e7\u00e3o do sistema de persist\u00eancia dos dados do microsservi\u00e7o.
Para fazer uso dessa biblioteca, altere o pom.xml
adicionando a depend\u00eancia da biblioteca JPA assim bem como a depend\u00eancia da biblioteca Flyway.
<dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-data-jpa</artifactId>\n</dependency>\n<dependency>\n <groupId>org.flywaydb</groupId>\n <artifactId>flyway-core</artifactId>\n</dependency>\n<dependency>\n <groupId>org.postgresql</groupId>\n <artifactId>postgresql</artifactId>\n <version>42.7.2</version>\n</dependency>\n
Altera\u00e7\u00f5es no arquivo de propriedades tamb\u00e9m s\u00e3o necess\u00e1rias, para definir o banco de dados e sua configura\u00e7\u00e3o JPA, assim bem como, a configura\u00e7\u00e3o do Flyway.
Exemplo baseado no microsservi\u00e7o Accountspring:\n datasource:\n url: ${DATABASE_URL:jdbc:postgresql://localhost:5432/store}\n username: ${DATABASE_USERNAME:store}\n password: ${DATABASE_PASSWORD:store123321}\n driver-class-name: org.postgresql.Driver\n flyway:\n baseline-on-migrate: true\n schemas: account\n jpa:\n properties:\n hibernate:\n default_schema: account\n
A estrutura de organiza\u00e7\u00e3o e execu\u00e7\u00e3o de scripts de banco de dados do Flyway \u00e9 persistida na seguinte hierarquia de diret\u00f3rios, onde cada arquivo \u00e9 executado em ordem alfanum\u00e9rica.
exemplo\ud83d\udcc4 store.account\n\ud83d\udcc1 store.account-resource\n\u2514\u2500\u2500 \ud83d\udcc1 src\n \u2514\u2500\u2500 \ud83d\udcc1 main\n \u251c\u2500\u2500 \ud83d\udcc4 java\n \u2514\u2500\u2500 \ud83d\udcc1 resources\n \u251c\u2500\u2500 \ud83d\udcc1 db\n \u2502 \u2514\u2500\u2500 \ud83d\udcc1 migration\n \u2502 \u251c\u2500\u2500 \ud83d\udcc4 V2024.02.16.001__create_schema.sql\n \u2502 \u2514\u2500\u2500 \ud83d\udcc4 V2024.02.16.002__create_table_account.sql\n \u2514\u2500\u2500 \ud83d\udcc4 application.yaml\n
V2024.02.16.001__create_schema.sqlV2024.02.16.002__create_table_account.sql CREATE SCHEMA IF NOT EXISTS account;\n
CREATE TABLE account\n(\n id_account character varying(36) NOT NULL,\n tx_name character varying(256) NOT NULL,\n tx_email character varying(256) NOT NULL,\n tx_hash character varying(256) NOT NULL,\n CONSTRAINT account_pkey PRIMARY KEY (id_account)\n);\n
"},{"location":"handout/microservices/roadmap/#conectando-microsservicos-openfeign","title":"Conectando Microsservi\u00e7os - OpenFeign","text":"Nomeando o microsservi\u00e7o dentro do sistema de discovery.
<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-openfeign</artifactId>\n</dependency>\n
@FeignClient(name = \"store-account\")\npublic interface AccountController {\n ...\n}\n
"},{"location":"handout/microservices/roadmap/#docker","title":"Docker","text":"Para cada microsservi\u00e7o Java Spring Cloud \u00e9 aconselh\u00e1vel criar um arquivo Dockerfile
no diret\u00f3rio raiz do projeto a fim de permitir a cria\u00e7\u00e3o adequada da imagem do microsservi\u00e7o.
Typical Dockerfile for Java microserviceFROM openjdk:23-slim\nVOLUME /tmp\nCOPY target/*.jar app.jar\nENTRYPOINT [\"java\",\"-jar\",\"/app.jar\"]\n
"},{"location":"handout/microservices/roadmap/#docker-compose","title":"Docker Compose","text":"O Docker Compose permite criar um cluster com todos os microsservi\u00e7os necess\u00e1rios para o funcionamento de um sistema em uma rede apartada (nat).
Para criar um docker compose basta criar um arquivo de configura\u00e7\u00e3o chamado docker-compose.yaml
em uma pasta que possa acessar os demais microsservi\u00e7os, como uma pasta store.docker-platform.
exemplo\ud83d\udcc4 store.account\n\ud83d\udcc4 store.account-resource\n\ud83d\udcc1 store.docker-platform\n\u251c\u2500\u2500 \ud83d\udcc4 .env\n\u2514\u2500\u2500 \ud83d\udcc4 docker-compose.yaml\n
Dentro do arquivo, cada microsservi\u00e7o \u00e9 declarado e configurado, utilizando imagens que s\u00e3o criadas no momento de execu\u00e7\u00e3o do docker engine ou imagens que est\u00e3o dispon\u00edveis em algum reposit\u00f3rio (e.g.: DockerHub).
docker-compose.yaml# docker compose up -d --build --force-recreate\nversion: '3.8'\nname: store\n\nservices:\n\n db-store:\n container_name: store-db-store\n image: postgres:latest\n ports:\n - 5432:5432\n environment:\n - POSTGRES_USER=store\n - POSTGRES_PASSWORD=store\n - POSTGRES_DB=store\n volumes:\n - $VOLUME/postgres/store/data:/var/lib/postgresql/data\n restart: always\n networks:\n - private-network\n\n account:\n build:\n context: ../store.account-resource/\n dockerfile: Dockerfile\n image: store-account:latest\n environment:\n - spring.datasource.url=jdbc:postgresql://store-db-store:5432/store\n - spring.datasource.username=store\n - spring.datasource.password=store\n deploy:\n mode: replicated\n replicas: 1\n restart: always\n networks:\n - private-network\n depends_on:\n - db-store\n\nnetworks:\n private-network:\n driver: bridge\n
Arquivo de configura\u00e7\u00e3o de ambiente.
.envVOLUME=./volume\nCONFIG=./config\n
Na pasta do arquivo docker-compose.yaml
execute o comando docker para criar as imagens e subir os containers:
Rise up a clusterdocker compose up -d --build\n
Shutdown the clusterdocker compose down\n
Refer\u00eancia:
"},{"location":"platform/circuit-breaker/","title":"Circuit Breaker","text":""},{"location":"platform/circuit-breaker/#spring-cloud-circuit-breaker","title":"Spring Cloud Circuit Breaker","text":"Spring Cloud Circuit Breaker is a service resilience pattern that allows you to provide default behavior when a network failure or any exception occurs while invoking a remote service. It's an abstraction over various circuit breaker implementations like Netflix Hystrix, Resilience4j, Sentinel, etc.
Key components of Spring Cloud Circuit Breaker include:
-
Dependency: To use Spring Cloud Circuit Breaker, you need to include the spring-cloud-starter-circuitbreaker-{implementation}
dependency in your project, where {implementation}
could be hystrix
, resilience4j
, sentinel
, etc.
-
Configuration: You can configure the circuit breaker parameters like failure threshold, delay time, etc. in the application.properties (or application.yml) file.
-
Usage: You can use the @CircuitBreaker
annotation on a method to apply the circuit breaker pattern. If the method throws an exception, the circuit breaker will open and provide a fallback method.
"},{"location":"platform/communcation/","title":"Communication","text":""},{"location":"platform/communcation/#synchronous-communication","title":"Synchronous Communication","text":""},{"location":"platform/communcation/#feign","title":"Feign","text":""},{"location":"platform/communcation/#asynchronous-communication","title":"Asynchronous Communication","text":""},{"location":"platform/communcation/#webclient","title":"WebClient","text":""},{"location":"platform/concepts/","title":"Concepts","text":""},{"location":"platform/concepts/#historical-context","title":"Historical Context","text":""},{"location":"platform/concepts/#single-block-system","title":"Single block system","text":"Single block system, following the concept that a system is a blackbox schema, so many projects started in a simple single project that is a good choice to raise a system and try to use the initial features. This is a good approach for small and compact systems or for specialist systems where the speed of application matters.
---\ntitle: blackbox\n---\nflowchart LR\n Input\n subgraph Processing\n direction TB\n Storage\n Business\n UI\n end\n Output\n Input --> UI --> Output
The main disadvantage of this approach is the strong coupling among business, user interface (UI), and storage. The coupling is so strong that there is a mix among all the components, which implies a high cost for maintenance.
"},{"location":"platform/concepts/#splitted-betweeen-data-and-program","title":"Split between data and program","text":"---\ntitle: blackbox\n---\nflowchart LR\n subgraph Processing\n direction TB\n subgraph Storage\n x\n end\n subgraph Business\n UI\n end\n end\n Input --> UI --> Output\n Business <-- driver --> Storage
System communicates to only an UI.
Cobol
"},{"location":"platform/concepts/#multi-layer-approach","title":"Multi-layer approach","text":"---\ntitle: blackbox\n---\nflowchart LR\n Input\n subgraph Processing\n direction TB\n Storage\n subgraph _\n Businesses\n UI\n end\n end\n Output\n Input --> UI --> Output\n Business <-- driver --> Storage
"},{"location":"platform/concepts/#mvc-pattern","title":"MVC Pattern","text":"MVC stands for Model-View-Controller. It's a design pattern often used in web development. Here's a brief explanation of each component:
-
Model: This is the part of the system that handles the logic for the application data. Often model objects retrieve data (and store data) from a database.
-
View: This is the part of the system that handles the display of the data. Most often the views are created from the model data.
-
Controller: This is the part of the system that handles user interaction. Typically controllers read data from a view, control user input, and send input data to the model.
The idea behind MVC is that each of these components can be developed and tested independently, which can simplify the overall development process.
timeline\n title Relevant Events\n 1991 : CORBA\n 1994 : GoF\n 1999 : J2EE 1.2 <br> initial specification\n 2002 : Spring\n 2006 : Java EE 5\n 2014 : Spring Boot\n 2019 : Jakarta EE 8
"},{"location":"platform/concepts/#high-perfomance-architectures","title":"High-performance Architectures","text":"High-performance architectures refer to the design and configuration of computer systems, networks, and software to achieve optimal speed, responsiveness, throughput, and efficiency. These architectures are specifically tailored to handle large-scale, resource-intensive, and performance-critical workloads. High-performance systems are often employed in scenarios such as data centers, cloud computing environments, scientific computing, financial services, and other applications where speed and efficiency are paramount.
Here are key aspects and principles associated with high-performance architectures:
"},{"location":"platform/concepts/#parallelism-and-concurrency","title":"Parallelism and Concurrency","text":" - High-performance architectures often leverage parallelism and concurrency to execute multiple tasks simultaneously, improving overall throughput.
- Parallel processing involves dividing a task into smaller sub-tasks that can be processed concurrently, often across multiple processors or cores.
- Concurrency allows multiple tasks to be executed concurrently, even if they are not divided into explicit sub-tasks.
"},{"location":"platform/concepts/#distributed-systems","title":"Distributed Systems","text":" - Distributing workloads across multiple nodes in a network is a common strategy for achieving high performance.
- Distributed systems allow for horizontal scaling, where additional resources (nodes) can be added to handle increased demand.
"},{"location":"platform/concepts/#optimized-algorithms-and-data-structures","title":"Optimized Algorithms and Data Structures","text":" - Carefully designed algorithms and data structures are crucial for high performance.
- Efficient algorithms and data structures minimize computational complexity and memory usage.
"},{"location":"platform/concepts/#caching-and-memory-optimization","title":"Caching and Memory Optimization","text":" - Caching is used to store frequently accessed data in a location that allows faster retrieval, reducing the need to recompute or fetch data from slower storage.
- Memory optimization involves efficiently managing memory usage to minimize latency and improve responsiveness.
"},{"location":"platform/concepts/#scalability","title":"Scalability","text":" - High-performance architectures are designed to scale horizontally or vertically to accommodate growing workloads.
- Horizontal scalability involves adding more nodes or machines, while vertical scalability involves increasing the resources of individual nodes.
"},{"location":"platform/concepts/#load-balancing","title":"Load Balancing","text":" - Load balancing ensures that incoming requests are distributed evenly across multiple servers or resources.
- This helps prevent individual components from becoming bottlenecks and ensures optimal resource utilization.
"},{"location":"platform/concepts/#fault-tolerance-and-redundancy","title":"Fault Tolerance and Redundancy","text":" - High-performance architectures often incorporate redundancy and fault-tolerant mechanisms to ensure continuous operation in the face of hardware failures or network issues.
"},{"location":"platform/concepts/#specialized-hardware","title":"Specialized Hardware","text":" - In some cases, high-performance architectures may use specialized hardware, such as Graphics Processing Units (GPUs) or Field-Programmable Gate Arrays (FPGAs), to accelerate specific types of computations.
"},{"location":"platform/concepts/#optimized-network-architecture","title":"Optimized Network Architecture","text":" - Efficient communication between nodes is critical for high performance. Optimized network architectures, low-latency interconnects, and high-bandwidth connections contribute to overall system efficiency.
"},{"location":"platform/concepts/#monitoring-and-performance-tuning","title":"Monitoring and Performance Tuning","text":" - Continuous monitoring and performance tuning are essential to identify and address bottlenecks, optimize resource utilization, and ensure that the system is operating at peak efficiency.
"},{"location":"platform/concepts/#asynchronous-and-event-driven-design","title":"Asynchronous and Event-Driven Design","text":" - Asynchronous and event-driven architectures can improve system responsiveness by allowing components to operate independently and respond to events as they occur.
High-performance architectures are tailored to the specific requirements of the applications they support. They often involve a combination of hardware and software optimizations to achieve the desired level of performance for a given workload. It's important to note that designing and maintaining high-performance architectures can be complex and may involve trade-offs between factors such as cost, complexity, and ease of maintenance.
"},{"location":"platform/concepts/#cap-theorem","title":"CAP theorem","text":"CAP theorem, also known as Brewer's theorem, is a concept in distributed systems that addresses the trade-offs among three fundamental aspects: Consistency, Availability, and Partition Tolerance. It was introduced by computer scientist Eric Brewer in 2000. The CAP theorem suggests that in a distributed system, it is impossible to simultaneously achieve all three of these guarantees. A system can provide at most two out of the three.
Here are the key components of the CAP theorem:
"},{"location":"platform/concepts/#consistency-c","title":"Consistency (C)","text":" - Definition: Every read receives the most recent write or an error. In other words, all nodes in the system see the same data at the same time.
- Implication: Ensuring consistency means that any read operation on the system will reflect the most recent write, even in the presence of concurrent operations.
"},{"location":"platform/concepts/#availability-a","title":"Availability (A)","text":" - Definition: Every request for a read or write operation receives a response without the guarantee that it contains the most recent version of the data.
- Implication: An available system can provide a response to read or write requests even if it may not reflect the most recent update. The system is operational and accessible.
"},{"location":"platform/concepts/#partition-tolerance-p","title":"Partition Tolerance (P)","text":" - Definition: The system continues to operate even when network partitions occur, meaning that communication between nodes is lost or delayed.
- Implication: In a partition-tolerant system, the network can be unreliable or experience failures, and the system can still function.
According to the CAP theorem, a distributed system can prioritize at most two of these three guarantees, and the choice depends on the system's requirements and the nature of the application. Here are three possible scenarios:
- CA (Consistency and Availability): In scenarios where network partitions are rare and can be quickly resolved, a system may prioritize consistency and availability. This is common in traditional relational databases where consistency is crucial.
- CP (Consistency and Partition Tolerance): In scenarios where the network is unreliable, and partitions are frequent, a system may prioritize consistency and partition tolerance. This is common in systems that require strong consistency, such as many distributed databases.
- AP (Availability and Partition Tolerance): In scenarios where network partitions are common, and the system needs to remain operational, a system may prioritize availability and partition tolerance. This is common in systems where high availability and fault tolerance are critical, even if it means sacrificing strong consistency.
Source: Wikipedia - CAP Theorem It's important to note that the CAP theorem provides a theoretical framework for understanding trade-offs in distributed systems but does not prescribe specific solutions. Different systems may make different choices based on their specific requirements and use cases. Additionally, advancements in distributed systems research have led to the exploration of systems that aim to provide a balance between the three aspects, challenging the strict interpretation of the CAP theorem in some cases.
"},{"location":"platform/concepts/#scalability_1","title":"Scalability","text":"Scalability in the context of computer systems refers to the ability of a system to handle an increasing amount of work, or its potential to be enlarged to accommodate that growth. There are several types of scalability that are often discussed in the field of computing:
"},{"location":"platform/concepts/#vertical-scalability-scale-up","title":"Vertical Scalability (Scale-Up)","text":"Definition Vertical scalability involves adding more resources to a single node or machine in order to increase its capacity. Example Upgrading the CPU, adding more RAM, or increasing storage on a server. Pros Cons Simplicity in implementation. There's a limit to how much a single machine can be scaled vertically. it can be cost-effective for certain applications. It may also lead to downtime during upgrades."},{"location":"platform/concepts/#horizontal-scalability-scale-out","title":"Horizontal Scalability (Scale-Out)","text":"Definition Horizontal scalability involves adding more nodes or machines to a system, distributing the load across multiple machines. Example Adding more servers to a web application to handle increased traffic. Pros Cons Highly scalable, as resources can be easily added by adding more machines. Requires a distributed architecture. Can provide better fault tolerance. Some applications may not be easily parallelized."},{"location":"platform/concepts/#load-balancing_1","title":"Load Balancing","text":"Definition Load balancing involves distributing incoming network traffic or workload across multiple servers or resources to optimize resource utilization, maximize throughput, minimize response time, and avoid overloading any single resource. Example A load balancer distributing incoming web requests across multiple web servers. Pros Cons Improves overall system performance, ensures high availability, and can help with fault tolerance. Requires additional infrastructure, and the load balancer itself can become a potential bottleneck."},{"location":"platform/concepts/#elastic-scalability","title":"Elastic Scalability","text":"Definition Elastic scalability involves dynamically adjusting resources based on demand. Resources are automatically added or removed as needed. 
Example Cloud computing platforms that can automatically scale the number of virtual machines based on traffic. Pros Cons Efficient resource utilization, cost-effective as resources are only used when needed. Requires sophisticated monitoring and management systems."},{"location":"platform/concepts/#database-scalability","title":"Database Scalability","text":"Definition Database scalability refers to the ability of a database to handle an increasing amount of data and transactions. Vertical Database Scalability: Adding more resources to a single database server (e.g., increasing CPU, RAM). Horizontal Database Scalability: Distributing the database across multiple servers (e.g., sharding or partitioning). Pros Cons Can improve performance and handle increased data loads. Complex to implement, and horizontal scalability may require changes to the database schema."},{"location":"platform/concepts/#caching","title":"Caching","text":"Definition Caching involves storing frequently accessed data in a cache to reduce the need to fetch the same data from the original source repeatedly. Example Caching frequently used database queries or the results of computationally expensive operations. Pros Cons Improves response time, reduces load on backend systems. May lead to stale data if not managed properly. Each type of scalability has its own strengths and weaknesses, and the choice of scalability approach depends on the specific requirements and constraints of the system or application being developed. Often, a combination of these scalability types is employed to achieve optimal performance and resource utilization.
"},{"location":"platform/concepts/#design-patterns","title":"Design Patterns","text":"A design pattern in software development is a general, reusable solution to a common problem that occurs in a particular context within a software design. It's a template or a best practice that addresses a specific design or programming problem. Design patterns aren't complete solutions by themselves; rather, they provide a blueprint for solving certain types of problems.
The concept of design patterns was popularized by the book \"Design Patterns: Elements of Reusable Object-Oriented Software,\" written by Erich Gamma, Richard Helm, Ralph Johnson, and John Vlissides, often referred to as the \"Gang of Four\" (GoF)1. The book categorizes design patterns into three main types:
- Creational Patterns: These patterns deal with object creation mechanisms, trying to create objects in a manner suitable to the situation. Examples include the Singleton pattern, Factory Method pattern, and Abstract Factory pattern.
- Structural Patterns: These patterns focus on the composition of classes or objects. They help in creating a structure of classes and objects, making it easier to form larger structures. Examples include the Adapter pattern, Decorator pattern, and Composite pattern.
- Behavioral Patterns: Behavioral patterns are concerned with the interaction and responsibility of objects. They define communication patterns between objects and the responsibility of one object in a given situation. Examples include Observer pattern, Strategy pattern, and Command pattern.
Design patterns provide several benefits in software development:
- Reusability: Design patterns promote reusability of solutions to common problems. Once a design pattern is established, it can be applied to similar problems in different parts of the system.
- Scalability: Using design patterns can enhance the scalability of a system by providing proven solutions that can be applied as the system grows.
- Maintainability: Patterns make code more maintainable by providing a clear and organized structure. Developers familiar with design patterns can understand the overall architecture more easily.
- Common Vocabulary: Design patterns establish a common vocabulary for developers. When a developer mentions a particular pattern, others who are familiar with it can quickly understand the solution being implemented.
While design patterns are valuable tools, it's essential to use them judiciously. Not every problem requires a design pattern, and using patterns unnecessarily can lead to overly complex and difficult-to-maintain code. It's important to understand the problem at hand and choose the appropriate design pattern when it genuinely adds value to the solution.
-
GAMMA, E.; HELM, R.; JOHNSON, R., VLISSIDES, J., Design Patterns: Elements of Reusable Object-Oriented Software, 1\u00aa ed., Addison-Wesley Professional, 1994.\u00a0\u21a9
-
Wikipedia - CAP Theorem \u21a9
-
Gang of Four - Gof \u21a9
"},{"location":"platform/config/","title":"Config","text":""},{"location":"platform/config/#spring-cloud-config","title":"Spring Cloud Config","text":"Spring Cloud Config provides server-side and client-side support for externalized configuration in a distributed system. With the Config Server, you have a central place to manage external properties for applications across all environments.
Key components of Spring Cloud Config include:
-
Config Server: A standalone server that provides a REST API for providing configuration properties to clients. The server is embeddable in a Spring Boot application, by using the @EnableConfigServer
annotation. The properties can be stored in various types of repositories (Git, SVN, filesystem, etc.).
-
Config Client: A library for Spring Boot applications. It fetches the configuration properties from the Config Server and bootstraps them into the application's context. It's included in the classpath by adding the spring-cloud-starter-config
dependency.
-
Refresh Scope: Spring Cloud Config includes a RefreshScope
capability which allows properties to be reloaded without restarting the application. You can expose a /refresh
endpoint in your application that, when invoked, will cause the application to re-fetch properties from the Config Server.
Spring Cloud Config Server
"},{"location":"platform/discovery/","title":"Discovery","text":"Spring Cloud Discovery is a module in the Spring Cloud framework that provides a way for services to discover and communicate with each other in a distributed system. It helps manage the dynamic nature of microservices by allowing them to register themselves and discover other services without hardcoding their locations.
In a distributed system, services often need to communicate with each other to fulfill their functionalities. However, the locations of these services may change frequently due to scaling, failures, or deployments. Spring Cloud Discovery solves this problem by providing a service registry where services can register themselves and provide information about their location, such as IP address and port.
The service registry acts as a central database of all the services in the system. When a service needs to communicate with another service, it can query the service registry to obtain the necessary information. This allows services to be decoupled from each other and eliminates the need for hardcoding service locations in the code.
Spring Cloud Discovery supports multiple service registry implementations, such as Netflix Eureka, Consul, and ZooKeeper. These implementations provide additional features like service health checks, load balancing, and failover.
To use Spring Cloud Discovery, you need to include the necessary dependencies in your project and configure the service registry implementation you want to use. Then, you can annotate your services with @EnableDiscoveryClient to enable service registration and discovery. Spring Cloud Discovery will automatically register your services with the service registry and provide a client library to query the registry for service information.
Here's an example of how you can use Spring Cloud Discovery with Netflix Eureka:
@SpringBootApplication\n@EnableDiscoveryClient\npublic class MyServiceApplication {\n public static void main(String[] args) {\n SpringApplication.run(MyServiceApplication.class, args);\n }\n}\n
In this example, the @EnableDiscoveryClient annotation enables service registration and discovery using the configured service registry. When the application starts, it will register itself with the service registry and be discoverable by other services.
Overall, Spring Cloud Discovery simplifies the process of service discovery and communication in a distributed system, making it easier to build and maintain microservices architectures.
"},{"location":"platform/gateway/","title":"Gateway","text":""},{"location":"platform/gateway/#concepts","title":"Concepts","text":"The Gateway design pattern is a structural design pattern that provides a centralized entry point for handling requests from external systems. It acts as a mediator between the client and the server, allowing the client to make requests to multiple services through a single interface.
In the context of software development, a gateway acts as an intermediary between the client and the backend services. It abstracts away the complexity of interacting with multiple services by providing a unified API for the client to communicate with.
The main benefits of using the Gateway design pattern include:
-
Simplified client code: The client only needs to interact with the gateway, which handles the routing and communication with the appropriate backend services. This reduces the complexity and coupling in the client code.
-
Centralized cross-cutting concerns: The gateway can handle common concerns such as authentication, authorization, rate limiting, caching, and logging in a centralized manner. This eliminates the need to implement these features in each individual service.
-
Scalability and flexibility: The gateway can distribute requests to multiple instances of backend services, allowing for horizontal scaling. It also provides the flexibility to add or remove backend services without affecting the client code.
-
Protocol translation: The gateway can handle protocol translation, allowing clients to use different protocols (e.g., HTTP, WebSocket) while the backend services can use a different protocol.
-
Service aggregation: The gateway can aggregate data from multiple backend services and provide a unified response to the client. This reduces the number of requests made by the client and improves performance.
To implement the Gateway design pattern, various technologies and frameworks can be used, such as Spring Cloud Gateway, Netflix Zuul, or NGINX. These tools provide features like routing, load balancing, and request filtering, making it easier to build a robust and scalable gateway.
In summary, the Gateway design pattern provides a centralized entry point for handling requests from clients and abstracts away the complexity of interacting with multiple backend services. It simplifies client code, centralizes cross-cutting concerns, and provides scalability and flexibility in a distributed system architecture.
"},{"location":"platform/gateway/#spring-cloud-gateway","title":"Spring Cloud Gateway","text":"https://spring.io/projects/spring-cloud-gateway/
"},{"location":"platform/load-balancing/","title":"Load Balancing","text":""},{"location":"platform/load-balancing/#spring-cloud-loadbalancer","title":"Spring Cloud LoadBalancer","text":"Spring Cloud LoadBalancer is a generic abstraction over load balancing algorithms that you can use with service discovery clients like Eureka, Consul, and Zookeeper. It provides a round-robin load balancing implementation by default, but you can also implement your own custom load balancing algorithms.
Key components of Spring Cloud LoadBalancer include:
-
Dependency: To use Spring Cloud LoadBalancer, you need to include the spring-cloud-starter-loadbalancer
dependency in your project.
-
Configuration: By default, Spring Cloud LoadBalancer uses a simple round-robin strategy for load balancing. If you want to customize this, you can create a bean of type ServiceInstanceListSupplier
that returns a custom list of instances for load balancing.
-
Usage: You can use the @LoadBalanced
annotation on a RestTemplate
or WebClient.Builder
bean to integrate it with Spring Cloud LoadBalancer. When you make a request through this client, it will automatically be load balanced.
"},{"location":"platform/microservices/","title":"Microservices","text":""},{"location":"platform/microservices/#microservices-concepts","title":"Microservices Concepts","text":"Microservices, also known as the microservices architecture, is an architectural style that structures an application as a collection of small autonomous services, modeled around a business domain.
Key concepts of microservices include:
- Single Responsibility: Each microservice should have a single responsibility and should implement a single business capability.
- Independence: Microservices should be able to run and evolve independently of each other. They should be independently deployable and scalable.
- Decentralization: Microservices architecture favors decentralized governance. Teams have the freedom to choose the best technology stack that suits their service.
- Isolation of Failures: If a microservice fails, it should not impact the availability of other services.
- Data Isolation: Each microservice should have its own database to ensure that the services are loosely coupled and can evolve independently.
- Communication: Microservices communicate with each other through well-defined APIs and protocols, typically HTTP/REST with JSON or gRPC with Protobuf.
- Infrastructure Automation: Due to the distributed nature of the microservices architecture, automation of infrastructure is a must. This includes automated provisioning, scaling, and deployment.
- Observability: With many different services, it's important to have excellent monitoring and logging to detect and diagnose problems.
"},{"location":"platform/microservices/#domain-driven-design","title":"Domain Driven Design","text":"Domain-Driven Design (DDD) is a software development approach that emphasizes collaboration between technical experts and domain experts. The goal is to create software that is a deep reflection of the underlying domain, which is the specific area of business or activity that the software is intended to support.
Key concepts of DDD include:
- Ubiquitous Language: A common language established between developers and domain experts, used to describe all aspects of the domain.
- Bounded Context: A boundary within which a particular model is defined and applicable.
- Entities: Objects that have a distinct identity that persists over time and across different representations.
- Value Objects: Objects that are defined by their attributes, not their identity.
- Aggregates: Clusters of entities and value objects that are treated as a single unit.
- Repositories: They provide a way to obtain references to aggregates.
- Domain Events: Events that domain experts care about.
- Services: Operations that don't naturally belong to any entity or value object.
By focusing on the domain and domain logic, DDD provides techniques to develop complex systems targeting real-world scenarios. It helps to reduce the complexity by dividing the system into manageable and interconnected parts.
Source: System Design 101 - Microservice Architecture"},{"location":"platform/microservices/#design-a-microservice-platform","title":"Design a Microservice Platform","text":"flowchart LR\n subgraph Client\n direction LR\n Web\n Mobile\n Desktop\n end\n subgraph Microservices\n direction LR\n gateway[\"Gateway\"]\n subgraph Essentials\n direction TB\n discovery[\"Discovery\"]\n auth[\"Auth\"]\n config[\"Configuration\"]\n end\n subgraph Businesses\n direction TB\n ms1[\"Service 1\"]\n ms2[\"Service 2\"]\n ms3[\"Service 3\"]\n end\n end\n Client --> lb[\"Load Balance\"] --> gateway --> Businesses\n gateway --> auth\n gateway --> discovery\n click gateway \"../gateway/\" \"Gateway\"\n click discovery \"../discovery/\" \"Discovery\"\n click auth \"../auth-service/\" \"Auth\"\n click config \"../config/\" \"Configuration\"\n click lb \"../load-balancing/\" \"Load Balance\"
"},{"location":"platform/microservices/#containering","title":"Containering:","text":"Many microservices imply many ports, which results in a complicated environment to manage
"},{"location":"platform/microservices/#gateway","title":"Gateway","text":""},{"location":"platform/microservices/#discovery","title":"Discovery","text":""},{"location":"platform/microservices/#communication","title":"Communication","text":" -
XU, A., System Design 101.\u00a0\u21a9
-
Wikipedia - Domain Driven Design \u21a9
"},{"location":"platform/payment/","title":"Payment","text":"FinOps
"},{"location":"platform/database/caching/","title":"Caching","text":"Spring Boot Cache
https://docs.spring.io/spring-framework/docs/4.1.5.RELEASE/spring-framework-reference/html/cache.html
Redis
https://medium.com/nstech/programa%C3%A7%C3%A3o-reativa-com-spring-boot-webflux-e-mongodb-chega-de-sofrer-f92fb64517c3
"},{"location":"platform/database/caching/#handout-redis","title":"Handout Redis","text":""},{"location":"platform/database/flyway/","title":"Flyway","text":""},{"location":"platform/database/flyway/#flyway","title":"Flyway","text":"Flyway is an open-source database migration tool that strongly favors simplicity and convention over configuration. It is designed to simplify the process of versioning a database, similar to how Git versions source code.
With Flyway, you can apply version control to your database which allows you to migrate it to a newer version and also revert changes if needed. Flyway uses SQL scripts or Java-based migrations to evolve your database schema in a way that is controllable and predictable.
Key features of Flyway include:
- Version control for your database: Allows you to track changes and apply version control to your database, similar to how you would with your source code.
- Support for SQL and Java-based migrations: You can use SQL for simple changes, and Java for complex migrations.
- Repeatable migrations: You can use this feature to manage objects in your database that can't be easily handled with versioned migrations, like stored procedures and views.
- Multiple database support: Flyway supports a wide variety of databases including MySQL, PostgreSQL, SQL Server, and more.
https://www.baeldung.com/liquibase-vs-flyway
-
https://www.baeldung.com/database-migrations-with-flyway\u00a0\u21a9
"},{"location":"platform/messaging/concepts/","title":"Concepts","text":"https://medium.com/@thiagolenz/tutorial-spring-boot-e-rabbitmq-como-fazer-e-porqu%C3%AA-4a6cc34a3bd1
https://www.simplilearn.com/kafka-vs-rabbitmq-article
https://mmarcosab.medium.com/criando-consumer-e-produkafka-com-spring-boot-b427cc2f841d
"},{"location":"platform/observability/logging/","title":"Logging","text":""},{"location":"platform/observability/monitoring/","title":"Monitoring","text":""},{"location":"platform/security/concepts/","title":"Concepts","text":"Security is an important aspect of software development. It involves protecting the confidentiality, integrity, and availability of data and resources. Two key concepts in security are authentication and authorization.
"},{"location":"platform/security/concepts/#authentication","title":"Authentication","text":"Authentication is the process of verifying the identity of a user or system. It ensures that the user or system is who they claim to be. Common authentication methods include passwords, biometrics, and two-factor authentication. The system checks these credentials against the stored data. If the credentials are valid, the system confirms the user's identity.
In many systems, after successful authentication, the system generates a token. This token is a piece of data that represents the user's authentication session. It's like a digital ticket that proves the user's identity for a certain period of time.
This token is then sent back to the user. The user's client software (like a web browser) stores this token and sends it along with every subsequent request to the server (in case of stateless server). This way, the server knows that the request comes from an authenticated user without needing to ask for the credentials again.
Here's a simplified step-by-step process:
sequenceDiagram\n autonumber\n actor User\n User->>+Auth Server: authentication(credentials)\n Auth Server->>Auth Server: verifies credentials and generates a token\n Auth Server->>-User: returns the token\n User->>User: stores the token to use for the next requests
- The user sends their username and password (or other credentials) to the server;
- The server verifies the credentials. If they're valid, the server generates a token.
- The server sends this token back to the user.
- The user's client software stores this token.
- For every subsequent request, the client sends this token along with the request.
- The server checks the token to ensure it's valid and hasn't expired.
- This token-based authentication process is commonly used in many modern web applications and APIs. It helps maintain the user's session and allows the server to authenticate requests without storing the user's state.
"},{"location":"platform/security/concepts/#authorization","title":"Authorization","text":"Authorization is the process of granting or denying access to specific resources or actions based on the authenticated user's privileges. It determines what a user is allowed to do within a system. Authorization can be role-based, where permissions are assigned based on predefined roles, or attribute-based, where permissions are based on specific attributes of the user.
In many systems, the token not only represents the user's identity, but also includes information about their permissions or roles. This is often done using a type of token called a JSON Web Token (JWT), which can include a payload of data.
Here's a simplified step-by-step process:
sequenceDiagram\n autonumber\n actor User\n User->>Auth Server: request with token\n Auth Server->>Auth Server: decodes the token and extracts claims\n Auth Server->>Auth Server: verifies permissions\n critical allowed\n Auth Server->>Secured Resource: authorizes the request\n Secured Resource->>User: returns the response\n option denied\n Auth Server-->>User: unauthorized message\n end
- After authentication, the user's client software sends a request to a server. This request includes the token.
- The server decodes the token and extracts the user's identity and permissions.
- The server checks whether the user has the necessary permissions for the requested action. This could involve checking the user's roles or other attributes against the requirements for the action.
- If the user has the necessary permissions, the server allows the action. If not, the server denies the action.
This process allows the server to authorize actions without needing to repeatedly look up the user's permissions. It also allows for stateless servers, as the necessary information is included in every request.
By implementing strong authentication and authorization mechanisms, software systems can ensure that only authorized users have access to sensitive data and functionalities, reducing the risk of unauthorized access and potential security breaches.
As the platform has only one entrance point, it is
JWT is a decentralized
The point of entrance of API is the gateway, then as suggested by 1.
"},{"location":"platform/security/concepts/#auth-service","title":"Auth Service","text":" - Responsibilities:
- Registration:
- Authentication:
- Authorization:
Two Maven Projects
-
Interfaces
-
Implementation: resource
classDiagram\n namespace Interface {\n class AuthController {\n <<interface>>\n register(RegisterIn)\n authenticate(CredentialIn)\n identify(String)\n }\n class RegisterIn {\n <<record>>\n String firstName\n String lastName\n String email\n String password\n }\n class CredentialIn {\n <<record>>\n String email\n String password\n }\n }\n namespace Resource {\n class AuthResource {\n <<REST API>>\n -authService\n }\n class AuthService {\n <<service>>\n -registerRepository\n -userRepository\n register(Register)\n authenticate(Credential)\n identify(Session)\n }\n class RegisterRepository {\n <<interface>>\n }\n class RegisterEntity {\n <<entity>>\n }\n class UserRepository {\n <<interface>>\n }\n class UserEntity {\n <<entity>>\n }\n }\n AuthController <|-- AuthResource\n AuthResource o-- AuthService\n AuthService o-- RegisterRepository\n AuthService o-- UserRepository\n RegisterRepository \"1\" --> \"0..*\" RegisterEntity\n UserRepository \"1\" --> \"0..*\" UserEntity
"},{"location":"platform/security/concepts/#addtional-material","title":"Additional Material","text":" -
JSON Web Token
-
Autentica\u00e7\u00e3o e Autoriza\u00e7\u00e3o com Spring Security e JWT Tokens by Fernanda Kipper
-
DELANTHA, R., Spring Cloud Gateway security with JWT, 2023.\u00a0\u21a9
"},{"location":"platform/security/jwt/","title":"JWT","text":""},{"location":"platform/security/jwt/#jwt-json-web-token","title":"JWT - JSON Web Token","text":"JWT stands for JSON Web Token. It is a compact, URL-safe means of representing claims between two parties. JWTs are commonly used to secure the transmission of information between parties in a web environment, typically for authentication and information exchange. The JWT specification is defined by RFC 75191 and it is a decentralized approach for security (which can support horizontal scalability).
Here are the key components and concepts of JWT:
- JSON Format: JWTs are represented as JSON objects that are easy to parse and generate. The JSON format makes them human-readable and easy to work with.
-
Three Parts: JWTs consist of three parts separated by dots (.
): Header, Payload, and Signature.
-
Header: The header typically consists of two parts: the type of the token (JWT) and the signing algorithm being used, such as HMAC SHA256 or RSA.
-
Payload: The payload contains the claims. Claims are statements about an entity (typically, the user) and additional data. There are three types of claims: registered, public, and private claims.
-
Signature: To create the signature part, you take the encoded header, the encoded payload, a secret, the algorithm specified in the header, and sign that.
-
Encoding: Each of the three parts is Base64Url encoded, and the resulting strings are concatenated with periods between them. The final JWT looks like: xxxxx.yyyyy.zzzzz
.
- Stateless and Self-contained: JWTs are stateless, meaning that all the information needed is within the token itself. The server doesn't need to store the user's state. They are also self-contained, meaning that all the information needed is contained within the token.
- Use Cases: JWTs are commonly used for authentication and information exchange between parties. For example, after a user logs in, a server could generate a JWT and send it to the client. The client can then include the JWT in the headers of subsequent requests to access protected resources. The server can verify the authenticity of the JWT using the stored secret key.
- Security Considerations: While JWTs are widely used and versatile, it's important to handle them securely. For instance, the key used to sign the JWT should be kept secret, and HTTPS should be used to transmit JWTs to prevent man-in-the-middle attacks.
Here's a simple example of a JWT created on JWT Builder2:
eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzUxMiJ9.eyJpc3MiOiJJbnNwZXIiLCJpYXQiOjE3MDMwMDgzMzgsImV4cCI6MjAxODU0MTEzOCwiYXVkIjoid3d3Lmluc3Blci5lZHUuYnIiLCJzdWIiOiJodW1iZXJ0b3JzQGluc3Blci5lZHUuYnIiLCJHaXZlbk5hbWUiOiJIdW1iZXJ0byIsIlN1cm5hbWUiOiJTYW5kbWFubiIsIkVtYWlsIjoiaHVtYmVydG9yc0BpbnNwZXIuZWR1LmJyIiwiUm9sZSI6IlByb2Zlc3NvciJ9.SsGdvR5GbYWTRbxY7IGxHt1vSxhkpRueBJWsi0lrPhJVCICp119QjU8F3QvHW0yF5tw-HhQ9RVh0l89t4M0LNw
This JWT consists of three parts, decoded by 3:
HeaderPayloadSignature eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzUxMiJ9
{\n \"typ\": \"JWT\",\n \"alg\": \"HS512\"\n}\n
eyJpc3MiOiJJbnNwZXIiLCJpYXQiOjE3MDMwMDgzMzgsImV4cCI6MjAxODU0MTEzOCwiYXVkIjoid3d3Lmluc3Blci5lZHUuYnIiLCJzdWIiOiJodW1iZXJ0b3JzQGluc3Blci5lZHUuYnIiLCJHaXZlbk5hbWUiOiJIdW1iZXJ0byIsIlN1cm5hbWUiOiJTYW5kbWFubiIsIkVtYWlsIjoiaHVtYmVydG9yc0BpbnNwZXIuZWR1LmJyIiwiUm9sZSI6IlByb2Zlc3NvciJ9
{\n \"iss\": \"Insper\",\n \"iat\": 1703008338,\n \"exp\": 2018541138,\n \"aud\": \"www.insper.edu.br\",\n \"sub\": \"humbertors@insper.edu.br\",\n \"GivenName\": \"Humberto\",\n \"Surname\": \"Sandmann\",\n \"Email\": \"humbertors@insper.edu.br\",\n \"Role\": \"Professor\"\n}\n
SsGdvR5GbYWTRbxY7IGxHt1vSxhkpRueBJWsi0lrPhJVCICp119QjU8F3QvHW0yF5tw-HhQ9RVh0l89t4M0LNw
HMACSHA512(\n base64UrlEncode(header) + \".\" +\n base64UrlEncode(payload),\n qwertyuiopasdfghjklzxcvbnm123456,\n)\n
JWTs are widely used in web development due to their simplicity, flexibility, and support across various programming languages and frameworks. They are commonly used in token-based authentication systems.
"},{"location":"platform/security/jwt/#addtional-material","title":"Addtional Material","text":" -
Spring Cloud Security
-
ByteByteGo - Why is JWT popular?
-
RFC 7519 - JSON Web Token (JWT), 2015.\u00a0\u21a9
-
JWT - Builder.\u00a0\u21a9
-
jwt.io - JWT Verification.\u00a0\u21a9
-
Unix Time Stamp - Epoch Converter.\u00a0\u21a9
-
DELANTHA, R., Spring Cloud Gateway security with JWT, 2023.\u00a0\u21a9
-
Wikipedia - Pepper (cryptography).\u00a0\u21a9
-
PGzlan, Serve your hash with Salt and Pepper for Stronger Account Security, 2023.\u00a0\u21a9
"},{"location":"platform/security/oauth2/","title":"OAuth2","text":"OAuth2 is an authorization framework that allows applications to obtain limited access to user accounts on an HTTP service, such as Facebook, Google, or GitHub, without exposing the user's credentials. It provides a secure and standardized way for users to grant access to their resources to third-party applications.
The principal process to obtain a valid credential using OAuth2 involves the following steps:
-
Registration: The application developer needs to register their application with the OAuth2 provider (e.g., Google, Facebook) to obtain client credentials, including a client ID and client secret. These credentials are used to identify and authenticate the application.
-
User Authorization: When a user wants to grant access to their resources, the application redirects them to the OAuth2 provider's authorization endpoint. This typically involves the user being presented with a login screen and being asked to grant permission to the application.
-
Authorization Grant: Once the user grants permission, the OAuth2 provider issues an authorization grant to the application. This grant can take various forms, such as an authorization code or an access token.
-
Token Exchange: The application then exchanges the authorization grant for an access token by sending a request to the OAuth2 provider's token endpoint. The access token is a credential that the application can use to access the user's resources on behalf of the user.
-
Accessing Resources: With the access token, the application can make requests to the OAuth2 provider's API endpoints to access the user's resources. The access token is typically included in the request headers or as a query parameter.
-
Refreshing Tokens: Access tokens have a limited lifespan. To continue accessing the user's resources, the application can use a refresh token (if provided) to obtain a new access token without requiring the user to reauthorize the application.
It's important to note that the exact process and terminology may vary slightly depending on the OAuth2 provider and the specific implementation. However, the general flow remains consistent across most OAuth2 implementations.
"},{"location":"versions/2024.1/","title":"2024.1","text":"Info Prof. Humberto Sandmann
humbertors@insper.edu.br
Students
Meetings
Evento Dia In\u00edcio T\u00e9rmino Aula Qua. 09h45 11h45 Aula Sex. 07h30 09h30 Atendimento Seg. 12h00 13h30 Grades
FinalIndividualTeam \\[ \\text{Final Grade} = \\left\\{\\begin{array}{lll} \\text{Individual} \\geq 5 \\bigwedge \\text{Team} \\geq 5 & \\implies & \\displaystyle \\frac{ \\text{Individual} + \\text{Team} } {2} \\\\ \\\\ \\text{Otherwise} & \\implies & \\min\\left(\\text{Individual}, \\text{Team}\\right) \\end{array}\\right. \\] Avalia\u00e7\u00e3o Descri\u00e7\u00e3o Data Nota (%) Roteiros M\u00e9dia aritm\u00e9tica dos 2 roteiros de maiores notas. 60.0 Roteiro 1 Testes - Roteiro 2 Bottlenecks 22.mai Roteiro 3 Cloud 22.mai Participa\u00e7\u00e3o Nota geral atribu\u00edda ao grupo distribu\u00edda aos membros pelo pr\u00f3prio grupo, apenas notas inteiras \\([0; 10]\\) 40.0 Avalia\u00e7\u00e3o Descri\u00e7\u00e3o Data Nota (%) Checkpoints CP1 Montar um Spring Cloud 05.abr 7.5 CP2 Testes e Pipeline 19.abr 7.5 CP3 K8s 10.mai 7.5 CP4 Platform as a Product 22.mai 7.5 Apresenta\u00e7\u00e3o 10.0 Projeto 60.0 Individual
Roteiro 1Roteiro 2Roteiro 3Participa\u00e7\u00e3o Testes
- Roteiros de testes de funcionalidades ou de testes de carga
- Documenta\u00e7\u00e3o dos resultados obtidos
Bottlenecks
-
Implementa\u00e7\u00e3o de um microservi\u00e7o de bottleneck para o projeto:
- Mensageria
- RabbitMQ
- Kafka
- Spring e Kafka, Giuliana Bezerra
- Resili\u00eancia
- Spring Cloud Circuit Breaker
- Configura\u00e7\u00e3o
- Spring Cloud Config
- In-Memory Database
- Redis, Giuliana Bezerra
- Payments (sandboxes)
- PayPal
- Heartland
- Mercado Pago
- Jenkins
- SonarQube
- Dependency Analysis
Cloud
- Roteiro de publica\u00e7\u00e3o de um microsservi\u00e7o em Cloud
- Contribui\u00e7\u00f5es no GitHub dos participantes
- Documenta\u00e7\u00e3o das reuni\u00f5es (daily, retro, etc)
- Nota geral atribu\u00edda pelo professor mas dividida pelo grupo
Team
Checkpoint 1Checkpoint 2Checkpoint 3Checkpoint 4Apresenta\u00e7\u00e3oProjeto Desenvolvimento Spring Cloud
- Servi\u00e7o de discovery
- Servi\u00e7o de gateway
- Servi\u00e7o de autentica\u00e7\u00e3o e autoriza\u00e7\u00e3o
- 3 microsservi\u00e7os com persist\u00eancia de dados
- Comunica\u00e7\u00e3o entre, ao menos 2, microsservi\u00e7os, al\u00e9m de: Gateway \\(\\rightarrow\\) Auth \\(\\rightarrow\\) Account
- Monitoramento com dashboard de microsservi\u00e7os
- Documenta\u00e7\u00e3o das APIs padr\u00e3o Swagger
- Cluster em Docker Compose para deploy dos microsservi\u00e7os
Testes e Pipeline
- Plano de testes
- Script Jenkins - Pipeline as Code
K8s
- Release no Minikube
- Scripts declarativos dos servi\u00e7os
Platform as a Service
- Plano de uso da plataforma como um produto (PaaS)
- Vislumbrar uso da plataforma por terceiros
- Storytelling (come\u00e7o, meio, fim)
- Fluidez
- Qualidade do material apresentado
- Tempo
- Participa\u00e7\u00e3o
- Checkpoint 1
- Checkpoint 2
- Checkpoint 3
- Checkpoint 4
- Planejamento
- Documenta\u00e7\u00e3o (markdown)
- Frontend (funcionalidades b\u00e1sicas: login, registro, dashboard, etc)
Planning
"},{"location":"versions/2024.1/#repositories","title":"Repositories","text":"Dev
Microservice Context Interface Service Discovery Infra platform.241.store.discovery Gateway Infra platform.241.store.gateway Postgres Database platform.241.store.db Account Business platform.241.store.account platform.241.store.account-resource Auth Business platform.241.store.auth platform.241.store.auth-resource Ops
Description Repositories Commands Docker Compose API platform.241.store.docker-api docker compose up --build
docker compose down
Jenkins Pipelines platform.241.store.ops docker compose up --build
docker compose down
http://localhost:9000"}]}
\ No newline at end of file
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Plataformas, Microsservi\u00e7os e APIs","text":"Info Carga Hor\u00e1ria: 80
Semestre: 5\u00ba per\u00edodo
"},{"location":"#ementa","title":"Ementa","text":"Conceitos de Aplica\u00e7\u00f5es em Mon\u00f3l\u00edto e Microservi\u00e7os; Conteineriza\u00e7\u00e3o; Padr\u00f5es de Constru\u00e7\u00e3o de Softwares (Design Patterns); Monitoramento e Rastreabilidade de Aplica\u00e7\u00f5es; Seguran\u00e7a (autentica\u00e7\u00e3o e autoriza\u00e7\u00e3o); Mensageria; Dados Distribu\u00eddos; Orquestra\u00e7\u00e3o de computa\u00e7\u00e3o em nuvem, sistemas de gerenciamento, monitoramento e configura\u00e7\u00e3o de recursos virtualizados; Integra\u00e7\u00e3o entre Desenvolvimento e Opera\u00e7\u00e3o; Utiliza\u00e7\u00e3o de Plataformas em Nuvem para Produ\u00e7\u00e3o (Cloud Computing); Aspectos de automa\u00e7\u00e3o de gest\u00e3o de sistema em cloud \u2013 DevOps. Serverless computing \u2013 FaaS - function as a service; Utiliza\u00e7\u00e3o da Plataforma como Produto para Neg\u00f3cios: Infraestrutura como Servi\u00e7o (IaaS), Plataforma como Servi\u00e7o (PaaS) e Software como Servi\u00e7o (SaaS). Gest\u00e3o de n\u00edveis de servi\u00e7o (SLA - Service Level Agreement). Custos de projeto e de opera\u00e7\u00e3o de sistemas em cloud.
"},{"location":"#objetivos","title":"Objetivos","text":"Ao final da disciplina o aluno ser\u00e1 capaz de:
- Tomar decis\u00f5es a respeito da escolha de estrat\u00e9gias de arquiteturas para o emprego de problemas computacionais;
- Implementar e interconectar aplica\u00e7\u00f5es computacionais para a constru\u00e7\u00e3o de plataformas de alto desempenho: escalabilidade por meio do uso de t\u00e9cnicas de computa\u00e7\u00e3o em nuvem;
- Administrar um sistema de gerenciamento de nuvem, provisionando a infraestrutura necess\u00e1ria como um servi\u00e7o;
- Construir, com o aux\u00edlio de frameworks, solu\u00e7\u00f5es de plataformas completas e integradas de forma profissional;
- Arquitetar e implementar linhas de produ\u00e7\u00f5es de softwares robustos (CI/CD);
- Analisar, projetar e especificar uma solu\u00e7\u00e3o de computa\u00e7\u00e3o em nuvem mista baseada em hardware, software e redes para atender aos requisitos de determinado pacto de n\u00edvel de servi\u00e7o (SLA);
- Planejar e analisar o uso de plataformas empresariais como subs\u00eddio para cria\u00e7\u00e3o de novos neg\u00f3cios (PaaS).
"},{"location":"#conteudo-programatico","title":"Conte\u00fado Program\u00e1tico","text":" - Conceitos de Arquitetura e Microsservi\u00e7os;
- Microsservi\u00e7os com Interface API - RESTful;
- Introdu\u00e7\u00e3o a Cont\u00eaineres;
- Introdu\u00e7\u00e3o e Implementa\u00e7\u00e3o de Design Patterns;
- Apresenta\u00e7\u00e3o de Design Patterns mais Complexos: Seguran\u00e7a, Mensageria, Cache, etc;
- Fundamentos de Computa\u00e7\u00e3o em Nuvem.
- Orquestra\u00e7\u00e3o, Implementa\u00e7\u00e3o e Monitoramento de Ambientes Virtualizados e Distribu\u00eddos;
- Infraestrutura como um Servi\u00e7o.
- Redes Definidas por Software;
- Software como um Servi\u00e7o;
- Gest\u00e3o de N\u00edveis de Servi\u00e7o.
"},{"location":"#bibliografia-basica","title":"Bibliografia B\u00e1sica","text":"Livros:
-
ROMAN, Ed; AMBLER, Scott W.; JEWELL, Tyler. Dominando Enterprise Javabeans. Porto Alegre: Bookman, 2004. E-book. ISBN 9788577804061. Dispon\u00edvel em: https://integrada.minhabiblioteca.com.br/#/books/9788577804061. Acesso em: 30 de maio de 2023.
-
ALVES, William Pereira. Java para Web - Desenvolvimento de Aplica\u00e7\u00f5es. S\u00e3o Paulo: \u00c9rica, 2015. E-book. ISBN 9788536519357. Dispon\u00edvel em: https://integrada.minhabiblioteca.com.br/#/books/9788536519357. Acesso em: 30 de maio de 2023.
-
FREEMAN, Emily. DevOps Para Leigos. Rio de Janeiro: Editora Alta Books, 2021. E-book. ISBN 9788550816661. Dispon\u00edvel em: https://integrada.minhabiblioteca.com.br/#/books/9788550816661. Acesso em: 30 de maio de 2023.
"},{"location":"#bibliografia-complementar","title":"Bibliografia Complementar","text":"Livros:
-
XU, A., System Design Interview - An insider's guide, 1\u00aa ed., Independently Published, 2020.
-
MARTIN, R. C., Arquitetura Limpa: o guia do artes\u00e3o para estrutura e design de software, 1\u00aa ed., Alta Books, 2018.
-
PARKER, G. G.; VAN ALSTYNE, M. W.; CHOUDARY, S. P., Plataforma: a revolu\u00e7\u00e3o da estrat\u00e9gia, 1\u00aa ed., Alta Books, 2018.
-
SEHGAL, N. K.; BHATT, P. C. P.; ACKEN J. M., Cloud Computing with Security and Scalability.: Concepts and Practices, 3\u00aa ed., Springer, 2023.
-
KRIEF, M., Learning DevOps: A comprehensive guide to accelerating DevOps culture adoption with Terraform, Azure DevOps, Kubernetes, and Jenkins, 2\u00aa ed., Packt Publishing, 2022.
-
GAMMA, E.; HELM, R.; JOHNSON, R., VLISSIDES, J., Design Patterns: Elements of Reusable Object-Oriented Software, 1\u00aa ed., Addison-Wesley Professional, 1994.
-
SANTANA, E. F. Z., Back-end Java: Microsservi\u00e7os, Spring Boot e Kubernetes, Casa do C\u00f3digo, 2021. Material.
-
SANTANA, E. F. Z., Apache Kafka e Spring Boot: Comunica\u00e7\u00e3o ass\u00edncrona entre microsservi\u00e7os, Casa do C\u00f3digo, 2022. Material.
Artigos:
-
XU, A. et al.. ByteByteGo - System Design 101. Dispon\u00edvel em: https://github.com/ByteByteGoHq/system-design-101. Acesso em: 19 dezembro 2023.
-
Spring. Spring Cloud. Dispon\u00edvel em: https://spring.io/projects/spring-cloud. Acesso em: 19 dezembro 2023.
-
CHOI, K., Software Engineering Blogs. Dispon\u00edvel em: https://github.com/kilimchoi/engineering-blogs. Acesso em: 20 dezembro 2023.
-
Ghemawat, S. et al.. Towards Modern Development of Cloud Applications. Proceedings of the 19th Workshop on Hot Topics in Operating Systems, 2023 - p. 110-117. Association for Computing Machinery, Providence, RI, USA. Dispon\u00edvel em: doi:10.1145/3593856.3595909. Acesso em: 05 fevereiro de 2024.
"},{"location":"disclaimer/","title":"Disclaimer","text":""},{"location":"disclaimer/#contributors","title":"Contributors","text":"Name Humberto Sandmann Fabio Roberto de Miranda Raul Ikeda Maciel Calebe Vidal Eduardo Felipe Zambom Santana"},{"location":"disclaimer/#source","title":"Source","text":"Circa of 70% of the whole conceptual texts were generated by ChatGPT nonetheless all of them were revised by the editor. The sections of handout was produced by the contributors.
"},{"location":"api/documentation/","title":"Documentation","text":""},{"location":"api/documentation/#swagger","title":"Swagger","text":"mavengradle <dependency>\n</dependency>\n
\n
```javascript I'm A tab console.log('Code Tab A');
```javascript I'm tab B\nconsole.log('Code Tab B');\n
CC++ #include <stdio.h>\n\nint main(void) {\n printf(\"Hello world!\\n\");\n return 0;\n}\n
#include <iostream>\n\nint main(void) {\n std::cout << \"Hello world!\" << std::endl;\n return 0;\n}\n
https://www.baeldung.com/spring-rest-openapi-documentation
"},{"location":"api/spring-boot-cloud/","title":"Spring Boot Cloud","text":"Containering:
https://spring.io/projects/spring-cloud/
https://github.com/spring-cloud/spring-cloud-release/wiki/Supported-Versions
"},{"location":"appendix/ohmyzsh/","title":"Oh My Zsh","text":"Install:
sudo apt install zsh\nchsh -s $(which zsh)\nsh -c \"$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)\"\n
Plugins:
git clone https://github.com/zsh-users/zsh-syntax-highlighting ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting\ngit clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions\ngit clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf && ~/.fzf/install\n
Edit the file ~/.zshrc
at home's folder:
nano ~/.zshrc\n
~/.zshrcZSH_THEME=\"afowler\"\nplugins=(\n git\n zsh-syntax-highlighting\n zsh-autosuggestions\n fzf\n)\n
Reference:
- Oh My Zsh
"},{"location":"appendix/others/","title":"Others","text":" Clojure and Datomic Studies with Docker and Kafka by Pelichero, F.
Inside a Google data center
"},{"location":"appendix/rest-vs-graphql/","title":"REST vs GraphQL","text":"Source: System Design 101 - REST API vs. GraphQL -
XU, A., System Design 101.\u00a0\u21a9
-
REST.\u00a0\u21a9
-
GraphQL.\u00a0\u21a9
"},{"location":"appendix/rsa/","title":"RSA Algorithm","text":""},{"location":"appendix/rsa/#pkcs","title":"PKCS","text":""},{"location":"appendix/rsa/#the-rsa-encryption-algorithm","title":"The RSA Encryption Algorithm","text":" -
The RSA Encryption Algorithm (1 of 2: Generating the Keys)
-
The RSA Encryption Algorithm (2 of 2: Encrypting and Decrypting Messages)
"},{"location":"appendix/tls-for-microservices/","title":"TLS for microservices","text":"The HTTPS-Only Standard
Let's Encrypt
"},{"location":"appendix/versioning-rest-apis/","title":"Versioning REST API","text":" -
Jacky, Versioning RESTful APIs with Spring Boot: A Step-by-Step Guide in 5 minutes.\u00a0\u21a9
"},{"location":"cloud/gitactions/","title":"GitActions","text":""},{"location":"cloud/gitactions/#github-actions","title":"GitHub Actions","text":"GitHub Actions is a feature of GitHub that allows you to automate, customize, and execute your software development workflows right in your repository.
With GitHub Actions, you can build, test, and deploy your code directly from GitHub. It provides world-class support for Continuous Integration/Continuous Deployment (CI/CD).
In addition, GitHub Actions allows you to automate other aspects of your development workflow such as assigning code reviews, managing branches, and triaging issues.
"},{"location":"cloud/terraform/","title":"Terraform","text":""},{"location":"cloud/terraform/#infrastructure-as-code-iac","title":"Infrastructure as Code (IaC)","text":"Infrastructure as Code (IaC) is a method of managing and provisioning computing infrastructure through machine-readable definition files, rather than physical hardware configuration or interactive configuration tools.
The IT infrastructure managed by this comprises both physical equipment, such as bare-metal servers, as well as virtual machines, and associated configuration resources. The definitions may be in a version control system. It can use either scripts or declarative definitions, rather than manual processes, but the term is more often used to promote declarative approaches.
"},{"location":"cloud/terraform/#pros","title":"Pros","text":" - Automatization of creation of an infrastructure;
- Standardization of platforms;
- Replication of infrastructure.
|- .github\n| |- workflows\n|- s3-bucket-static\n |- main.tf\n
main.tfprovider \"aws\" {\n region = \"us-east-1\"\n}\n\nvariable \"bucket_name\" {\n type = string\n}\n\nresource \"aws_s3_bucket\" \"static_site_bucket\" {\n bucket = \"static-site-${var.bucket_name}\"\n\n website {\n index_document = \"index.html\"\n error_document = \"404.html\"\n }\n\n tags = {\n Name = \"Static Site Bucket\"\n Environment = \"Production\"\n }\n}\n\nresource \"aws_s3_bucket_public_access_block\" \"static_site_bucket\" {\n bucket = aws_s3_bucket.static_site_bucket.id\n\n block_public_acls = false\n block_public_policy = false\n ignore_public_acls = false\n restrict_public_buckets = false\n}\n
"},{"location":"cloud/terraform/#alternatives","title":"Alternatives","text":" - AWS CloudFormation
- Ansible
- Vagrant
"},{"location":"cloud/terraform/#additional-material","title":"Additional Material","text":" -
Criando Infra na AWS com Terraform (IaC) by Fernanda Kipper
"},{"location":"devops/concepts/","title":"Concepts","text":"DevOps is a set of practices that combines software development (Dev) and IT operations (Ops). It aims to shorten the system development life cycle and provide continuous delivery with high software quality. DevOps is complementary with Agile software development; several DevOps aspects came from Agile methodology.
Key concepts of DevOps include:
- Continuous Integration (CI): Developers regularly merge their code changes into a central repository, after which automated builds and tests are run.
- Continuous Delivery (CD): The combined practices of continuous integration and automated testing allow for the continuous delivery of code changes to a staging or production system.
- Infrastructure as Code (IaC): Infrastructure is defined and managed using code and software development techniques, such as version control and continuous integration.
- Monitoring and Logging: Keeping track of how applications and systems are performing in real-time to understand ongoing IT infrastructure status.
- Communication and Collaboration: Increased communication and collaboration in an organization is one of the key cultural aspects of DevOps. The use of DevOps tooling and automation of the software delivery process tends to increase collaboration between the teams.
Source: Wikipedia - Devops"},{"location":"devops/concepts/#cicd","title":"CI/CD","text":""},{"location":"devops/concepts/#pipeline","title":"Pipeline","text":""},{"location":"devops/concepts/#service-level-agreement-sla","title":"Service-level agreement - SLA","text":"Service-level agreement, well-known as SLA, is
"},{"location":"devops/concepts/#other-approaches","title":"Other Approaches","text":""},{"location":"devops/concepts/#noops","title":"NoOps","text":"NoOps, short for \"No Operations\", is a concept in software development where the software is designed in such a way that it requires minimal or even no IT operations support. This is often achieved through the use of fully automated processes and systems, which eliminate the need for manual intervention in tasks such as deployment, scaling, and systems management.
The goal of NoOps is to allow the software developers to focus on writing new features for the application, rather than spending time on operational concerns. This is often achieved through the use of Platform as a Service (PaaS) providers, which handle many of the operational tasks automatically.
https://www.jenkins.io/doc/tutorials/build-a-java-app-with-maven/
Jenkins
Install plugins: - Blue Ocean - Docker - Docker Pipeline - Kubernetes CLI
https://www.jenkins.io/doc/tutorials/build-a-java-app-with-maven/
https://www.jenkins.io/blog/2017/02/07/declarative-maven-project/
-
Wiki Service-level Agreement \u21a9
"},{"location":"devops/docker/","title":"Docker","text":"How to avoid the classical..
?
The answer is: CONTAINERIZATION.
Docker is a platform and tool that enables developers to automate the deployment of applications inside lightweight, portable containers. Containers are a form of virtualization that packages an application and its dependencies together, ensuring consistency across different environments, from development to testing and production.
Here are some key concepts and components of Docker:
- Containerization: Containers are lightweight, standalone, and executable packages that include everything needed to run a piece of software, including the code, runtime, libraries, and system tools. Containers isolate applications from their environment, making them portable and consistent across various systems.
- Docker Engine: This is the core component of Docker. It is a lightweight and portable runtime that can run containers on various operating systems, including Linux and Windows. The Docker Engine consists of a server, a REST API, and a command-line interface.
- Docker Image: An image is a lightweight, standalone, and executable package that includes everything needed to run a piece of software, including the code, a runtime, libraries, environment variables, and config files. Images are used to create containers.
- Dockerfile: A Dockerfile is a text file that contains instructions for building a Docker image. It specifies the base image, sets up the environment, installs dependencies, and configures the application.
- Registry: Docker images can be stored in registries, which are repositories for sharing and distributing container images. Docker Hub is a popular public registry, and organizations often use private registries to store and manage their proprietary images.
- Container Orchestration: Docker can be used in conjunction with container orchestration tools like Kubernetes or Docker Swarm to manage the deployment, scaling, and orchestration of containerized applications in production environments.
- Portability: One of Docker's key advantages is its portability. Since containers encapsulate everything an application needs to run, they can run consistently across different environments, reducing the \"it works on my machine\" problem often encountered in software development.
Docker has become a widely adopted technology in the software development and deployment space due to its ease of use, portability, and the efficiency it brings to the development and deployment lifecycle. It has revolutionized the way applications are packaged, shipped, and deployed, making it easier for developers to build, test, and deploy applications in a more reliable and consistent manner.
"},{"location":"devops/docker/#differences-between-docker-and-virtual-machines","title":"Differences between Docker and Virtual Machines","text":"Docker containers and virtual machines (VMs) are both technologies used for virtualization, but they operate at different levels and have distinct characteristics. Here are the key differences between Docker containers and virtual machines:
Aspect Docker Containers Virtual Machines Architecture Containers share the host operating system's kernel and isolate the application processes from each other. Each container runs in its own user space but uses the host's kernel. VMs, on the other hand, run a complete operating system, including its own kernel, on top of a hypervisor. Each VM is essentially a full-fledged virtualized computer with its own resources. Resource Efficiency Containers are more lightweight and share the host OS kernel, which makes them more resource-efficient compared to VMs. Containers can start up quickly and consume fewer system resources. VMs have more overhead because each VM requires a full operating system and has its own kernel. This makes VMs less resource-efficient than containers. Isolation Containers provide process-level isolation, meaning that each container runs in its own process space, but they share the same OS kernel. This isolation is generally sufficient for most applications. VMs provide stronger isolation since each VM runs its own operating system and has its own kernel. This makes VMs a better choice in situations where strong isolation is a critical requirement. Portability Containers are highly portable because they encapsulate the application and its dependencies, ensuring consistency across different environments. VMs are less portable due to the larger size and complexity associated with bundling a full operating system with the application. Startup Time Containers can start up very quickly, typically in seconds, making them well-suited for microservices architectures and dynamic scaling. VMs generally have longer startup times, often measured in minutes, due to the time required to boot a full operating system. Resource Utilization Containers share the host OS resources, which can lead to higher density and more efficient resource utilization. 
VMs have a higher resource overhead because each VM requires its own set of resources, including memory, disk space, and CPU. Use Cases Containers are well-suited for microservices architectures, continuous integration/continuous deployment (CI/CD) pipelines, and scenarios where rapid deployment and scalability are crucial. VMs are suitable for scenarios that require strong isolation, compatibility with various operating systems, and where applications rely on specific OS configurations.
Source: Docker vs. Virtual Machines: Differences You Should Know In summary, Docker containers and virtual machines have different levels of abstraction and are suitable for different use cases. Containers are lightweight, portable, and efficient, making them popular for modern application development and deployment practices. Virtual machines provide stronger isolation and are more suitable for scenarios where running multiple instances of different operating systems is necessary. The choice between Docker containers and virtual machines depends on the specific requirements of the application and the environment in which it will be deployed. To install Docker Engine, see Install Docker Engine.
"},{"location":"devops/docker/#creating-a-simple-docker","title":"Creating a Simple Docker","text":"Command Description docker run <image>
Runs a Docker container from an image. docker ps
Lists running Docker containers. docker ps -a
Lists all Docker containers, both running and stopped. docker stop <container>
Stops a running Docker container. docker rm <container>
Removes a Docker container. docker images
Lists Docker images. docker rmi <image>
Removes a Docker image. docker pull <image>
Pulls a Docker image from a Docker registry. docker build -t <tag> .
Builds a Docker image from a Dockerfile in the current directory. docker exec -it <container> <command>
Executes a command in a running Docker container. docker logs <container>
Fetches the logs of a Docker container. Hello Markdown!
pip install termynalInstalled FROM openjdk:17-alpine\nVOLUME /tmp\nARG JAR_FILE=target/gateway-0.0.1-SNAPSHOT.jar\nCOPY ${JAR_FILE} app.jar\nENTRYPOINT [\"java\",\"-jar\",\"/app.jar\"]\n
https://docs.docker.com/engine/install/
https://www.docker.com/blog/how-to-use-your-own-registry-2/
-
Docker vs. Virtual Machines: Differences You Should Know \u21a9
"},{"location":"devops/kubernetes/","title":"Kubernetes","text":""},{"location":"devops/kubernetes/#kubernetes","title":"Kubernetes","text":"Kubernetes, also known as K8s, is an open-source platform designed to automate deploying, scaling, and operating application containers. It was originally designed by Google and is now maintained by the Cloud Native Computing Foundation.
Key features of Kubernetes include:
- Service discovery and load balancing: Kubernetes can expose a container using the DNS name or their own IP address. If traffic to a container is high, Kubernetes is able to load balance and distribute the network traffic to help the deployment stable.
- Storage orchestration: Kubernetes allows you to automatically mount a storage system of your choice, such as local storages, public cloud providers, and more.
- Automated rollouts and rollbacks: You can describe the desired state for your deployed containers using Kubernetes, and it can change the actual state to the desired state at a controlled rate. For example, you can automate Kubernetes to create new containers for your deployment, remove existing containers and adopt all their resources to the new container.
- Automatic bin packing: You provide Kubernetes with a cluster of nodes that it can use to run containerized tasks. You tell Kubernetes how much CPU and memory (RAM) each container needs. Kubernetes can fit containers onto your nodes to make the best use of your resources.
- Self-healing: Kubernetes restarts containers that fail, replaces and reschedules containers when nodes die, kills containers that don\u2019t respond to your user-defined health check, and doesn\u2019t advertise them to clients until they are ready to serve.
- Secret and configuration management: Kubernetes lets you store and manage sensitive information, such as passwords, OAuth tokens, and SSH keys. You can deploy and update secrets and application configuration without rebuilding your container images, and without exposing secrets in your stack configuration.
MiniKube
https://cloud.google.com/learn/what-is-kubernetes?hl=pt-br#section-4
https://serverlessland.com/
"},{"location":"devops/packaging/","title":"Packaging","text":""},{"location":"devops/packaging/#maven","title":"Maven","text":"Maven uses an XML file to describe the software project being built, its dependencies on other external modules and components, the build order, directories, and required plugins. It comes with pre-defined targets for performing certain well-defined tasks such as compilation of code and its packaging.
Key Features: - Simple project setup that follows best practices. - Dependency management including automatic updating, dependency closures (also known as transitive dependencies) - Able to easily work with multiple projects at the same time. - Large and mature community with a large ecosystem of plugins and integrations.
mvn clean package\n
mvn clean install\n
mvn clean package spring-boot:run\n
mvn versions:display-dependency-updates\n
mvn dependency:analyze\n
more about Maven dependency plugin
"},{"location":"devops/packaging/#gradle","title":"Gradle","text":"Gradle is another build automation tool that builds upon the concepts of Apache Ant and Apache Maven and introduces a Groovy-based domain-specific language (DSL) instead of the XML form used by Apache Maven for declaring the project configuration. Gradle provides a platform to support the entire development lifecycle of a software project.
Key Features: - Declarative builds and build-by-convention. - Language for dependency-based programming. - Structure your build. - Deep API. - Multi-project builds. - Many ways to manage dependencies. - Integration with existing structures. - Ease of migration.
"},{"location":"devops/release/","title":"Release","text":""},{"location":"devops/release/#infrastructure-as-code-and-automation-iac","title":"Infrastructure as Code and Automation (IaC)","text":"Infrastructure as Code (IaC) is a method of managing and provisioning computing infrastructure through machine-readable definition files, rather than physical hardware configuration or interactive configuration tools.
In other words, IaC is the process of managing and provisioning computer data centers through machine-readable definition files, rather than physical hardware configuration or interactive configuration tools.
The IT infrastructure managed by this comprises both physical equipment such as bare-metal servers as well as virtual machines and associated configuration resources. The definitions may be in a version control system. It can use either scripts or declarative definitions, rather than manual processes, but the term is more often used to promote declarative approaches.
IaC approaches are promoted for cloud computing, which is sometimes marketed as Infrastructure as a Service (IaaS). IaC supports IaaS, but should not be confused with it.
Jenkins DSL (Domain Specific Language)
"},{"location":"devops/release/#jenkins","title":"Jenkins","text":"Installing Jenkins
SDLC - Software Development LifeCycle
"},{"location":"devops/release/#service-level-agreement-sla","title":"Service-level agreement - SLA","text":"Service-level agreement, well-known as SLA, is
References:
-   [Wiki Service-level Agreement](https://en.wikipedia.org/wiki/Service-level_agreement)\n
"},{"location":"devops/version-control-system/","title":"Version Control","text":""},{"location":"devops/version-control-system/#git","title":"Git","text":"Git is a distributed version control system designed to handle everything from small to very large projects with speed and efficiency. It was created by Linus Torvalds in 2005 for development of the Linux kernel.
Key features of Git include:
- Distributed Version Control: This means that every user has a complete copy of the project repository on their local machine. This allows for operations to be performed offline and provides a backup in case the central repository fails.
- Branching and Merging: Git's branching model allows developers to work on different features or bugs in isolation, without affecting the main codebase. These branches can then be merged back into the main codebase when the work is complete.
- Speed: Git is designed to be fast and efficient, even for large projects.
- Data Integrity: Git uses a data model that ensures the cryptographic integrity of every bit of your project. Every file and commit is checksummed and retrieved by its checksum when checked back out.
- Staging Area: Git provides a staging area or \"index\" that allows you to format and review your commits before completing the commit.
SCM - Source Code Management (also known as Software Configuration Management)
https://twitter.com/milan_milanovic/status/1745435542127349899
"},{"location":"handout/architecture/","title":"Architecture","text":""},{"location":"handout/architecture/#clean-architecture","title":"Clean Architecture","text":"Total desacoplamento das regras de neg\u00f3cios das camadas de interface:
Source: The Clean Code Blog Em nossa arquitetura:
flowchart LR\n subgraph Controller\n direction TB\n Interface:::adapter\n RecordIn:::adapter\n RecordOut:::adapter\n end\n subgraph Case\n direction TB\n Service:::case\n DTO:::case\n end\n subgraph Entity\n direction TB\n Repository:::entity\n Table:::entity\n end\n\n Interface --> RecordIn\n Interface --> RecordOut\n\n Controller <--> parser[\"Parser\"] <--> Case\n\n Service --> DTO\n\n Case <--> mapper[\"Mapper\"] <--> Entity\n\n Repository --> Table\n\n classDef adapter fill:#6f6\n classDef case fill:#f99\n classDef entity fill:#ff9\n
"},{"location":"handout/architecture/#referencias","title":"Refer\u00eancias:","text":" -
Criando um projeto Spring Boot com Arquitetura Limpa by Giuliana Silva Bezerra
\u21a9
-
Clean Architecture: A Craftsman's Guide to Software Structure and Design \u21a9
-
Como se faz DevOps: Organizando pessoas, dos silos aos times de plataforma \u21a9
"},{"location":"handout/business/","title":"Business","text":""},{"location":"handout/business/#compromissos-e-contratos","title":"Compromissos e Contratos","text":"SLI significa Service Level Indicator, ou Indicador de N\u00edvel de Servi\u00e7o. S\u00e3o m\u00e9tricas quantitativas que medem a qualidade de um servi\u00e7o. Por exemplo, se o SLA especificar que os sistemas v\u00e3o estar dispon\u00edveis 99,95% do tempo, o SLI \u00e9 a medi\u00e7\u00e3o real da disponibilidade.
SLO significa Service Level Objective, ou Objetivo de N\u00edvel de Servi\u00e7o. S\u00e3o metas espec\u00edficas de desempenho que uma equipe de SRE define para cumprir os requisitos do SLA.
SLA significa Service Level Agreement, ou Acordo de N\u00edvel de Servi\u00e7o. \u00c9 um acordo entre a empresa e o cliente acerca do servi\u00e7o contratado. Por exemplo, se assinamos com o cliente que vamos manter ativo o seu ecommerce durante pelo menos 99,99% do tempo do m\u00eas, isso quer dizer que o m\u00e1ximo de tempo que a p\u00e1gina pode estar inacess\u00edvel durante o m\u00eas ser\u00e1 4 minutos e 19 segundos.
\nflowchart LR\nsubgraph \"SLI\"\n a(\"M\u00e9tricas\")\nend\nsubgraph \"SLO\"\n b(\"Objetivos\")\nend\nsubgraph \"SLA\"\n c(\"Promessas\")\nend\n\n\nSLI --> SLO --> SLA --> SLI
"},{"location":"handout/business/#cicd-continuous-integration-and-continuous-delivery","title":"CI/CD - Continuous Integration and Continuous Delivery","text":"CI/CD \u00e9 uma abordagem pr\u00e1tica e \u00e1gil para o desenvolvimento de software que combina duas pr\u00e1ticas: Integra\u00e7\u00e3o Cont\u00ednua (CI) e Entrega Cont\u00ednua/Implanta\u00e7\u00e3o Cont\u00ednua (CD). Esses processos automatizam a constru\u00e7\u00e3o, teste e implanta\u00e7\u00e3o de aplica\u00e7\u00f5es, facilitando um ciclo de desenvolvimento mais r\u00e1pido e confi\u00e1vel.
"},{"location":"handout/business/#conceito-de-cicd","title":"Conceito de CI/CD","text":" - Integra\u00e7\u00e3o Cont\u00ednua (CI):
- Objetivo: Automatizar a integra\u00e7\u00e3o de c\u00f3digo de m\u00faltiplos desenvolvedores em um reposit\u00f3rio central v\u00e1rias vezes ao dia.
- Processo: Sempre que um desenvolvedor faz commit de c\u00f3digo em um reposit\u00f3rio, um servidor de CI automaticamente verifica e testa o novo c\u00f3digo para detectar problemas rapidamente.
-
Ferramentas Comuns: Jenkins, Travis CI, CircleCI, GitLab CI/CD.
-
Entrega Cont\u00ednua (CD - Continuous Delivery):
- Objetivo: Automatizar a entrega de c\u00f3digo para um ambiente de produ\u00e7\u00e3o de maneira segura e r\u00e1pida.
- Processo: Ap\u00f3s a fase de integra\u00e7\u00e3o cont\u00ednua, o c\u00f3digo \u00e9 preparado para a produ\u00e7\u00e3o atrav\u00e9s de uma s\u00e9rie de testes automatizados. O c\u00f3digo est\u00e1 sempre pronto para ser implantado com um simples clique ou comando.
-
Ferramentas Comuns: Jenkins, GitLab CI/CD, Bamboo.
-
Implanta\u00e7\u00e3o Cont\u00ednua (CD - Continuous Deployment):
- Objetivo: Automatizar a implanta\u00e7\u00e3o de c\u00f3digo diretamente em produ\u00e7\u00e3o sem interven\u00e7\u00e3o manual.
- Processo: Ap\u00f3s passar por todos os testes, o c\u00f3digo \u00e9 automaticamente implantado em produ\u00e7\u00e3o. Isso requer um alto n\u00edvel de confian\u00e7a nos testes automatizados.
- Ferramentas Comuns: Jenkins, GitLab CI/CD, Spinnaker.
"},{"location":"handout/business/#vantagens-do-cicd","title":"Vantagens do CI/CD","text":" - Detec\u00e7\u00e3o Precoce de Problemas: Integra\u00e7\u00e3o cont\u00ednua ajuda a detectar e corrigir problemas rapidamente.
- Entrega R\u00e1pida: Automatiza\u00e7\u00e3o da entrega permite que novas funcionalidades e corre\u00e7\u00f5es cheguem aos usu\u00e1rios mais rapidamente.
- Qualidade e Confiabilidade: Testes automatizados garantem que o c\u00f3digo est\u00e1 funcionando conforme esperado antes de ser implantado.
- Feedback R\u00e1pido: Desenvolvedores recebem feedback r\u00e1pido sobre o estado do c\u00f3digo, facilitando um desenvolvimento mais \u00e1gil e iterativo.
- Automa\u00e7\u00e3o: Reduz o trabalho manual, minimizando erros humanos e aumentando a efici\u00eancia.
"},{"location":"handout/business/#conclusao","title":"Conclus\u00e3o","text":"CI/CD \u00e9 uma pr\u00e1tica essencial no desenvolvimento moderno de software, promovendo automa\u00e7\u00e3o, rapidez e confiabilidade nos processos de integra\u00e7\u00e3o, teste e implanta\u00e7\u00e3o de aplica\u00e7\u00f5es. Utilizando ferramentas como Jenkins, GitLab CI/CD e outras, equipes de desenvolvimento podem entregar software de alta qualidade de forma cont\u00ednua e eficiente.
Source: Wikipedia - Devops"},{"location":"handout/business/#iac-infrastructure-as-code","title":"IaC - Infrastructure as Code","text":"IaC, ou \"Infrastructure as Code\" (Infraestrutura como C\u00f3digo), \u00e9 uma abordagem para gerenciar e provisionar a infraestrutura de TI atrav\u00e9s de arquivos de configura\u00e7\u00e3o leg\u00edveis por humanos, em vez de processos manuais. Esta pr\u00e1tica permite automatizar a configura\u00e7\u00e3o de infraestrutura, tornando-a mais eficiente, replic\u00e1vel e gerenci\u00e1vel.
"},{"location":"handout/business/#conceito-de-iac","title":"Conceito de IaC","text":"Em vez de configurar manualmente servidores, redes, e outros componentes de infraestrutura, voc\u00ea escreve c\u00f3digo para definir e gerenciar essas configura\u00e7\u00f5es. Esse c\u00f3digo pode ser armazenado em sistemas de controle de vers\u00e3o, revisado, testado e aplicado de maneira consistente.
"},{"location":"handout/business/#ferramentas-comuns-de-iac","title":"Ferramentas Comuns de IaC","text":" - Terraform: Uma ferramenta de c\u00f3digo aberto que permite definir a infraestrutura em um arquivo de configura\u00e7\u00e3o usando o HashiCorp Configuration Language (HCL) ou JSON.
- AWS CloudFormation: Um servi\u00e7o da Amazon Web Services que permite modelar e configurar recursos da AWS.
- Ansible: Uma ferramenta que pode automatizar o provisionamento de infraestrutura, al\u00e9m de gerenciamento de configura\u00e7\u00e3o e implanta\u00e7\u00e3o de aplica\u00e7\u00f5es.
"},{"location":"handout/business/#vantagens-do-iac","title":"Vantagens do IaC","text":" - Consist\u00eancia: A infraestrutura \u00e9 provisionada de forma consistente cada vez que o c\u00f3digo \u00e9 executado.
- Reprodutibilidade: F\u00e1cil de replicar ambientes, como desenvolvimento, teste e produ\u00e7\u00e3o.
- Controle de Vers\u00e3o: As configura\u00e7\u00f5es de infraestrutura podem ser versionadas e auditadas, assim como o c\u00f3digo de aplica\u00e7\u00e3o.
- Automa\u00e7\u00e3o: Reduz o erro humano e aumenta a velocidade ao automatizar tarefas repetitivas.
- Documenta\u00e7\u00e3o: O pr\u00f3prio c\u00f3digo serve como documenta\u00e7\u00e3o da infraestrutura.
"},{"location":"handout/business/#conclusao_1","title":"Conclus\u00e3o","text":"IaC transforma a gest\u00e3o de infraestrutura, permitindo uma abordagem mais \u00e1gil, escal\u00e1vel e segura. Usando ferramentas como Terraform, CloudFormation ou Ansible, equipes podem definir, gerenciar e versionar a infraestrutura de maneira eficiente e confi\u00e1vel.
"},{"location":"handout/business/#iaas-infrastructure-as-a-service","title":"IaaS - Infrastructure as a Service","text":"IaaS, ou \"Infrastructure as a Service\" (Infraestrutura como Servi\u00e7o), \u00e9 um modelo de servi\u00e7o de computa\u00e7\u00e3o em nuvem que oferece recursos computacionais fundamentais como servidores virtuais, armazenamento, e redes, sob demanda, na internet. Esses recursos s\u00e3o escal\u00e1veis e gerenciados por um provedor de servi\u00e7os, permitindo que as empresas evitem o custo e a complexidade de comprar e gerenciar a pr\u00f3pria infraestrutura f\u00edsica.
"},{"location":"handout/business/#conceito-de-iaas","title":"Conceito de IaaS","text":"Com IaaS, os usu\u00e1rios podem alugar recursos de computa\u00e7\u00e3o, como m\u00e1quinas virtuais, armazenamento, e redes, e pagar somente pelo que utilizam. Esse modelo oferece flexibilidade, escalabilidade e efici\u00eancia, permitindo que as empresas foquem em suas aplica\u00e7\u00f5es e servi\u00e7os em vez de gerenciar a infraestrutura subjacente.
"},{"location":"handout/business/#provedores-comuns-de-iaas","title":"Provedores Comuns de IaaS","text":" - Amazon Web Services (AWS): Oferece servi\u00e7os como EC2 (Elastic Compute Cloud), S3 (Simple Storage Service), e VPC (Virtual Private Cloud).
- Microsoft Azure: Oferece servi\u00e7os como Azure Virtual Machines, Azure Blob Storage, e Virtual Networks.
- Google Cloud Platform (GCP): Oferece servi\u00e7os como Compute Engine, Cloud Storage, e Virtual Private Cloud.
"},{"location":"handout/business/#vantagens-do-iaas","title":"Vantagens do IaaS","text":" - Escalabilidade: Capacidade de aumentar ou diminuir recursos rapidamente conforme a demanda.
- Custo-Efetivo: Pague apenas pelos recursos que utiliza, sem necessidade de grandes investimentos iniciais em hardware.
- Flexibilidade: Escolha e configure recursos conforme suas necessidades espec\u00edficas.
- Redu\u00e7\u00e3o de Tempo: Rapidamente provisiona e deprovisiona recursos, acelerando a implementa\u00e7\u00e3o de novos projetos.
- Gerenciamento: O provedor de IaaS gerencia a infraestrutura f\u00edsica, enquanto voc\u00ea gerencia apenas os recursos alocados.
"},{"location":"handout/business/#conclusao_2","title":"Conclus\u00e3o","text":"IaaS oferece uma solu\u00e7\u00e3o poderosa e flex\u00edvel para organiza\u00e7\u00f5es que precisam de infraestrutura computacional robusta sem o \u00f4nus de gerenciar hardware f\u00edsico. Provedores como AWS, Azure, e GCP facilitam o provisionamento e gerenciamento de servidores, armazenamento e redes, permitindo que as empresas se concentrem no desenvolvimento e opera\u00e7\u00e3o de suas aplica\u00e7\u00f5es e servi\u00e7os.
"},{"location":"handout/business/#paas-platform-as-a-service","title":"PaaS - Platform as a Service","text":"PaaS, ou \"Platform as a Service\" (Plataforma como Servi\u00e7o), \u00e9 um modelo de servi\u00e7o de computa\u00e7\u00e3o em nuvem que fornece uma plataforma permitindo que os clientes desenvolvam, executem e gerenciem aplica\u00e7\u00f5es sem a complexidade de construir e manter a infraestrutura normalmente associada ao desenvolvimento e ao lan\u00e7amento de uma aplica\u00e7\u00e3o.
"},{"location":"handout/business/#exemplo-de-paas","title":"Exemplo de PaaS","text":"Imagine que voc\u00ea \u00e9 um desenvolvedor de software e deseja criar um aplicativo web.
Sem PaaS
- Configura\u00e7\u00e3o do Servidor: Voc\u00ea precisaria comprar servidores f\u00edsicos ou m\u00e1quinas virtuais para hospedar sua aplica\u00e7\u00e3o.
- Instala\u00e7\u00e3o do Sistema Operacional: Configurar o sistema operacional nos servidores.
- Configura\u00e7\u00e3o de Redes e Seguran\u00e7a: Configurar redes, firewalls, e garantir a seguran\u00e7a da aplica\u00e7\u00e3o.
- Banco de Dados: Instalar e gerenciar o banco de dados.
- Manuten\u00e7\u00e3o: Monitorar e manter o sistema, aplicando patches de seguran\u00e7a e atualiza\u00e7\u00f5es.
Com PaaS
- Escolha da Plataforma: Voc\u00ea escolhe uma plataforma PaaS, como Google App Engine, Microsoft Azure, ou Heroku.
- Desenvolvimento da Aplica\u00e7\u00e3o: Foca apenas no desenvolvimento do c\u00f3digo da aplica\u00e7\u00e3o.
- Desdobramento: Sobe (deploy) o c\u00f3digo para a plataforma PaaS.
- Gest\u00e3o e Escalabilidade: A plataforma cuida automaticamente da hospedagem, seguran\u00e7a, balanceamento de carga, escalabilidade, e manuten\u00e7\u00e3o.
"},{"location":"handout/business/#vantagens-do-paas","title":"Vantagens do PaaS","text":" - Redu\u00e7\u00e3o de Complexidade: Voc\u00ea n\u00e3o precisa se preocupar com a infraestrutura subjacente.
- Escalabilidade: F\u00e1cil de escalar sua aplica\u00e7\u00e3o conforme a demanda.
- Foco no Desenvolvimento: Permite focar mais no desenvolvimento da aplica\u00e7\u00e3o e menos na gest\u00e3o de servidores.
- Custo-Efetivo: Geralmente paga-se apenas pelos recursos usados, evitando grandes investimentos iniciais em hardware.
"},{"location":"handout/business/#conclusao_3","title":"Conclus\u00e3o","text":"Em resumo, PaaS permite que desenvolvedores se concentrem em criar e melhorar suas aplica\u00e7\u00f5es sem se preocupar com a infraestrutura necess\u00e1ria para suport\u00e1-las.
"},{"location":"handout/business/#paap-platform-as-a-product","title":"PaaP - Platform as a Product","text":"\"PaaP\" significa \"Plataforma como Produto\", um conceito que v\u00ea uma plataforma n\u00e3o apenas como um conjunto de ferramentas ou servi\u00e7os, mas como um produto completo e coeso que fornece uma solu\u00e7\u00e3o abrangente para seus usu\u00e1rios. \u00c9 diferente de Plataforma como Servi\u00e7o (PaaS), que geralmente foca em fornecer a infraestrutura e o ambiente para desenvolver, executar e gerenciar aplica\u00e7\u00f5es. PaaP enfatiza a experi\u00eancia do usu\u00e1rio, a integra\u00e7\u00e3o e o valor entregue ao usu\u00e1rio como um produto unificado.
"},{"location":"handout/business/#conceitos-chave-do-paap","title":"Conceitos-Chave do PaaP","text":" -
Solu\u00e7\u00e3o de Ponta a Ponta: PaaP fornece uma solu\u00e7\u00e3o completa que cobre todos os aspectos das necessidades do usu\u00e1rio, desde o desenvolvimento e implanta\u00e7\u00e3o at\u00e9 o gerenciamento e escalabilidade. Ele integra v\u00e1rias ferramentas e servi\u00e7os em uma experi\u00eancia cont\u00ednua.
-
Design Centrado no Usu\u00e1rio: A plataforma \u00e9 projetada com foco na experi\u00eancia do usu\u00e1rio. Prioriza a facilidade de uso, interfaces intuitivas e fluxos de trabalho simplificados para garantir que os usu\u00e1rios possam atingir seus objetivos de forma eficiente.
-
Integra\u00e7\u00e3o e Interoperabilidade: Plataformas PaaP frequentemente integram m\u00faltiplos servi\u00e7os e ferramentas, garantindo que eles funcionem juntos de forma harmoniosa. Essa integra\u00e7\u00e3o reduz a complexidade para os usu\u00e1rios, que n\u00e3o precisam gerenciar sistemas diferentes.
-
Entrega de Valor: A plataforma \u00e9 empacotada e comercializada como um produto que entrega proposi\u00e7\u00f5es de valor espec\u00edficas aos seus usu\u00e1rios. \u00c9 projetada para resolver problemas espec\u00edficos ou atender necessidades espec\u00edficas de maneira abrangente.
-
Melhoria Cont\u00ednua: Produtos PaaP s\u00e3o continuamente melhorados com base no feedback dos usu\u00e1rios e nas demandas do mercado. Atualiza\u00e7\u00f5es e aprimoramentos regulares garantem que a plataforma permane\u00e7a relevante e eficaz.
"},{"location":"handout/business/#exemplo-salesforce","title":"Exemplo: Salesforce","text":"O Salesforce \u00e9 um exemplo not\u00e1vel de Plataforma como Produto. Ele oferece uma su\u00edte abrangente de ferramentas para gerenciamento de relacionamento com clientes (CRM), mas vai al\u00e9m de apenas fornecer infraestrutura.
-
Solu\u00e7\u00e3o de CRM de Ponta a Ponta: O Salesforce fornece ferramentas para vendas, atendimento ao cliente, automa\u00e7\u00e3o de marketing, an\u00e1lises e mais, tudo integrado em uma \u00fanica plataforma.
-
Design Centrado no Usu\u00e1rio: O Salesforce \u00e9 projetado para ser f\u00e1cil de usar, com pain\u00e9is personaliz\u00e1veis, interfaces intuitivas e amplos recursos de suporte.
-
Integra\u00e7\u00e3o e Interoperabilidade: Ele integra com uma ampla gama de aplica\u00e7\u00f5es e servi\u00e7os de terceiros, permitindo que os usu\u00e1rios conectem seu CRM com outras ferramentas que utilizam em seus neg\u00f3cios.
-
Entrega de Valor: O Salesforce \u00e9 comercializado como um produto que ajuda as empresas a gerenciar seus relacionamentos com clientes de forma mais eficaz, melhorar as vendas e aprimorar o atendimento ao cliente.
-
Melhoria Cont\u00ednua: O Salesforce lan\u00e7a regularmente atualiza\u00e7\u00f5es e novos recursos com base no feedback dos usu\u00e1rios e nos avan\u00e7os tecnol\u00f3gicos, garantindo que a plataforma evolua com as necessidades dos usu\u00e1rios.
"},{"location":"handout/business/#beneficios-do-paap","title":"Benef\u00edcios do PaaP","text":" -
Experi\u00eancia do Usu\u00e1rio Simplificada: os usu\u00e1rios interagem com uma \u00fanica plataforma unificada, simplificando seu fluxo de trabalho e reduzindo a necessidade de gerenciar m\u00faltiplas ferramentas.
-
Aumento da Produtividade: ferramentas e servi\u00e7os integrados simplificam os processos, levando a uma maior efici\u00eancia e produtividade.
-
Escalabilidade: solu\u00e7\u00f5es PaaP s\u00e3o projetadas para escalar com as necessidades do usu\u00e1rio, facilitando o crescimento sem a necessidade de trocar de plataformas ou ferramentas.
-
Maior Valor: ao fornecer uma solu\u00e7\u00e3o abrangente, PaaP entrega maior valor aos usu\u00e1rios, atendendo suas necessidades de forma mais eficaz do que ferramentas dispersas.
-
Adapta\u00e7\u00e3o Cont\u00ednua: atualiza\u00e7\u00f5es e melhorias regulares garantem que a plataforma permane\u00e7a relevante e \u00fatil \u00e0 medida que as necessidades dos usu\u00e1rios evoluem.
"},{"location":"handout/business/#conclusao_4","title":"Conclus\u00e3o","text":"Plataforma como Produto (PaaP) representa uma abordagem hol\u00edstica para a entrega de solu\u00e7\u00f5es tecnol\u00f3gicas, focando em fornecer produtos completos, integrados e centrados no usu\u00e1rio. Ao combinar as for\u00e7as de v\u00e1rias ferramentas e servi\u00e7os em uma plataforma coesa, PaaP oferece maior valor, simplicidade e efici\u00eancia aos seus usu\u00e1rios. Salesforce \u00e9 um exemplo not\u00e1vel, mas os princ\u00edpios de PaaP podem ser aplicados em diversas ind\u00fastrias e solu\u00e7\u00f5es tecnol\u00f3gicas para criar plataformas mais eficazes e amig\u00e1veis.
-
Platform Revolution: How Networked Markets Are Transforming the Economy and How to Make Them Work for You \u21a9
"},{"location":"handout/cloud/aws/cli/","title":"cli","text":""},{"location":"handout/cloud/aws/cli/#setting-up-the-aws-cli","title":"Setting up the AWS Cli","text":"aws configure\n
aws configureAWS Access Key ID: ****************5DMGAWS Secret Access Key]: *********************************fhwQtDefault region name [None]: Default output format [None]: aws sts get-session-token\n
aws sts get-session-token{ \"Credentials\": { \"AccessKeyId\": \"ASIA4MTWJ5HP4RFKVFX2\", \"SecretAccessKey\": \"RWfqFn9NZRZYEy1a5sFpdUPSd5i03YRer/9+PZ6V\", \"SessionToken\": \"FwoGZXIvYXdzEJX//////////wEaDIRJrTOKnJTZ/ ZpZGiKCAYnnc+16sxQl/eGYvj998q9u2eFb3VziCgpvNzKAuI/YcthL2XLp2VUXZswaOb5C3BikDENEKVbeH4va32ltJ/1Bm+F/ qkHNE9dTRMOxshV9iwkCe3/4+Sl9O6dZJguglcCq2Yfh+9HDzJxo6WtAd7UiCL6C/ hlcWgRS24IhvbdUDsgoy47qsQYyKNwLwW9ki4w5bmYRM9MVMinufs4LEkVRJGpEmc8 RG3gNaGvnRB0d840=\", \"Expiration\": \"2024-05-08T07:55:55+00:00\" }} aws sts get-caller-identity\n
aws sts get-caller-identity{ \"UserId\": \"AIDA4MTWJ5HPRUU7R22VG\", \"Account\": \"851725380063\", \"Arn\": \"arn:aws:iam::851725380063:user/root\"}"},{"location":"handout/cloud/aws/cli/#reference","title":"Reference","text":" -
AWS Command Line Interface Documentation - https://docs.aws.amazon.com/cli/
-
User Guide - Install AWS Cli
"},{"location":"handout/cloud/aws/eks/","title":"eks","text":""},{"location":"handout/cloud/aws/eks/#elastic-kubernetes-service","title":"Elastic Kubernetes Service","text":"Never spend your money before you have it, Jefferson T.
EKS n\u00e3o tem cota gr\u00e1tis, sempre \u00e9 muito bem cobrado.
"},{"location":"handout/cloud/aws/eks/#rise-up-an-eks","title":"Rise up an EKS","text":""},{"location":"handout/cloud/aws/eks/#1-creating-a-role","title":"1. Creating a role","text":"IAM - Identity and Access Management: gerencia usu\u00e1rios e acessos.
Role \u00e9 um grupo de policiies que est\u00e3o vinculadas a servi\u00e7os AWS, assim, o EKS precisa de permissionamento para acessar os recursos da AWS.
"},{"location":"handout/cloud/aws/eks/#2-creating-a-vpc","title":"2. Creating a VPC","text":"Virtual Private Cloud
Organiza\u00e7\u00e3o do Kubernetes
Kubernetes Components 2 \u00c9 necess\u00e1rio criar uma estrutura de rede para suportar o Kubernetes, para isso, \u00e9 aconselh\u00e1vel utilizar um template do Cloud Formation. Abaixe o arquivo amazon-eks-vpc-private-subnets.yaml e d\u00ea um upload na cria\u00e7\u00e3o da VPC.
https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-vpc-private-subnets.yaml\n
flowchart TB\n subgraph Region\n direction LR\n subgraph Zone A\n direction LR\n subgraph subpri1[\"Subnet Private\"]\n direction TB\n poda1[\"pod 1\"]\n poda2[\"pod 2\"]\n poda3[\"pod 3\"]\n end\n subgraph subpub1[\"Subnet Public\"]\n loadbalancea[\"Load Balance\"]\n end\n end\n subgraph Zone B\n direction LR\n subgraph subpri2[\"Subnet Private\"]\n direction TB\n podb1[\"pod 1\"]\n podb2[\"pod 2\"]\n podb3[\"pod 3\"]\n end\n subgraph subpub2[\"Subnet Public\"]\n loadbalanceb[\"Load Balance\"]\n end\n end\n User --> loadbalancea\n loadbalancea --> poda1\n loadbalancea --> poda2\n loadbalancea --> poda3\n User --> loadbalanceb\n loadbalanceb --> podb1\n loadbalanceb --> podb2\n loadbalanceb --> podb3\n end
gateway --> auth gateway --> discovery
"},{"location":"handout/cloud/aws/eks/#3-building-an-eks","title":"3. Building an EKS","text":""},{"location":"handout/cloud/aws/eks/#4-accessing-the-eks","title":"4. Accessing the EKS","text":"On terminal, after that it had been set up the aws cli.
aws configure\n
See the configuration that was done.
aws configure list\n
aws configure list Name Value Type Location ---- ----- ---- -------- profile <not set> None Noneaccess_key ****************TTNI shared-credentials-file secret_key ****************zAJ1 shared-credentials-file region us-east-2 config-file ~/.aws/config Set up the kube-config to point to the remote aws eks cluster.
aws eks update-kubeconfig --name eks-store\n
aws eks update-kubeconfig --name eks-storeAdded new context arn:aws:eks:us-east-2:058264361068:cluster/eks-store to /Users/sandmann/.kube/config>>kubectl get podsNo resources found in default namespace.>>kubectl get nodesNo resources found> Come back to AWS EKS > compute:
Notice that there are no nodes on the cluster yet: only the Control Plane has been created, so no worker nodes exist yet.
Attach roles to node group, it is exclusive for the worker nodes.
IAM > Roles
Add Permissions
- AmazonEKS_CNI_Policy (Configuration Network Interface)
- AmazonEKSWorkerNodePolicy
- AmazonEC2ContainerRegistryReadOnly
Review
Group Node Group
Only private subnets:
kubectl get nodes\n
kubectl get nodesNAME STATUS ROLES AGE VERSIONip-192-168-179-174.us-east-2.compute.internal Ready <none> 54s v1.29.3-eks-ae9a62aip-192-168-204-234.us-east-2.compute.internal Ready <none> 54s v1.29.3-eks-ae9a62a Now, deploy the microservice.
kubectl apply -f ./k8s/deployment.yamldeployment.apps/gateway createdkubectl apply -f ./k8s/service.yamlservice/gateway created>>>kubectl get allNAME READY STATUS RESTARTS AGEpod/gateway-7894679df8-lbngj 1/1 Running 0 81sNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEservice/gateway LoadBalancer 10.100.245.4 a3a5cc62ba81e466e9746f64f83fc349-1127848642.us-east-2.elb.amazonaws.com 8080:32681/TCP 25mservice/kubernetes ClusterIP 10.100.0.1 <none> 443/TCP 87mNAME READY UP-TO-DATE AVAILABLE AGEdeployment.apps/gateway 1/1 1 1 82sNAME DESIRED CURRENT READY AGEreplicaset.apps/gateway-7894679df8 1 1 1 82s> Jenkins update
Jenkins precisa instalar o awscli (adicionar ao docker-compose.yaml
)
RUN apt-get install -y awscli\n
Dentro da inst\u00e2ncia, configurar:
> aws configure\n> aws eks update-kubeconfig --name eks-store\n
Scale
> kubectl scale --replicas=3 deployment/gateway\n
kubectl scale --replicas=3 deployment/gatewaydeployment.apps/gateway scaledkubectl get podsNAME READY STATUS RESTARTS AGEgateway-7894679df8-62m7z 1/1 Running 0 12sgateway-7894679df8-r2kp2 1/1 Running 0 12sgateway-7894679df8-v6xhs 1/1 Running 0 5m58s
"},{"location":"handout/cloud/aws/eks/#references","title":"References:","text":" -
Setting up to use Amazon EKS \u21a9
-
Kubernetes Components \u21a9
-
Como criar um cluster Kubernetes na AWS com EKS by Fabricio Veronez
\u21a9
-
Creating a VPC for your Amazon EKS cluster \u21a9
-
AWS Princing Calculator - EKS \u21a9
-
Getting started with Amazon EKS \u2013 AWS Management Console and AWS CLI \u21a9
-
kubectl scale \u21a9
"},{"location":"handout/devops/jenkins/","title":"Jenkins","text":"Docker ComposeEnvironment Variables docker-compose.yaml# docker compose up -d --build --force-recreate\nversion: '3.8'\nname: ops\n\nservices:\n\njenkins:\n container_name: jenkins\n build:\n dockerfile_inline: |\n FROM jenkins/jenkins:jdk21\n USER root\n RUN apt-get update && apt-get install -y lsb-release\n RUN curl -fsSLo /usr/share/keyrings/docker-archive-keyring.asc \\\n https://download.docker.com/linux/debian/gpg\n RUN echo \"deb [arch=$(dpkg --print-architecture) \\\n signed-by=/usr/share/keyrings/docker-archive-keyring.asc] \\\n https://download.docker.com/linux/debian \\\n $(lsb_release -cs) stable\" > /etc/apt/sources.list.d/docker.list\n\n RUN apt-get update && apt-get install -y docker-ce maven\n\n RUN apt-get install -y apt-transport-https ca-certificates curl\n RUN curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg\n RUN chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg\n RUN echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list\n RUN chmod 644 /etc/apt/sources.list.d/kubernetes.list\n RUN apt-get update\n RUN apt-get install -y kubectl\n\n RUN apt-get install -y awscli\n\n RUN usermod -aG docker jenkins\n ports:\n - 9080:8080\n - 50000:50000 \n volumes:\n - $CONFIG/jenkins:/var/jenkins_home\n - /var/run/docker.sock:/var/run/docker.sock\n restart: always\n networks:\n - infra\n\nnetworks:\n infra:\n driver: bridge\n
.envCONFIG=./config\n
To run this container:
docker compose up -d --build\n
The will be avaliable at:
http://localhost:9000\n
"},{"location":"handout/devops/jenkins/#pipeline","title":"Pipeline","text":""},{"location":"handout/devops/jenkins/#checkout-scm","title":"Checkout SCM","text":"Jenkinsfilepipeline {\n agent any\n\n stages {\n stage('Build') {\n steps {\n sh 'mvn -B -DskipTests clean install'\n }\n }\n }\n}\n
Definindo o n\u00famero m\u00e1ximo de executores.
Instalando o plugin para executar o Docker dentro do Jenkins container.
BASED ARTICLE:
Getting \u201cPermission Denied\u201d error when pulling a docker image in Jenkins docker container on Mac
"},{"location":"handout/devops/kubernetes/","title":"Kubernetes","text":""},{"location":"handout/devops/kubernetes/#minikube","title":"Minikube","text":"Vers\u00e3o light do kubernetes, para rodar em m\u00e1quinas locais. Instala\u00e7\u00e3o do Kubernetes.
Para Inicializar o Minikube ap\u00f3s a instala\u00e7\u00e3o, utilize:
minikube start --driver=docker --profile=store\n
minikube profile list\n
minikube delete --all\n
minikube delete --all --purge\n
Dashboard
minikube dashboard\n
"},{"location":"handout/devops/kubernetes/#kubectl","title":"Kubectl","text":"Comando cliente de gerenciamento do Kubernetes.
kubectl apply -f <filename>\n
kubectl get deployments\n
kubectl get svc\n
kubectl get pods\n
kubectl port-forward <pod> 8080:8080\n
kubectl exec -it <pod> -- bash\n
kubectl delete --all\n
kubectl api-resources\n
kubectl logs <pod>\n
kubectl describe pod <pod>\n
reset the core dnskubectl -n kube-system rollout restart deployment coredns\n
"},{"location":"handout/devops/kubernetes/#services","title":"Services","text":" -
ClusterIP: apenas dentro do cluster.
-
NodePort: permite exposi\u00e7\u00e3o de porta para fora do cluster.
-
LoadBalancer: uma porta para diversas inst\u00e2ncias no cluster.
"},{"location":"handout/devops/kubernetes/#deploying-a-postgres","title":"Deploying a Postgres","text":"Crie um novo reposit\u00f3rio para armazenar as configura\u00e7\u00f5es do banco de dados: platform.241.store.db.
estrutura de diret\u00f3rio sugerida\ud83d\udcc4 store.account\n\ud83d\udcc1 store.db\n\u2514\u2500\u2500 \ud83d\udcc1 k8s\n \u251c\u2500\u2500 \ud83d\udcc4 configmap.yaml\n \u251c\u2500\u2500 \ud83d\udcc4 credentials.yaml\n \u251c\u2500\u2500 \ud83d\udcc4 pv.yaml\n \u251c\u2500\u2500 \ud83d\udcc4 pvc.yaml\n \u251c\u2500\u2500 \ud83d\udcc4 deployment.yaml\n \u2514\u2500\u2500 \ud83d\udcc4 service.yaml\n
configmap.yamlcredentials.yamlpv.yamlpvc.yamldeployment.yamlservice.yaml Configura\u00e7\u00e3o de conex\u00e3o do banco
configmap.yamlapiVersion: v1\nkind: ConfigMap\nmetadata:\n name: postgres-configmap\n labels:\n app: postgres\ndata:\n POSTGRES_HOST: postgres\n POSTGRES_DB: store\n
kubectl apply -f ./k8s/configmap.yaml\nkubectl get configmap\n
Configura\u00e7\u00e3o de acesso ao banco
credentials.yamlapiVersion: v1\nkind: Secret\nmetadata:\n name: postgres-credentials\ndata:\n POSTGRES_USER: c3RvcmU=\n POSTGRES_PASSWORD: c3RvcmU=\n
kubectl apply -f ./k8s/credentials.yaml\nkubectl get secrets\n
Use encode base64 para ofuscar a senha. Vide: Base64Encode.
Persistent Volume: espa\u00e7o alocado no cluster
pv.yamlapiVersion: v1\nkind: PersistentVolume\nmetadata:\n name: postgres-volume\n labels:\n type: local\n app: postgres\nspec:\n storageClassName: manual\n capacity:\n storage: 10Gi\n accessModes:\n - ReadWriteMany\n hostPath:\n path: /data/postgresql\n
kubectl apply -f ./k8s/pv.yaml\nkubectl get pv\n
Persistent Volume Claim: espa\u00e7o alocado do cluster para os pods.
pvc.yamlapiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: postgres-volume-claim\n labels:\n app: postgres\nspec:\n storageClassName: manual\n accessModes:\n - ReadWriteMany\n resources:\n requests:\n storage: 10Gi\n
kubectl apply -f ./k8s/pvc.yaml\nkubectl get pvc\n
deployment.yamlapiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: postgres\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: postgres\n template:\n metadata:\n labels:\n app: postgres\n spec:\n containers:\n - name: postgres\n image: 'postgres:latest'\n imagePullPolicy: IfNotPresent\n ports:\n - containerPort: 5432\n env:\n\n - name: POSTGRES_DB\n valueFrom:\n configMapKeyRef:\n name: postgres-configmap\n key: POSTGRES_DB\n\n - name: POSTGRES_USER\n valueFrom:\n secretKeyRef:\n name: postgres-credentials\n key: POSTGRES_USER\n\n - name: POSTGRES_PASSWORD\n valueFrom:\n secretKeyRef:\n name: postgres-credentials\n key: POSTGRES_PASSWORD\n\n volumeMounts:\n - mountPath: /var/lib/postgresql/data\n name: postgresdata\n volumes:\n - name: postgresdata\n persistentVolumeClaim:\n claimName: postgres-volume-claim\n
kubectl apply -f ./k8s/deployment.yaml\nkubectl get deployments\nkubectl get pods\n
service.yamlapiVersion: v1\nkind: Service\nmetadata:\n name: postgres\n labels:\n app: postgres\nspec:\n type: ClusterIP\n ports:\n - port: 5432\n selector:\n app: postgres\n
kubectl apply -f ./k8s/service.yaml\nkubectl get services\n
Acessando o pod do Postgres:
kubectl exec -it postgres-<pod-id> -- psql -h localhost -U store --password -p 5432 store\n
Redirecionando porta:
kubectl port-forward <pod> 5432:5432\n
"},{"location":"handout/devops/kubernetes/#deploying-the-discovery-microservice","title":"Deploying the Discovery Microservice","text":"discovery\ud83d\udcc1 store.discovery-resource\n\u251c\u2500\u2500 \ud83d\udcc1 src\n\u2502 \u2514\u2500\u2500 \ud83d\udcc1 main\n\u2502 \u2514\u2500\u2500 \ud83d\udcc1 resources\n\u2502 \u2514\u2500\u2500 \ud83d\udcc4 application.yaml\n\u251c\u2500\u2500 \ud83d\udcc1 k8s\n\u2502 \u251c\u2500\u2500 \ud83d\udcc4 configmap.yaml\n\u2502 \u251c\u2500\u2500 \ud83d\udcc4 deployment.yaml\n\u2502 \u2514\u2500\u2500 \ud83d\udcc4 service.yaml\n\u251c\u2500\u2500 \ud83d\udcc4 Dockerfile\n\u251c\u2500\u2500 \ud83d\udcc4 Jenkins\n\u2514\u2500\u2500 \ud83d\udcc4 pom.xml\n
configmap.yamldeployment.yamlservice.yaml configmap.yamlapiVersion: v1\nkind: ConfigMap\nmetadata:\n name: discovery-configmap\n labels:\n app: discovery\ndata:\n DISCOVERY_HOST: discovery \n
deployment.yamlapiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: discovery\n labels:\n app: discovery\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: discovery\n template:\n metadata:\n labels:\n app: discovery\n spec:\n containers:\n - name: discovery\n image: humbertosandmann/discovery:latest\n ports:\n - containerPort: 8761\n
service.yamlapiVersion: v1\nkind: Service\nmetadata:\n name: discovery\n labels:\n app: discovery\nspec:\n type: ClusterIP\n ports:\n - port: 8761\n targetPort: 8761\n protocol: TCP\n selector:\n app: discovery\n
"},{"location":"handout/devops/kubernetes/#deploying-a-microservice","title":"Deploying a Microservice","text":"account\ud83d\udcc1 store.account-resource\n\u251c\u2500\u2500 \ud83d\udcc1 src\n\u2502 \u2514\u2500\u2500 \ud83d\udcc1 main\n\u2502 \u2514\u2500\u2500 \ud83d\udcc1 resources\n\u2502 \u2514\u2500\u2500 \ud83d\udcc4 application.yaml\n\u251c\u2500\u2500 \ud83d\udcc1 k8s\n\u2502 \u251c\u2500\u2500 \ud83d\udcc4 deployment.yaml\n\u2502 \u2514\u2500\u2500 \ud83d\udcc4 service.yaml\n\u251c\u2500\u2500 \ud83d\udcc4 Dockerfile\n\u251c\u2500\u2500 \ud83d\udcc4 Jenkins\n\u2514\u2500\u2500 \ud83d\udcc4 pom.xml\n
application.yamldeployment.yamlservice.yaml application.yamlserver:\n port: 8080\n\nspring:\n application:\n name: account\n datasource:\n url: jdbc:postgresql://${POSTGRES_HOST}:5432/${POSTGRES_DB}\n username: ${POSTGRES_USER:postgres}\n password: ${POSTGRES_PASSWORD:Post123321}\n driver-class-name: org.postgresql.Driver\n flyway:\n baseline-on-migrate: true\n schemas: account\n jpa:\n properties:\n hibernate:\n default_schema: account\n\nmanagement:\n endpoints:\n web:\n base-path: /account/actuator\n exposure:\n include: [ 'prometheus' ]\n\neureka:\n client:\n register-with-eureka: true\n fetch-registry: true\n service-url:\n defaultZone: http://${DISCOVERY_HOST}:8761/eureka/\n
Subir no Git e rodar o Jenkins.
deployment.yamlapiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: account\nspec:\n selector:\n matchLabels:\n app: account\n replicas: 1\n template:\n metadata:\n labels:\n app: account\n spec:\n containers:\n - name: account\n image: humbertosandmann/account:latest\n ports:\n - containerPort: 8080\n env:\n\n - name: DISCOVERY_HOST\n valueFrom:\n configMapKeyRef:\n name: discovery-configmap\n key: DISCOVERY_HOST\n\n - name: POSTGRES_HOST\n valueFrom:\n configMapKeyRef:\n name: postgres-configmap\n key: POSTGRES_HOST\n\n - name: POSTGRES_DB\n valueFrom:\n configMapKeyRef:\n name: postgres-configmap\n key: POSTGRES_DB\n\n - name: POSTGRES_USER\n valueFrom:\n secretKeyRef:\n name: postgres-credentials\n key: POSTGRES_USER\n\n - name: POSTGRES_PASSWORD\n valueFrom:\n secretKeyRef:\n name: postgres-credentials\n key: POSTGRES_PASSWORD\n
service.yamlapiVersion: v1\nkind: Service\nmetadata:\n name: account\n labels:\n name: account\nspec:\n type: NodePort\n ports:\n - port: 8080\n targetPort: 8080\n protocol: TCP\n selector:\n app: account\n
kubectl apply -f ./k8s/service.yaml\nkubectl get services\n
kubectl apply -f k8s/deployment.yaml\nkubectl apply -f k8s/service.yaml \n
"},{"location":"handout/devops/kubernetes/#deploying-using-jenkins","title":"Deploying using Jenkins","text":""},{"location":"handout/devops/kubernetes/#creating-crendentials-for-jenkins-to-k8s","title":"Creating crendentials for Jenkins to K8s","text":"Criar credentials no Kubernetes para que o Jenkins possa conectar.
jenkins.yaml---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: jenkins\n namespace: default\n---\n\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: jenkins\n namespace: default\nrules:\n- apiGroups: [\"\"]\n resources: [\"pods\",\"services\"]\n verbs: [\"create\",\"delete\",\"get\",\"list\",\"patch\",\"update\",\"watch\"]\n- apiGroups: [\"apps\"]\n resources: [\"deployments\"]\n verbs: [\"create\",\"delete\",\"get\",\"list\",\"patch\",\"update\",\"watch\"]\n- apiGroups: [\"\"]\n resources: [\"pods/exec\"]\n verbs: [\"create\",\"delete\",\"get\",\"list\",\"patch\",\"update\",\"watch\"]\n- apiGroups: [\"\"]\n resources: [\"pods/log\"]\n verbs: [\"get\",\"list\",\"watch\"]\n- apiGroups: [\"\"]\n resources: [\"secrets\"]\n verbs: [\"get\",\"create\"]\n- apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"create\",\"get\",\"update\"]\n- apiGroups: [\"\"]\n resources: [\"persistentvolumeclaims\"]\n verbs: [\"create\",\"delete\",\"get\",\"list\",\"patch\",\"update\",\"watch\"]\n\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: jenkins-token\n annotations:\n kubernetes.io/service-account.name: jenkins\ntype: kubernetes.io/service-account-token\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: jenkins\n namespace: default\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: jenkins\nsubjects:\n- kind: ServiceAccount\n name: jenkins\n---\n# Allows jenkins to create persistent volumes\n# This cluster role binding allows anyone in the \"manager\" group to read secrets in any namespace.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: jenkins-crb\nsubjects:\n- kind: ServiceAccount\n namespace: default\n name: jenkins\nroleRef:\n kind: ClusterRole\n name: jenkinsclusterrole\n apiGroup: rbac.authorization.k8s.io\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n # \"namespace\" omitted since ClusterRoles are not 
namespaced\n name: jenkinsclusterrole\nrules:\n- apiGroups: [\"\"]\n resources: [\"persistentvolumes\"]\n verbs: [\"create\",\"delete\",\"get\",\"list\",\"patch\",\"update\",\"watch\"]\n
Executar a declara\u00e7\u00e3o:
kubectl apply -f jenkins.yaml\n
"},{"location":"handout/devops/kubernetes/#recovering-the-jenkins-token","title":"Recovering the Jenkins' Token","text":"kubectl get secrets\n
kubectl get secretsNAME TYPE DATA AGEjenkins-token kubernetes.io/service-account-token 3 21s Abrindo o objeto com o token.
kubectl describe secrets/jenkins-token\n
kubectl describe secrets/jenkins-tokenName: jenkins-tokenNamespace: defaultLabels: <none>Annotations: kubernetes.io/service-account.name: jenkins kubernetes.io/service-account.uid: 0d06d343-fd34-4aff-8396-5dfec5a9e5b6Type: kubernetes.io/service-account-tokenData====ca.crt: 1111 bytesnamespace: 7 bytestoken: eyJhbGciOiJSUzI1NiIsImtpZCI6IklqTkZXdEVKcW1iclBrNHBnQzJSX1F6QjFIWDFMX0FvNGVkNGd2aWFKd00ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImplbmtpbnMtdG9rZW4iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiamVua2lucyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjBkMDZkMzQzLWZkMzQtNGFmZi04Mzk2LTVkZmVjNWE5ZTViNiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmplbmtpbnMifQ.XkwD5vwC7CJNDv44PxCAIpLEfVlQbLE6VDNmTOEpFkaoe_x4ehU8QS8fnTgUz0a_vjUKuXum-PD2vF8Fx_WBsWVAG8BNhXJv79MMbEe7axYT7W91fjsnT0rMqSqzajNjTTDFvPDQu0KkLzC-UUnlG3RdNHhzxGVnUIA9lIeJuVKnlCXAexPQr6HeX5ggbe-CZO_uMjFZjwBnjLC-IJsIKKaz8I4CbFxz10vAl5SpJ7PadA1iZZEvr_VYhhG42qMqRFLzkrXtWUG0NX8aSitJT0Wk9c54ME13WDZb6MfRXwUWbARu-TLN56KrPaqtL2dBtRG2EFOn5nVXARI7jPzhjg Try it!!! Abra o token no site jwt.io e verifique seu conte\u00fado.
"},{"location":"handout/devops/kubernetes/#set-up-the-credential-to-jenkins","title":"Set up the credential to Jenkins","text":"Before going ahead
Instale os plugins: Kubernetes Cli e Kubernetes pipeline.
Manage Jenkins > Credentials
"},{"location":"handout/devops/kubernetes/#updating-the-jenkinsfile","title":"Updating the Jenkinsfile","text":"Adding the Deploy on k8s
stage:
Jenkinsfile...\n stage('Deploy on local k8s') {\n steps {\n withCredentials([ string(credentialsId: 'minikube-credentials', variable: 'api_token') ]) {\n sh 'kubectl --token $api_token --server https://host.docker.internal:55529 --insecure-skip-tls-verify=true apply -f ./k8s/deployment.yaml '\n sh 'kubectl --token $api_token --server https://host.docker.internal:55529 --insecure-skip-tls-verify=true apply -f ./k8s/service.yaml '\n }\n }\n }\n...\n
"},{"location":"handout/devops/kubernetes/#kubectl-config","title":"kubectl config","text":"kubectl config get-contexts
"},{"location":"handout/devops/kubernetes/#references","title":"References:","text":"[1^]: Using a Service to Expose Your App
[2^]: Install Kubernetes's Tools
[3^]: How to Deploy Postgres to Kubernetes Cluster
[4^]: Spring boot, PostgreSQL and Kubernetes
[5^]: Deploy nodejs App in a Minikube Kubernetes using Jenkins CI/CD pipeline
[6^]: Horizontal Pod Autoscaling
"},{"location":"handout/devops/observability/","title":"Observability","text":" - Logging
- Monitoring
- Tracing
"},{"location":"handout/devops/observability/#microservice","title":"Microservice","text":"pom.xml<!-- metricas de uso -->\n<dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-actuator</artifactId>\n</dependency>\n\n<!-- exporta no formato prometheus -->\n<dependency>\n <groupId>io.micrometer</groupId>\n <artifactId>micrometer-registry-prometheus</artifactId>\n <scope>runtime</scope>\n</dependency>\n
application.yamlmanagement:\n endpoints:\n web:\n base-path: /gateway/actuator\n exposure:\n include: [ 'prometheus' ]\n
"},{"location":"handout/devops/observability/#docker","title":"Docker","text":"docker-compose.yaml prometheus:\n image: prom/prometheus:latest\n container_name: store-prometheus\n ports:\n - 9090:9090\n volumes:\n - $VOLUME/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml\n networks:\n - private-network\n\n grafana:\n container_name: store-grafana\n image: grafana/grafana-enterprise\n ports:\n - 3000:3000\n environment:\n - GF_SECURITY_ADMIN_PASSWORD=admin\n volumes:\n - $VOLUME/grafana:/var/lib/grafana\n - $VOLUME/grafana/provisioning/datasources:/etc/grafana/provisioning/datasources \n restart: always\n networks:\n - private-network\n
"},{"location":"handout/devops/observability/#prometheus","title":"Prometheus","text":"$VOLUME/prometheus/prometheus.ymlscrape_configs:\n\n - job_name: 'GatewayMetrics'\n metrics_path: '/gateway/actuator/prometheus'\n scrape_interval: 1s\n static_configs:\n - targets:\n - gateway:8080\n labels:\n application: 'Gateway Application'\n\n - job_name: 'AuthMetrics'\n metrics_path: '/auth/actuator/prometheus'\n scrape_interval: 1s\n static_configs:\n - targets:\n - auth:8080\n labels:\n application: 'Auth Application'\n\n - job_name: 'AccountMetrics'\n metrics_path: '/accounts/actuator/prometheus'\n scrape_interval: 1s\n static_configs:\n - targets:\n - account:8080\n labels:\n application: 'Account Application'\n
http://localhost:9090/
"},{"location":"handout/devops/observability/#grafana","title":"Grafana","text":"$VOLUME/grafana/provisioning/datasources/datasources.ymlapiVersion: 1\ndatasources:\n - name: Prometheus\n type: prometheus\n access: proxy\n url: http://prometheus:9090\n isDefault: true\n
http://localhost:3000/
- Dashboard MarketPlace
"},{"location":"handout/microservices/account/","title":"Account","text":"Esse microservi\u00e7o \u00e9 respons\u00e1vel por gerenciar as contas dos usu\u00e1rios do sistema que est\u00e1 sendo desenvolvido. Ele tamb\u00e9m pode ser utilizado como template para o desenvolvimento de outros microservi\u00e7os que se utilizem de recursos semelhantes em seu funcionamento.
- Endpoints
- Modulariza\u00e7\u00e3o
- Interface
- Resource
- Persist\u00eancia
- Documenta\u00e7\u00e3o
- Integra\u00e7\u00e3o
- Docker
"},{"location":"handout/microservices/account/#endpoints","title":"Endpoints","text":"Create Account POST /accounts\n
Request
{\n \"name\": \"Antonio do Estudo\",\n \"email\": \"acme@insper.edu.br\",\n \"password\": \"123@\"\n}\n
Responses: codebody 201
{\n \"id\": \"45d16201-12a4-48bf-8c84-df768fdc4878\",\n \"name\": \"Antonio do Estudo\",\n \"email\": \"acme@insper.edu.br\"\n}\n
401 Login :: find by email and password Get Account GET /accounts/{uuid}\n
Responses:
codebody 200
{\n \"id\": \"45d16201-12a4-48bf-8c84-df768fdc4878\",\n \"name\": \"Antonio do Estudo\",\n \"email\": \"acme@insper.edu.br\"\n}\n
401 "},{"location":"handout/microservices/account/#modularizacao","title":"Modulariza\u00e7\u00e3o","text":"Class Diagram
Exemplo para o microsservi\u00e7o Account.
classDiagram\n namespace Interface {\n class AccountController {\n <<interface>>\n create(AccountIn)\n read(String id): AccountOut\n update(String id, AccountIn)\n delete(String id)\n findByEmailAndPassword(AccountIn)\n }\n class AccountIn {\n <<record>>\n String name\n String email\n String password\n }\n class AccountOut {\n <<record>>\n String id\n String name\n String email\n }\n }\n namespace Resource {\n class AccountResource {\n <<REST API>>\n -accountService\n }\n class AccountService {\n <<service>>\n -accountRepository\n create(Account)\n }\n class AccountRepository {\n <<nterface>>\n findByEmailAndHash(String, String)\n }\n class AccountModel {\n <<entity>>\n String id\n String name\n String email\n String hash\n }\n class Account {\n <<dto>>\n String id\n String name\n String email\n String password\n }\n }\n AccountController <|-- AccountResource\n AccountResource o-- AccountService\n AccountService o-- AccountRepository
"},{"location":"handout/microservices/account/#pom-dependecy","title":"POM dependecy","text":"Note que esse microsservi\u00e7o possui depend\u00eancia da interface, o Account. Logo, se torna necess\u00e1rio explicitar essa depend\u00eancia no pom.xml
do microsservi\u00e7o Account.
<dependency>\n <groupId>insper.store</groupId>\n <artifactId>account</artifactId>\n <version>${project.version}</version>\n</dependency>\n
Outras depend\u00eancias relevantes para adicionar no pom.xml
s\u00e3o o suporte ao registro no discovery.
<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>\n</dependency>\n
Quando adicionado o acesso ao Discovery, \u00e9 necess\u00e1rio definir no application.yaml
o nome com o qual o servi\u00e7o ser\u00e1 invocado, assim bem como, o endere\u00e7o de acesso do discovery ao qual o servi\u00e7o ir\u00e1 conectar:
spring:\n application:\n name: store-account\n\neureka:\n client:\n register-with-eureka: true\n fetch-registry: true\n service-url:\n defaultZone: ${EUREKA_URI:http://localhost:8761/eureka/}\n
J\u00e1 para disponibilizar o uso ao OpenFeign
.
<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-openfeign</artifactId>\n</dependency>\n
"},{"location":"handout/microservices/auth/","title":"Auth","text":"A fim do sistema possuir um controle de acesso, \u00e9 conveniente a cria\u00e7\u00e3o de um microsservi\u00e7o Auth, que ser\u00e1 respons\u00e1vel pelo cadastro de usu\u00e1rios do sistema.
- Endpoints
- Modulariza\u00e7\u00e3o
- Interface
- Resource
- Documenta\u00e7\u00e3o
- Integra\u00e7\u00e3o
- Token
- Docker
"},{"location":"handout/microservices/auth/#endpoints","title":"Endpoints","text":"Register POST /auth/register\n
Autentica\u00e7\u00e3o :: Login POST /auth/login\n
"},{"location":"handout/microservices/auth/#request","title":"Request","text":"{\n \"name\": \"Antonio do Estudo\",\n \"email\": \"acme@insper.edu.br\",\n \"password\": \"123@321\"\n}\n
"},{"location":"handout/microservices/auth/#response","title":"Response","text":"code body 201"},{"location":"handout/microservices/auth/#sequence-diagram","title":"Sequence Diagram","text":"sequenceDiagram\nautonumber\nactor User\nUser->>+Auth: register(RegisterIn)\nAuth->>+Account: create(AccountIn)\nAccount->>-Auth: returns the new account (AccountOut)\nAuth->>-User: returns 201
"},{"location":"handout/microservices/auth/#request_1","title":"Request","text":"{\n \"email\": \"acme@insper.edu.br\",\n \"password\": \"123@321\"\n}\n
"},{"location":"handout/microservices/auth/#response_1","title":"Response","text":"code body 201 { \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiI0NWQxNjIwMS0xMmE0LTQ4YmYtOGM4NC1kZjc2OGZkYzQ4NzgiLCJuYW1lIjoiQW50b25pbyBkbyBFc3R1ZG8iLCJpYXQiOjE1MTYyMzkwMjIsInJvbGUiOiJyZWd1bGFyIn0.8eiTZjXGUFrseBP5J91UdDctw-Flp7HP-PAp1eO8f1M\" }
403"},{"location":"handout/microservices/auth/#sequence-diagram_1","title":"Sequence Diagram","text":"sequenceDiagram\nautonumber\nactor User\nUser->>+Auth: authenticate(CredentiaIn)\nAuth->>+Account: login(LoginIn)\ncritical validated\n Account->>-Auth: returns the account\noption denied\n Auth-->>User: unauthorized message\nend \nAuth->>Auth: generates a token\nAuth->>-User: returns LoginOut\nUser->>User: stores the token to use for the next requests
"},{"location":"handout/microservices/auth/#modularizacao","title":"Modulariza\u00e7\u00e3o","text":"Exemplo para o microsservi\u00e7o Auth.
classDiagram\n namespace Interface {\n class AuthController {\n <<interface>>\n register(RegisterIn)\n authenticate(CredentialIn): LoginOut\n solve(SolveIn): SolveOut\n }\n class RegisterIn {\n <<record>>\n String name\n String email\n String password\n }\n class CredentialIn {\n <<record>>\n String email\n String password\n }\n class LoginOut {\n <<Record>>\n String token\n }\n class SolveIn {\n <<Record>>\n String token\n }\n class SolveOut {\n <<Record>>\n String id\n String name\n String role\n }\n }\n namespace Resource {\n class AuthResource {\n <<REST API>>\n -authService\n }\n class AuthService {\n <<service>>\n JwtService jwtService\n register(RegisterIn)\n authenticate(CredentialIn)\n }\n class JwtService {\n <<service>>\n String secretKey\n String issuer\n long duration\n SecretKey key\n JwtParser parser\n init()\n create(String id, String name, String role): String\n getToken(String token): Token\n getRole(String token): String\n }\n class Token {\n <<record>>\n String id\n String name\n String role\n }\n }\n AuthController <|-- AuthResource\n AuthResource o-- AuthService\n AuthService o-- JwtService
Exemplo de uma implementa\u00e7\u00e3o da interface AuthController.
AuthController.javapackage store.auth;\n\nimport org.springframework.cloud.openfeign.FeignClient;\nimport org.springframework.http.ResponseEntity;\nimport org.springframework.web.bind.annotation.PostMapping;\nimport org.springframework.web.bind.annotation.RequestBody;\n\n@FeignClient(\"store-auth\")\npublic interface AuthController {\n\n @PostMapping(\"/auth/register\")\n ResponseEntity<?> create (\n @RequestBody(required = true) RegisterIn in\n );\n\n @PostMapping(\"/auth/login\")\n ResponseEntity<LoginOut> authenticate (\n @RequestBody(required = true) Credential in\n );\n}\n
Repare que h\u00e1 a publica\u00e7\u00e3o da interface como sendo um servi\u00e7o a ser registrado no Discovery.
"},{"location":"handout/microservices/auth/#documentacao","title":"Documenta\u00e7\u00e3o","text":"Para fazer a documenta\u00e7\u00e3o dos APIs, de forma automatizada, \u00e9 aconselh\u00e1vel a utiliza\u00e7\u00e3o da biblioteca SpringDoc OpenAPI
.
pom.xml<!-- https://mvnrepository.com/artifact/org.springdoc/springdoc-openapi-ui -->\n<dependency>\n <groupId>org.springdoc</groupId>\n <artifactId>springdoc-openapi-starter-webmvc-ui</artifactId>\n <version>[2.3.0,)</version>\n</dependency>\n
"},{"location":"handout/microservices/auth/#integracao","title":"Integra\u00e7\u00e3o","text":"A integra\u00e7\u00e3o entre os microsservi\u00e7os \u00e9 feita via OpenFeign. Esse framework precisa saber, quando a aplica\u00e7\u00e3o sobe, em quais pacotes ir\u00e1 procurar os servi\u00e7os. Para isso, se torna necess\u00e1rio anotar a classe AuthApplication
com a lista de pacotes, assim bem como, anotar que esse microsservi\u00e7o ir\u00e1 trabalhar com a sistema de descoberta de microsservi\u00e7os habitado.
AuthApplication.javapackage store.auth;\n\nimport org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\nimport org.springframework.cloud.client.discovery.EnableDiscoveryClient;\nimport org.springframework.cloud.openfeign.EnableFeignClients;\n\n@EnableFeignClients(basePackages = {\n \"insper.store.account\"\n})\n@EnableDiscoveryClient\n@SpringBootApplication\npublic class AuthApplication {\n\n public static void main(String[] args) {\n SpringApplication.run(AuthApplication.class, args);\n }\n\n}\n
Necess\u00e1rio tamb\u00e9m atualizar o pom.xml
para que o microsservi\u00e7o possa enxergar o outro microsservi\u00e7o.
Note que esse microsservi\u00e7o possui depend\u00eancia de outro, o Account, al\u00e9m da depend\u00eancia da interface do pr\u00f3prio microsservi\u00e7o. Logo, se torna necess\u00e1rio explicitar essa depend\u00eancia no pom.xml
do microsservi\u00e7o Auth.
pom.xml<dependency>\n <groupId>insper.store</groupId>\n <artifactId>auth</artifactId>\n <version>${project.version}</version>\n</dependency>\n<dependency>\n <groupId>insper.store</groupId>\n <artifactId>account</artifactId>\n <version>${project.version}</version>\n</dependency>\n\n<!-- https://mvnrepository.com/artifact/io.jsonwebtoken/jjwt-api -->\n<dependency>\n <groupId>io.jsonwebtoken</groupId>\n <artifactId>jjwt-api</artifactId>\n <version>0.12.3</version>\n</dependency>\n<dependency>\n <groupId>io.jsonwebtoken</groupId>\n <artifactId>jjwt-impl</artifactId>\n <version>0.12.3</version>\n <scope>runtime</scope>\n</dependency>\n<dependency>\n <groupId>io.jsonwebtoken</groupId>\n <artifactId>jjwt-jackson</artifactId> <!-- or jjwt-gson if Gson is preferred -->\n <version>0.12.3</version>\n <scope>runtime</scope>\n</dependency>\n
Aproveitando esse ponto, vale a pena j\u00e1 incluir tamb\u00e9m no pom.xml
.
"},{"location":"handout/microservices/auth/#token","title":"Token","text":"Para gerar o token de acesso, no caso JWT, um servi\u00e7o foi criado, JwtService.java
.
Para gerar o JWT, alguns atributos s\u00e3o adicionados no application.yaml
.
application.yamlstore:\n jwt:\n issuer: \"In5pEr\"\n secretKey: \"\"\n duration: 31536000000 # 365 days in miliseconds\n
JwtService.javapackage store.auth;\n\nimport java.util.Date;\n\nimport javax.crypto.SecretKey;\n\nimport org.springframework.beans.factory.annotation.Value;\nimport org.springframework.stereotype.Service;\n\nimport io.jsonwebtoken.Claims;\nimport io.jsonwebtoken.ExpiredJwtException;\nimport io.jsonwebtoken.JwtParser;\nimport io.jsonwebtoken.Jwts;\nimport io.jsonwebtoken.io.Decoders;\nimport io.jsonwebtoken.security.Keys;\nimport jakarta.annotation.PostConstruct;\n\n@Service\npublic class JwtService {\n\n @Value(\"${store.jwt.secret-key}\")\n private String secretKey;\n\n @Value(\"${store.jwt.issuer}\")\n private String issuer;\n\n @Value(\"${store.jwt.duration}\")\n private long duration = 1l;\n\n private SecretKey key;\n private JwtParser parser;\n\n @PostConstruct\n public void init() {\n this.key = Keys.hmacShaKeyFor(Decoders.BASE64.decode(secretKey));\n this.parser = Jwts.parser().verifyWith(key).build();\n }\n\n public String create(String id, String name, String role) {\n String jwt = Jwts.builder()\n .header()\n .and()\n .id(id)\n .issuer(issuer)\n .subject(name)\n .signWith(key)\n .claim(\"role\", role)\n .notBefore(new Date())\n .expiration(new Date(new Date().getTime() + duration))\n .compact();\n return jwt;\n }\n\n public String getToken(String token) {\n final Claims claims = resolveClaims(token);\n return Token.builder\n .id(claims.getId())\n .role(claims.get(\"role\", String.class))\n .build();\n }\n\n private Claims resolveClaims(String token) {\n if (token == null) throw new io.jsonwebtoken.MalformedJwtException(\"token is null\");\n return validateClaims(parser.parseSignedClaims(token).getPayload());\n }\n\n private Claims validateClaims(Claims claims) throws ExpiredJwtException {\n if (claims.getExpiration().before(new Date())) throw new ExpiredJwtException(null, claims, issuer);\n if (claims.getNotBefore().after(new Date())) throw new ExpiredJwtException(null, claims, issuer);\n return claims;\n }\n\n}\n
"},{"location":"handout/microservices/auth/#docker","title":"Docker","text":"Adicione no docker-compose.yaml
o registro desse novo microsservi\u00e7o:
docker-compose.yaml auth:\n build:\n context: ../store.auth-resource/\n dockerfile: Dockerfile\n container_name: store-auth\n image: store-auth:latest\n # ports:\n # - 8080:8080\n environment:\n - eureka.client.service-url.defaultZone=http://store-discovery:8761/eureka/\n deploy:\n mode: replicated\n replicas: 1\n restart: always\n networks:\n - private-network\n depends_on:\n - discovery\n - account\n
NICE TO HAVE O projeto da disciplina pode ter um microsservi\u00e7o de registro que valide email ou SMS para criar a conta.
"},{"location":"handout/microservices/gateway/","title":"Gateway","text":"O gateway tem como fun\u00e7\u00e3o ser o \u00fanico ponto de entrada de todo o sistema, ele \u00e9 respons\u00e1vel por redirecionar todas as requisi\u00e7\u00f5es aos respectivos microsservi\u00e7os. Assim bem como, de autorizar ou negar acesso ao sistema baseando-se no token de seguran\u00e7a passado pela requisi\u00e7\u00e3o.
flowchart LR\n subgraph Client\n direction LR\n Web\n Mobile\n Desktop\n end\n subgraph Microservices\n direction LR\n gateway[\"Gateway\"]\n subgraph Essentials\n direction TB\n discovery[\"Discovery\"]\n auth[\"Auth\"]\n config[\"Configuration\"]\n end\n subgraph Businesses\n direction TB\n ms1[\"Service 1\"]\n ms2[\"Service 2\"]\n ms3[\"Service 3\"]\n end\n end\n Client --> lb[\"Load Balance\"] --> gateway --> Businesses\n gateway --> auth\n gateway --> discovery
"},{"location":"handout/microservices/gateway/#sequence-diagram","title":"Sequence Diagram","text":"sequenceDiagram\n autonumber\n actor User\n User->>Gateway: route(ServerHttpRequest)\n Gateway->>+AuthenticationFilter: filter(ServerWebExchange, GatewayFilterChain)\n AuthenticationFilter->>RouteValidator: isSecured.test(ServerHttpRequest)\n RouteValidator-->>AuthenticationFilter: True | False\n critical notSecured\n AuthenticationFilter->>Gateway: follow the flux\n end\n AuthenticationFilter->>AuthenticationFilter: isAuthMissing(ServerHttpRequest)\n critical isAuthMissing\n AuthenticationFilter->>User: unauthorized message\n end\n AuthenticationFilter->>AuthenticationFilter: validateAuthorizationHeader()\n critical isInvalidAuthorizationHeader\n AuthenticationFilter->>User: unauthorized message\n end\n AuthenticationFilter->>Auth: solve(Token)\n critical isInvalidToken\n Auth->>User: unauthorized message\n end\n Auth->>AuthenticationFilter: returns token claims\n AuthenticationFilter->>AuthenticationFilter: updateRequestHeader(ServerHttpRequest)\n AuthenticationFilter->>Gateway: follow the flux
pom.xml<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-gateway</artifactId>\n</dependency>\n<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>\n</dependency>\n<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-loadbalancer</artifactId>\n</dependency>\n<dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-webflux</artifactId>\n</dependency>\n<!-- https://mvnrepository.com/artifact/com.github.ben-manes.caffeine/caffeine -->\n<dependency>\n <groupId>com.github.ben-manes.caffeine</groupId>\n <artifactId>caffeine</artifactId>\n <version>3.1.8</version>\n</dependency>\n<dependency>\n <groupId>insper.store</groupId>\n <artifactId>auth</artifactId>\n <version>0.0.1-SNAPSHOT</version>\n</dependency>\n
application.yamlspring:\n application:\n name: store-gateway\n cloud:\n discovery:\n locator:\n enabled: true\n gateway:\n routes:\n\n - id: auth\n uri: lb://store-auth\n predicates:\n - Path=/auth/**\n\n # - id: product\n # uri: lb://store-product\n # predicates:\n # - Path=/product/**\n\n default-filters:\n - DedupeResponseHeader=Access-Control-Allow-Credentials Access-Control-Allow-Origin\n globalcors:\n corsConfigurations:\n '[/**]':\n allowedOrigins: \"http://localhost\"\n allowedHeaders: \"*\"\n allowedMethods:\n - GET\n - POST\n\napi:\n endpoints:\n open: >\n POST /auth/register/,\n POST /auth/login/\n
GatewayConfiguration.javapackage insper.store.gateway;\n\nimport org.springframework.cloud.client.loadbalancer.LoadBalanced;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.web.reactive.function.client.WebClient;\n\n@Configuration\npublic class GatewayConfiguration {\n\n @Bean\n @LoadBalanced\n public WebClient.Builder webClient() {\n return WebClient.builder();\n }\n\n}\n
RouterValidator.javapackage insper.store.gateway.security;\n\nimport java.util.List;\nimport java.util.function.Predicate;\n\nimport org.springframework.beans.factory.annotation.Value;\nimport org.springframework.http.server.reactive.ServerHttpRequest;\nimport org.springframework.stereotype.Component;\n\n@Component\npublic class RouterValidator {\n\n @Value(\"${api.endpoints.open}\") \n private List<String> openApiEndpoints;\n\n public Predicate<ServerHttpRequest> isSecured =\n request -> openApiEndpoints\n .stream()\n .noneMatch(uri -> {\n String[] parts = uri.replaceAll(\"[^a-zA-Z0-9// ]\", \"\").split(\" \");\n return request.getMethod().toString().equalsIgnoreCase(parts[0])\n && request.getURI().getPath().equals(parts[1]);\n });\n\n}\n
AuthenticationFilter.javapackage insper.store.gateway.security;\n\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.cloud.gateway.filter.GatewayFilterChain;\nimport org.springframework.cloud.gateway.filter.GlobalFilter;\nimport org.springframework.http.HttpHeaders;\nimport org.springframework.http.HttpStatus;\nimport org.springframework.http.MediaType;\nimport org.springframework.http.server.reactive.ServerHttpRequest;\nimport org.springframework.stereotype.Component;\nimport org.springframework.web.reactive.function.client.WebClient;\nimport org.springframework.web.server.ResponseStatusException;\nimport org.springframework.web.server.ServerWebExchange;\n\nimport reactor.core.publisher.Mono;\nimport store.auth.IdIn;\nimport store.auth.IdOut;\n\n@Component\npublic class AuthenticationFilter implements GlobalFilter {\n\n private static final String HEADER_AUTHORIZATION = \"Authorization\";\n private static final String HEADER_BEARER = \"Bearer\";\n\n @Autowired\n private RouterValidator routerValidator;\n\n @Autowired\n private WebClient.Builder webClient;\n\n @Override\n public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {\n ServerHttpRequest request = exchange.getRequest();\n if (!routerValidator.isSecured.test(request)) {\n return chain.filter(exchange);\n }\n if (!isAuthMissing(request)) {\n final String[] parts = this.getAuthHeader(request).split(\" \");\n if (parts.length != 2 || !parts[0].equals(HEADER_BEARER)) {\n throw new ResponseStatusException(HttpStatus.FORBIDDEN, \"Authorization header format must be Bearer {token}\");\n }\n final String token = parts[1];\n return webClient\n .defaultHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE)\n .build()\n .post()\n .uri(\"http://store-auth/auth/token/\")\n .bodyValue(new IdIn(token))\n .retrieve()\n .toEntity(IdOut.class)\n .flatMap(response -> {\n if (response != null && response.getBody() != null) {\n 
this.updateRequest(exchange, response.getBody().id());\n return chain.filter(exchange);\n } else {\n throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, \"Invalid token\");\n }\n });\n }\n throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, \"Missing authorization header\");\n }\n\n private String getAuthHeader(ServerHttpRequest request) {\n return request.getHeaders().getOrEmpty(HEADER_AUTHORIZATION).get(0);\n }\n\n private boolean isAuthMissing(ServerHttpRequest request) {\n return !request.getHeaders().containsKey(\"Authorization\");\n } \n\n private void updateRequest(ServerWebExchange exchange, String id) {\n exchange.getRequest().mutate()\n .header(\"id-user\", id)\n .build();\n }\n\n}\n
"},{"location":"handout/microservices/roadmap/","title":"Roadmap","text":""},{"location":"handout/microservices/roadmap/#microsservico","title":"Microsservi\u00e7o","text":"A fim de implementar microsservi\u00e7os em Spring Boot, aqui, \u00e9 proposto uma abordagem de modulariza\u00e7\u00e3o de cada microsservi\u00e7o, de forma que exista uma interface de comunica\u00e7\u00e3o Java a ser consumida por outros microsservi\u00e7os, tamb\u00e9m em Java, e tamb\u00e9m um compromisso de implementa\u00e7\u00e3o. Essa estrat\u00e9gia visa aumentar a produtividade do ambiente de desenvolvimento em Java, j\u00e1 que para o consumo da API por outros frameworks sempre ser\u00e1 necess\u00e1rio reescrever as assinaturas de cada endpoint.
"},{"location":"handout/microservices/roadmap/#modularizacao","title":"Modulariza\u00e7\u00e3o","text":"Crie dois projetos Mavens:
- um de interface, e;
- outro para o microsservi\u00e7o.
A vantagem dessa abordagem \u00e9 que a interface pode ser utilizada em outros projetos como uma biblioteca a ser consumida.
Exemplo de uso dessa abordagem no microsservi\u00e7o Account:
classDiagram\n namespace Interface {\n class AccountController {\n <<interface>>\n create(AccountIn)\n read(String id): AccountOut\n update(String id, AccountIn)\n delete(String id)\n findByEmailAndPassword(AccountIn)\n }\n class AccountIn {\n <<record>>\n String name\n String email\n String password\n }\n class AccountOut {\n <<record>>\n String id\n String name\n String email\n }\n }\n namespace Resource {\n class AccountResource {\n <<REST API>>\n -accountService\n }\n class AccountService {\n <<service>>\n -accountRepository\n create(Account)\n }\n class AccountRepository {\n <<interface>>\n findByEmailAndHash(String, String)\n }\n class AccountModel {\n <<entity>>\n String id\n String name\n String email\n String hash\n }\n class Account {\n <<dto>>\n String id\n String name\n String email\n String password\n }\n }\n AccountController <|-- AccountResource\n AccountResource o-- AccountService\n AccountService o-- AccountRepository
"},{"location":"handout/microservices/roadmap/#interface","title":"Interface","text":"Para compilar e instalar a interface do microsservi\u00e7o, crie um pom.xml
espec\u00edfico para essa interface e seus dtos (AccountIn e AccountOut).
Installing the microservice interfacemvn clean install\n
"},{"location":"handout/microservices/roadmap/#implementacao","title":"Implementa\u00e7\u00e3o","text":"A implementa\u00e7\u00e3o n\u00e3o precisa ser instalada como biblioteca do reposit\u00f3rio Maven, pois \u00e9 apenas para execu\u00e7\u00e3o do microsservi\u00e7o. Por\u00e9m, o microsservi\u00e7o deve ter expl\u00edcita a chamada da biblioteca de interface no seu pom.xml
.
<dependency>\n <groupId>insper.store</groupId>\n <artifactId>account</artifactId>\n <version>${project.version}</version>\n</dependency>\n
O comando para empacotar o microsservi\u00e7o \u00e9:
Packaging the microservicemvn clean package\n
Adicionalmente, para executar o microsservi\u00e7o:
Packaging and running the microservicemvn clean package spring-boot:run\n
"},{"location":"handout/microservices/roadmap/#banco-de-dados","title":"Banco de dados","text":"Muitos microsservi\u00e7os podem persistir seus dados em banco de dados. Cada microsservi\u00e7o \u00e9 respons\u00e1vel pelo acesso e grava\u00e7\u00e3o de seus dados de forma aut\u00f4noma.
Isso aumenta de forma significativa a complexidade do gerenciamento do microsservi\u00e7o, pois se torna necess\u00e1rio manter o gerenciamento da base de dados tais como: altera\u00e7\u00f5es, vers\u00f5es e roteiros de retornos.
O Flyway \u00e9 uma biblioteca que pode ser acoplada ao framework Spring Boot a fim de ajudar na tarefa de gerenciamento e cria\u00e7\u00e3o do sistema de persist\u00eancia dos dados do microsservi\u00e7o.
Para fazer uso dessa biblioteca, altere o pom.xml
adicionando a depend\u00eancia da biblioteca JPA assim bem como a depend\u00eancia da biblioteca Flyway.
<dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-data-jpa</artifactId>\n</dependency>\n<dependency>\n <groupId>org.flywaydb</groupId>\n <artifactId>flyway-core</artifactId>\n</dependency>\n<dependency>\n <groupId>org.postgresql</groupId>\n <artifactId>postgresql</artifactId>\n <version>42.7.2</version>\n</dependency>\n
Altera\u00e7\u00f5es no arquivo de propriedades tamb\u00e9m s\u00e3o necess\u00e1rias, para definir o banco de dados e sua configura\u00e7\u00e3o JPA, assim bem como, a configura\u00e7\u00e3o do Flyway.
Exemplo baseado no microsservi\u00e7o Accountspring:\n datasource:\n url: ${DATABASE_URL:jdbc:postgresql://localhost:5432/store}\n username: ${DATABASE_USERNAME:store}\n password: ${DATABASE_PASSWORD:store123321}\n driver-class-name: org.postgresql.Driver\n flyway:\n baseline-on-migrate: true\n schemas: account\n jpa:\n properties:\n hibernate:\n default_schema: account\n
A estrutura de organiza\u00e7\u00e3o e execu\u00e7\u00e3o de scripts de banco de dados do Flyway \u00e9 persistida na seguinte hierarquia de diret\u00f3rios, onde cada arquivo \u00e9 executado em ordem alfanum\u00e9rica.
exemplo\ud83d\udcc4 store.account\n\ud83d\udcc1 store.account-resource\n\u2514\u2500\u2500 \ud83d\udcc1 src\n \u2514\u2500\u2500 \ud83d\udcc1 main\n \u251c\u2500\u2500 \ud83d\udcc4 java\n \u2514\u2500\u2500 \ud83d\udcc1 resources\n \u251c\u2500\u2500 \ud83d\udcc1 db\n \u2502 \u2514\u2500\u2500 \ud83d\udcc1 migration\n \u2502 \u251c\u2500\u2500 \ud83d\udcc4 V2024.02.16.001__create_schema.sql\n \u2502 \u2514\u2500\u2500 \ud83d\udcc4 V2024.02.16.002__create_table_account.sql\n \u2514\u2500\u2500 \ud83d\udcc4 application.yaml\n
V2024.02.16.001__create_schema.sqlV2024.02.16.002__create_table_account.sql CREATE SCHEMA IF NOT EXISTS account;\n
CREATE TABLE account\n(\n id_account character varying(36) NOT NULL,\n tx_name character varying(256) NOT NULL,\n tx_email character varying(256) NOT NULL,\n tx_hash character varying(256) NOT NULL,\n CONSTRAINT account_pkey PRIMARY KEY (id_account)\n);\n
"},{"location":"handout/microservices/roadmap/#conectando-microsservicos-openfeign","title":"Conectando Microsservi\u00e7os - OpenFeign","text":"Nomeando o microsservi\u00e7o dentro do sistema de discovery.
<dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-openfeign</artifactId>\n</dependency>\n
@FeignClient(name = \"store-account\")\npublic interface AccountController {\n ...\n}\n
"},{"location":"handout/microservices/roadmap/#docker","title":"Docker","text":"Para cada microsservi\u00e7o Java Spring Cloud \u00e9 aconselh\u00e1vel criar um arquivo Dockerfile
no diret\u00f3rio raiz do projeto a fim de permitir a cria\u00e7\u00e3o adequada da imagem do microservi\u00e7o.
Typical Dockerfile for Java microserviceFROM openjdk:23-slim\nVOLUME /tmp\nCOPY target/*.jar app.jar\nENTRYPOINT [\"java\",\"-jar\",\"/app.jar\"]\n
"},{"location":"handout/microservices/roadmap/#docker-compose","title":"Docker Compose","text":"O Docker Compose permite criar um cluster com todos os microsservi\u00e7os necess\u00e1rios para o funcionamento de um sistema em uma rede apartada (nat).
Para criar um docker compose basta criar um arquivo de configura\u00e7\u00e3o chamado docker-compose.yaml
em uma pasta que possa acessar os demais microsservi\u00e7os, como uma pasta store.docker-platform.
exemplo\ud83d\udcc4 store.account\n\ud83d\udcc4 store.account-resource\n\ud83d\udcc1 store.docker-platform\n\u251c\u2500\u2500 \ud83d\udcc4 .env\n\u2514\u2500\u2500 \ud83d\udcc4 docker-compose.yaml\n
Dentro do arquivo, cada microsservi\u00e7o \u00e9 declarado e configurado, utilizando imagem que s\u00e3o criadas no momento de execu\u00e7\u00e3o do docker engine ou imagens que est\u00e3o dispon\u00edveis em algum diret\u00f3rio (eg.: DockerHub).
docker-compose.yaml# docker compose up -d --build --force-recreate\nversion: '3.8'\nname: store\n\nservices:\n\n db-store:\n container_name: store-db-store\n image: postgres:latest\n ports:\n - 5432:5432\n environment:\n - POSTGRES_USER=store\n - POSTGRES_PASSWORD=store\n - POSTGRES_DB=store\n volumes:\n - $VOLUME/postgres/store/data:/var/lib/postgresql/data\n restart: always\n networks:\n - private-network\n\n account:\n build:\n context: ../store.account-resource/\n dockerfile: Dockerfile\n image: store-account:latest\n environment:\n - spring.datasource.url=jdbc:postgresql://store-db-store:5432/store\n - spring.datasource.username=store\n - spring.datasource.password=store\n deploy:\n mode: replicated\n replicas: 1\n restart: always\n networks:\n - private-network\n depends_on:\n - db-store\n\nnetworks:\n private-network:\n driver: bridge\n
Arquivo de configura\u00e7\u00e3o de ambiente.
.envVOLUME=./volume\nCONFIG=./config\n
Na pasta do arquivo docker-compose.yaml
execute o comando docker para criar as imagens e subir os containers:
Rise up a clusterdocker compose up -d --build\n
Shutdown the clusterdocker compose down\n
Refer\u00eancia:
"},{"location":"platform/circuit-breaker/","title":"Circuit Breaker","text":""},{"location":"platform/circuit-breaker/#spring-cloud-circuit-breaker","title":"Spring Cloud Circuit Breaker","text":"Spring Cloud Circuit Breaker is a service resilience pattern that allows you to provide default behavior when a network failure or any exception occurs while invoking a remote service. It's an abstraction over various circuit breaker implementations like Netflix Hystrix, Resilience4j, Sentinel, etc.
Key components of Spring Cloud Circuit Breaker include:
-
Dependency: To use Spring Cloud Circuit Breaker, you need to include the spring-cloud-starter-circuitbreaker-{implementation}
dependency in your project, where {implementation}
could be hystrix
, resilience4j
, sentinel
, etc.
-
Configuration: You can configure the circuit breaker parameters like failure threshold, delay time, etc. in the application.properties (or application.yml) file.
-
Usage: You can use the @CircuitBreaker
annotation on a method to apply the circuit breaker pattern. If the method throws an exception, the circuit breaker will open and provide a fallback method.
"},{"location":"platform/communcation/","title":"Communication","text":""},{"location":"platform/communcation/#synchronous-communication","title":"Synchronous Communication","text":""},{"location":"platform/communcation/#feign","title":"Feign","text":""},{"location":"platform/communcation/#asynchronous-communication","title":"Asynchronous Communication","text":""},{"location":"platform/communcation/#webclient","title":"WebClient","text":""},{"location":"platform/concepts/","title":"Concepts","text":""},{"location":"platform/concepts/#historical-context","title":"Historical Context","text":""},{"location":"platform/concepts/#single-block-system","title":"Single block system","text":"Single block system, following the concept that a system is a blackbox schema, so many projects started in a simple single project that is a good choice to raise a system and try to use the initial features. This is a good approach for small and compact systems or for specialist systems where the speed of application matters.
---\ntitle: blackbox\n---\nflowchart LR\n Input\n subgraph Processing\n direction TB\n Storage\n Business\n UI\n end\n Output\n Input --> UI --> Output
The main disadvantage of this approach is the strong coupling among business, user interface (UI), and storage. The coupling is so strong that there is a mix among all the components, which implies a high cost for maintenance.
"},{"location":"platform/concepts/#splitted-betweeen-data-and-program","title":"Splitted betweeen data and program","text":"---\ntitle: blackbox\n---\nflowchart LR\n subgraph Processing\n direction TB\n subgraph Storage\n x\n end\n subgraph Business\n UI\n end\n end\n Input --> UI --> Output\n Business <-- driver --> Storage
System communicates to only an UI.
Cobol
"},{"location":"platform/concepts/#multi-layer-approach","title":"Multi-layer approach","text":"---\ntitle: blackbox\n---\nflowchart LR\n Input\n subgraph Processing\n direction TB\n Storage\n subgraph _\n Businesses\n UI\n end\n end\n Output\n Input --> UI --> Output\n Business <-- driver --> Storage
"},{"location":"platform/concepts/#mvc-pattern","title":"MVC Pattern","text":"MVC stands for Model-View-Controller. It's a design pattern often used in web development. Here's a brief explanation of each component:
-
Model: This is the part of the system that handles the logic for the application data. Often model objects retrieve data (and store data) from a database.
-
View: This is the part of the system that handles the display of the data. Most often the views are created from the model data.
-
Controller: This is the part of the system that handles user interaction. Typically controllers read data from a view, control user input, and send input data to the model.
The idea behind MVC is that each of these components can be developed and tested independently, which can simplify the overall development process.
timeline\n title Relevant Events\n 1991 : CORBA\n 1994 : GoF\n 1999 : J2EE 1.2 <br> initial specification\n 2002 : Spring\n 2006 : Java EE 5\n 2014 : Spring Boot\n 2019 : Jakarta EE 8
"},{"location":"platform/concepts/#high-perfomance-architectures","title":"High-perfomance Architectures","text":"High-performance architectures refer to the design and configuration of computer systems, networks, and software to achieve optimal speed, responsiveness, throughput, and efficiency. These architectures are specifically tailored to handle large-scale, resource-intensive, and performance-critical workloads. High-performance systems are often employed in scenarios such as data centers, cloud computing environments, scientific computing, financial services, and other applications where speed and efficiency are paramount.
Here are key aspects and principles associated with high-performance architectures:
"},{"location":"platform/concepts/#parallelism-and-concurrency","title":"Parallelism and Concurrency","text":" - High-performance architectures often leverage parallelism and concurrency to execute multiple tasks simultaneously, improving overall throughput.
- Parallel processing involves dividing a task into smaller sub-tasks that can be processed concurrently, often across multiple processors or cores.
- Concurrency allows multiple tasks to be executed concurrently, even if they are not divided into explicit sub-tasks.
"},{"location":"platform/concepts/#distributed-systems","title":"Distributed Systems","text":" - Distributing workloads across multiple nodes in a network is a common strategy for achieving high performance.
- Distributed systems allow for horizontal scaling, where additional resources (nodes) can be added to handle increased demand.
"},{"location":"platform/concepts/#optimized-algorithms-and-data-structures","title":"Optimized Algorithms and Data Structures","text":" - Carefully designed algorithms and data structures are crucial for high performance.
- Efficient algorithms and data structures minimize computational complexity and memory usage.
"},{"location":"platform/concepts/#caching-and-memory-optimization","title":"Caching and Memory Optimization","text":" - Caching is used to store frequently accessed data in a location that allows faster retrieval, reducing the need to recompute or fetch data from slower storage.
- Memory optimization involves efficiently managing memory usage to minimize latency and improve responsiveness.
"},{"location":"platform/concepts/#scalability","title":"Scalability","text":" - High-performance architectures are designed to scale horizontally or vertically to accommodate growing workloads.
- Horizontal scalability involves adding more nodes or machines, while vertical scalability involves increasing the resources of individual nodes.
"},{"location":"platform/concepts/#load-balancing","title":"Load Balancing","text":" - Load balancing ensures that incoming requests are distributed evenly across multiple servers or resources.
- This helps prevent individual components from becoming bottlenecks and ensures optimal resource utilization.
"},{"location":"platform/concepts/#fault-tolerance-and-redundancy","title":"Fault Tolerance and Redundancy","text":" - High-performance architectures often incorporate redundancy and fault-tolerant mechanisms to ensure continuous operation in the face of hardware failures or network issues.
"},{"location":"platform/concepts/#specialized-hardware","title":"Specialized Hardware","text":" - In some cases, high-performance architectures may use specialized hardware, such as Graphics Processing Units (GPUs) or Field-Programmable Gate Arrays (FPGAs), to accelerate specific types of computations.
"},{"location":"platform/concepts/#optimized-network-architecture","title":"Optimized Network Architecture","text":" - Efficient communication between nodes is critical for high performance. Optimized network architectures, low-latency interconnects, and high-bandwidth connections contribute to overall system efficiency.
"},{"location":"platform/concepts/#monitoring-and-performance-tuning","title":"Monitoring and Performance Tuning","text":" - Continuous monitoring and performance tuning are essential to identify and address bottlenecks, optimize resource utilization, and ensure that the system is operating at peak efficiency.
"},{"location":"platform/concepts/#asynchronous-and-event-driven-design","title":"Asynchronous and Event-Driven Design","text":" - Asynchronous and event-driven architectures can improve system responsiveness by allowing components to operate independently and respond to events as they occur.
High-performance architectures are tailored to the specific requirements of the applications they support. They often involve a combination of hardware and software optimizations to achieve the desired level of performance for a given workload. It's important to note that designing and maintaining high-performance architectures can be complex and may involve trade-offs between factors such as cost, complexity, and ease of maintenance.
"},{"location":"platform/concepts/#cap-theorem","title":"CAP theorem","text":"CAP theorem, also known as Brewer's theorem, is a concept in distributed systems that addresses the trade-offs among three fundamental aspects: Consistency, Availability, and Partition Tolerance. It was introduced by computer scientist Eric Brewer in 2000. The CAP theorem suggests that in a distributed system, it is impossible to simultaneously achieve all three of these guarantees. A system can provide at most two out of the three.
Here are the key components of the CAP theorem:
"},{"location":"platform/concepts/#consistency-c","title":"Consistency (C)","text":" - Definition: Every read receives the most recent write or an error. In other words, all nodes in the system see the same data at the same time.
- Implication: Ensuring consistency means that any read operation on the system will reflect the most recent write, even in the presence of concurrent operations.
"},{"location":"platform/concepts/#availability-a","title":"Availability (A)","text":" - Definition: Every request for a read or write operation receives a response without the guarantee that it contains the most recent version of the data.
- Implication: An available system can provide a response to read or write requests even if it may not reflect the most recent update. The system is operational and accessible.
"},{"location":"platform/concepts/#partition-tolerance-p","title":"Partition Tolerance (P)","text":" - Definition: The system continues to operate even when network partitions occur, meaning that communication between nodes is lost or delayed.
- Implication: In a partition-tolerant system, the network can be unreliable or experience failures, and the system can still function.
According to the CAP theorem, a distributed system can prioritize at most two of these three guarantees, and the choice depends on the system's requirements and the nature of the application. Here are three possible scenarios:
- CA (Consistency and Availability): In scenarios where network partitions are rare and can be quickly resolved, a system may prioritize consistency and availability. This is common in traditional relational databases where consistency is crucial.
- CP (Consistency and Partition Tolerance): In scenarios where the network is unreliable, and partitions are frequent, a system may prioritize consistency and partition tolerance. This is common in systems that require strong consistency, such as many distributed databases.
- AP (Availability and Partition Tolerance): In scenarios where network partitions are common, and the system needs to remain operational, a system may prioritize availability and partition tolerance. This is common in systems where high availability and fault tolerance are critical, even if it means sacrificing strong consistency.
Source: Wikipedia - CAP Theorem It's important to note that the CAP theorem provides a theoretical framework for understanding trade-offs in distributed systems but does not prescribe specific solutions. Different systems may make different choices based on their specific requirements and use cases. Additionally, advancements in distributed systems research have led to the exploration of systems that aim to provide a balance between the three aspects, challenging the strict interpretation of the CAP theorem in some cases.
"},{"location":"platform/concepts/#scalability_1","title":"Scalability","text":"Scalability in the context of computer systems refers to the ability of a system to handle an increasing amount of work, or its potential to be enlarged to accommodate that growth. There are several types of scalability that are often discussed in the field of computing:
"},{"location":"platform/concepts/#vertical-scalability-scale-up","title":"Vertical Scalability (Scale-Up)","text":"Definition Vertical scalability involves adding more resources to a single node or machine in order to increase its capacity. Example Upgrading the CPU, adding more RAM, or increasing storage on a server. Pros Cons Simplicity in implementation. There's a limit to how much a single machine can be scaled vertically. it can be cost-effective for certain applications. It may also lead to downtime during upgrades."},{"location":"platform/concepts/#horizontal-scalability-scale-out","title":"Horizontal Scalability (Scale-Out)","text":"Definition Horizontal scalability involves adding more nodes or machines to a system, distributing the load across multiple machines. Example Adding more servers to a web application to handle increased traffic. Pros Cons Highly scalable, as resources can be easily added by adding more machines. Requires a distributed architecture. Can provide better fault tolerance. Some applications may not be easily parallelized."},{"location":"platform/concepts/#load-balancing_1","title":"Load Balancing","text":"Definition Load balancing involves distributing incoming network traffic or workload across multiple servers or resources to optimize resource utilization, maximize throughput, minimize response time, and avoid overloading any single resource. Example A load balancer distributing incoming web requests across multiple web servers. Pros Cons Improves overall system performance, ensures high availability, and can help with fault tolerance. Requires additional infrastructure, and the load balancer itself can become a potential bottleneck."},{"location":"platform/concepts/#elastic-scalability","title":"Elastic Scalability","text":"Definition Elastic scalability involves dynamically adjusting resources based on demand. Resources are automatically added or removed as needed. 
Example Cloud computing platforms that can automatically scale the number of virtual machines based on traffic. Pros Cons Efficient resource utilization, cost-effective as resources are only used when needed. Requires sophisticated monitoring and management systems."},{"location":"platform/concepts/#database-scalability","title":"Database Scalability","text":"Definition Database scalability refers to the ability of a database to handle an increasing amount of data and transactions. Vertical Database Scalability: Adding more resources to a single database server (e.g., increasing CPU, RAM). Horizontal Database Scalability: Distributing the database across multiple servers (e.g., sharding or partitioning). Pros Cons Can improve performance and handle increased data loads. Complex to implement, and horizontal scalability may require changes to the database schema."},{"location":"platform/concepts/#caching","title":"Caching","text":"Definition Caching involves storing frequently accessed data in a cache to reduce the need to fetch the same data from the original source repeatedly. Example Caching frequently used database queries or the results of computationally expensive operations. Pros Cons Improves response time, reduces load on backend systems. May lead to stale data if not managed properly. Each type of scalability has its own strengths and weaknesses, and the choice of scalability approach depends on the specific requirements and constraints of the system or application being developed. Often, a combination of these scalability types is employed to achieve optimal performance and resource utilization.
"},{"location":"platform/concepts/#design-patterns","title":"Design Patterns","text":"A design pattern in software development is a general, reusable solution to a common problem that occurs in a particular context within a software design. It's a template or a best practice that addresses a specific design or programming problem. Design patterns aren't complete solutions by themselves; rather, they provide a blueprint for solving certain types of problems.
The concept of design patterns was popularized by the book \"Design Patterns: Elements of Reusable Object-Oriented Software,\" written by Erich Gamma, Richard Helm, Ralph Johnson, and John Vlissides, often referred to as the \"Gang of Four\" (GoF)1. The book categorizes design patterns into three main types:
- Creational Patterns: These patterns deal with object creation mechanisms, trying to create objects in a manner suitable to the situation. Examples include the Singleton pattern, Factory Method pattern, and Abstract Factory pattern.
- Structural Patterns: These patterns focus on the composition of classes or objects. They help in creating a structure of classes and objects, making it easier to form larger structures. Examples include the Adapter pattern, Decorator pattern, and Composite pattern.
- Behavioral Patterns: Behavioral patterns are concerned with the interaction and responsibility of objects. They define communication patterns between objects and the responsibility of one object in a given situation. Examples include Observer pattern, Strategy pattern, and Command pattern.
Design patterns provide several benefits in software development:
- Reusability: Design patterns promote reusability of solutions to common problems. Once a design pattern is established, it can be applied to similar problems in different parts of the system.
- Scalability: Using design patterns can enhance the scalability of a system by providing proven solutions that can be applied as the system grows.
- Maintainability: Patterns make code more maintainable by providing a clear and organized structure. Developers familiar with design patterns can understand the overall architecture more easily.
- Common Vocabulary: Design patterns establish a common vocabulary for developers. When a developer mentions a particular pattern, others who are familiar with it can quickly understand the solution being implemented.
While design patterns are valuable tools, it's essential to use them judiciously. Not every problem requires a design pattern, and using patterns unnecessarily can lead to overly complex and difficult-to-maintain code. It's important to understand the problem at hand and choose the appropriate design pattern when it genuinely adds value to the solution.
-
GAMMA, E.; HELM, R.; JOHNSON, R., VLISSIDES, J., Design Patterns: Elements of Reusable Object-Oriented Software, 1\u00aa ed., Addison-Wesley Professional, 1994.\u00a0\u21a9
-
Wikipedia - CAP Theorem \u21a9
-
Gang of Four - Gof \u21a9
"},{"location":"platform/config/","title":"Config","text":""},{"location":"platform/config/#spring-cloud-config","title":"Spring Cloud Config","text":"Spring Cloud Config provides server-side and client-side support for externalized configuration in a distributed system. With the Config Server, you have a central place to manage external properties for applications across all environments.
Key components of Spring Cloud Config include:
-
Config Server: A standalone server that provides a REST API for providing configuration properties to clients. The server is embeddable in a Spring Boot application, by using the @EnableConfigServer
annotation. The properties can be stored in various types of repositories (Git, SVN, filesystem, etc.).
-
Config Client: A library for Spring Boot applications. It fetches the configuration properties from the Config Server and bootstrap them into the application's context. It's included in the classpath by adding the spring-cloud-starter-config
dependency.
-
Refresh Scope: Spring Cloud Config includes a RefreshScope
capability which allows properties to be reloaded without restarting the application. You can expose a /refresh
endpoint in your application that, when invoked, will cause the application to re-fetch properties from the Config Server.
Spring Cloud Config Server
"},{"location":"platform/discovery/","title":"Discovery","text":"Spring Cloud Discovery is a module in the Spring Cloud framework that provides a way for services to discover and communicate with each other in a distributed system. It helps manage the dynamic nature of microservices by allowing them to register themselves and discover other services without hardcoding their locations.
In a distributed system, services often need to communicate with each other to fulfill their functionalities. However, the locations of these services may change frequently due to scaling, failures, or deployments. Spring Cloud Discovery solves this problem by providing a service registry where services can register themselves and provide information about their location, such as IP address and port.
The service registry acts as a central database of all the services in the system. When a service needs to communicate with another service, it can query the service registry to obtain the necessary information. This allows services to be decoupled from each other and eliminates the need for hardcoding service locations in the code.
Spring Cloud Discovery supports multiple service registry implementations, such as Netflix Eureka, Consul, and ZooKeeper. These implementations provide additional features like service health checks, load balancing, and failover.
To use Spring Cloud Discovery, you need to include the necessary dependencies in your project and configure the service registry implementation you want to use. Then, you can annotate your services with @EnableDiscoveryClient to enable service registration and discovery. Spring Cloud Discovery will automatically register your services with the service registry and provide a client library to query the registry for service information.
Here's an example of how you can use Spring Cloud Discovery with Netflix Eureka:
@SpringBootApplication\n@EnableDiscoveryClient\npublic class MyServiceApplication {\n public static void main(String[] args) {\n SpringApplication.run(MyServiceApplication.class, args);\n }\n}\n
In this example, the @EnableDiscoveryClient annotation enables service registration and discovery using the configured service registry. When the application starts, it will register itself with the service registry and be discoverable by other services.
Overall, Spring Cloud Discovery simplifies the process of service discovery and communication in a distributed system, making it easier to build and maintain microservices architectures.
"},{"location":"platform/gateway/","title":"Gateway","text":""},{"location":"platform/gateway/#concepts","title":"Concepts","text":"The Gateway design pattern is a structural design pattern that provides a centralized entry point for handling requests from external systems. It acts as a mediator between the client and the server, allowing the client to make requests to multiple services through a single interface.
In the context of software development, a gateway acts as an intermediary between the client and the backend services. It abstracts away the complexity of interacting with multiple services by providing a unified API for the client to communicate with.
The main benefits of using the Gateway design pattern include:
-
Simplified client code: The client only needs to interact with the gateway, which handles the routing and communication with the appropriate backend services. This reduces the complexity and coupling in the client code.
-
Centralized cross-cutting concerns: The gateway can handle common concerns such as authentication, authorization, rate limiting, caching, and logging in a centralized manner. This eliminates the need to implement these features in each individual service.
-
Scalability and flexibility: The gateway can distribute requests to multiple instances of backend services, allowing for horizontal scaling. It also provides the flexibility to add or remove backend services without affecting the client code.
-
Protocol translation: The gateway can handle protocol translation, allowing clients to use different protocols (e.g., HTTP, WebSocket) while the backend services can use a different protocol.
-
Service aggregation: The gateway can aggregate data from multiple backend services and provide a unified response to the client. This reduces the number of requests made by the client and improves performance.
To implement the Gateway design pattern, various technologies and frameworks can be used, such as Spring Cloud Gateway, Netflix Zuul, or NGINX. These tools provide features like routing, load balancing, and request filtering, making it easier to build a robust and scalable gateway.
In summary, the Gateway design pattern provides a centralized entry point for handling requests from clients and abstracts away the complexity of interacting with multiple backend services. It simplifies client code, centralizes cross-cutting concerns, and provides scalability and flexibility in a distributed system architecture.
"},{"location":"platform/gateway/#spring-cloud-gateway","title":"Spring Cloud Gateway","text":"https://spring.io/projects/spring-cloud-gateway/
"},{"location":"platform/load-balancing/","title":"Load Balancing","text":""},{"location":"platform/load-balancing/#spring-cloud-loadbalancer","title":"Spring Cloud LoadBalancer","text":"Spring Cloud LoadBalancer is a generic abstraction over load balancing algorithms that you can use with service discovery clients like Eureka, Consul, and Zookeeper. It provides a round-robin load balancing implementation by default, but you can also implement your own custom load balancing algorithms.
Key components of Spring Cloud LoadBalancer include:
-
Dependency: To use Spring Cloud LoadBalancer, you need to include the spring-cloud-starter-loadbalancer
dependency in your project.
-
Configuration: By default, Spring Cloud LoadBalancer uses a simple round-robin strategy for load balancing. If you want to customize this, you can create a bean of type ServiceInstanceListSupplier
that returns a custom list of instances for load balancing.
-
Usage: You can use the @LoadBalanced
annotation on a RestTemplate
or WebClient.Builder
bean to integrate it with Spring Cloud LoadBalancer. When you make a request through this client, it will automatically be load balanced.
"},{"location":"platform/microservices/","title":"Microservices","text":""},{"location":"platform/microservices/#microservices-concepts","title":"Microservices Concepts","text":"Microservices, also known as the microservices architecture, is an architectural style that structures an application as a collection of small autonomous services, modeled around a business domain.
Key concepts of microservices include:
- Single Responsibility: Each microservice should have a single responsibility and should implement a single business capability.
- Independence: Microservices should be able to run and evolve independently of each other. They should be independently deployable and scalable.
- Decentralization: Microservices architecture favors decentralized governance. Teams have the freedom to choose the best technology stack that suits their service.
- Isolation of Failures: If a microservice fails, it should not impact the availability of other services.
- Data Isolation: Each microservice should have its own database to ensure that the services are loosely coupled and can evolve independently.
- Communication: Microservices communicate with each other through well-defined APIs and protocols, typically HTTP/REST with JSON or gRPC with Protobuf.
- Infrastructure Automation: Due to the distributed nature of the microservices architecture, automation of infrastructure is a must. This includes automated provisioning, scaling, and deployment.
- Observability: With many different services, it's important to have excellent monitoring and logging to detect and diagnose problems.
"},{"location":"platform/microservices/#domain-driven-design","title":"Domain Driven Design","text":"Domain-Driven Design (DDD) is a software development approach that emphasizes collaboration between technical experts and domain experts. The goal is to create software that is a deep reflection of the underlying domain, which is the specific area of business or activity that the software is intended to support.
Key concepts of DDD include:
- Ubiquitous Language: A common language established between developers and domain experts, used to describe all aspects of the domain.
- Bounded Context: A boundary within which a particular model is defined and applicable.
- Entities: Objects that have a distinct identity that persists over time and across different representations.
- Value Objects: Objects that are defined by their attributes, not their identity.
- Aggregates: Clusters of entities and value objects that are treated as a single unit.
- Repositories: They provide a way to obtain references to aggregates.
- Domain Events: Events that domain experts care about.
- Services: Operations that don't naturally belong to any entity or value object.
By focusing on the domain and domain logic, DDD provides techniques to develop complex systems targeting real-world scenarios. It helps to reduce the complexity by dividing the system into manageable and interconnected parts.
Source: System Design 101 - Microservice Architecture"},{"location":"platform/microservices/#design-a-microservice-platform","title":"Design a Microservice Platform","text":"flowchart LR\n subgraph Client\n direction LR\n Web\n Mobile\n Desktop\n end\n subgraph Microservices\n direction LR\n gateway[\"Gateway\"]\n subgraph Essentials\n direction TB\n discovery[\"Discovery\"]\n auth[\"Auth\"]\n config[\"Configuration\"]\n end\n subgraph Businesses\n direction TB\n ms1[\"Service 1\"]\n ms2[\"Service 2\"]\n ms3[\"Service 3\"]\n end\n end\n Client --> lb[\"Load Balance\"] --> gateway --> Businesses\n gateway --> auth\n gateway --> discovery\n click gateway \"../gateway/\" \"Gateway\"\n click discovery \"../discovery/\" \"Discovery\"\n click auth \"../auth-service/\" \"Auth\"\n click config \"../config/\" \"Configuration\"\n click lb \"../load-balancing/\" \"Load Balance\"
"},{"location":"platform/microservices/#containering","title":"Containering:","text":"Many microservices implies in many ports, then a complicated environment to manage
"},{"location":"platform/microservices/#gateway","title":"Gateway","text":""},{"location":"platform/microservices/#discovery","title":"Discovery","text":""},{"location":"platform/microservices/#communication","title":"Communication","text":" -
XU, A., System Design 101.\u00a0\u21a9
-
Wikipedia - Domain Driven Design \u21a9
"},{"location":"platform/payment/","title":"Payment","text":"FinOps
"},{"location":"platform/database/caching/","title":"Caching","text":"Spring Boot Cache
https://docs.spring.io/spring-framework/docs/4.1.5.RELEASE/spring-framework-reference/html/cache.html
Redis
https://medium.com/nstech/programa%C3%A7%C3%A3o-reativa-com-spring-boot-webflux-e-mongodb-chega-de-sofrer-f92fb64517c3
"},{"location":"platform/database/caching/#handout-redis","title":"Handout Redis","text":""},{"location":"platform/database/flyway/","title":"Flyway","text":""},{"location":"platform/database/flyway/#flyway","title":"Flyway","text":"Flyway is an open-source database migration tool that strongly favors simplicity and convention over configuration. It is designed to simplify the process of versioning a database, similar to how Git versions source code.
With Flyway, you can apply version control to your database which allows you to migrate it to a newer version and also revert changes if needed. Flyway uses SQL scripts or Java-based migrations to evolve your database schema in a way that is controllable and predictable.
Key features of Flyway include:
- Version control for your database: Allows you to track changes and apply version control to your database, similar to how you would with your source code.
- Support for SQL and Java-based migrations: You can use SQL for simple changes, and Java for complex migrations.
- Repeatable migrations: You can use this feature to manage objects in your database that can't be easily handled with versioned migrations, like stored procedures and views.
- Multiple database support: Flyway supports a wide variety of databases including MySQL, PostgreSQL, SQL Server, and more.
https://www.baeldung.com/liquibase-vs-flyway
-
https://www.baeldung.com/database-migrations-with-flyway\u00a0\u21a9
"},{"location":"platform/messaging/concepts/","title":"Concepts","text":"https://medium.com/@thiagolenz/tutorial-spring-boot-e-rabbitmq-como-fazer-e-porqu%C3%AA-4a6cc34a3bd1
https://www.simplilearn.com/kafka-vs-rabbitmq-article
https://mmarcosab.medium.com/criando-consumer-e-produkafka-com-spring-boot-b427cc2f841d
"},{"location":"platform/observability/logging/","title":"Logging","text":""},{"location":"platform/observability/monitoring/","title":"Monitoring","text":""},{"location":"platform/security/concepts/","title":"Concepts","text":"Security is an important aspect of software development. It involves protecting the confidentiality, integrity, and availability of data and resources. Two key concepts in security are authentication and authorization.
"},{"location":"platform/security/concepts/#authentication","title":"Authentication","text":"Authentication is the process of verifying the identity of a user or system. It ensures that the user or system is who they claim to be. Common authentication methods include passwords, biometrics, and two-factor authentication. The system checks these credentials against the stored data. If the credentials are valid, the system confirms the user's identity.
In many systems, after successful authentication, the system generates a token. This token is a piece of data that represents the user's authentication session. It's like a digital ticket that proves the user's identity for a certain period of time.
This token is then sent back to the user. The user's client software (like a web browser) stores this token and sends it along with every subsequent request to the server (in case of stateless server). This way, the server knows that the request comes from an authenticated user without needing to ask for the credentials again.
Here's a simplified step-by-step process:
sequenceDiagram\n autonumber\n actor User\n User->>+Auth Server: authentication(credentials)\n Auth Server->>Auth Server: verifies credenditals and generates a token\n Auth Server->>-User: returns the token\n User->>User: stores the token to use for the next requests
- The user sends their username and password (or other credentials) to the server;
- The server verifies the credentials. If they're valid, the server generates a token.
- The server sends this token back to the user.
- The user's client software stores this token.
- For every subsequent request, the client sends this token along with the request.
- The server checks the token to ensure it's valid and hasn't expired.
- This token-based authentication process is commonly used in many modern web applications and APIs. It helps maintain the user's session and allows the server to authenticate requests without storing the user's state.
"},{"location":"platform/security/concepts/#authorization","title":"Authorization","text":"Authorization is the process of granting or denying access to specific resources or actions based on the authenticated user's privileges. It determines what a user is allowed to do within a system. Authorization can be role-based, where permissions are assigned based on predefined roles, or attribute-based, where permissions are based on specific attributes of the user.
In many systems, the token not only represents the user's identity, but also includes information about their permissions or roles. This is often done using a type of token called a JSON Web Token (JWT), which can include a payload of data.
Here's a simplified step-by-step process:
sequenceDiagram\n autonumber\n actor User\n User->>Auth Server: request with token\n Auth Server->>Auth Server: decodes the token and extracts claims\n Auth Server->>Auth Server: verifies permissions\n critical allowed\n Auth Server->>Secured Resource: authorizes the request\n Secured Resource->>User: returns the response\n option denied\n Auth Server-->>User: unauthorized message\n end
- After authentication, the user's client software sends a request to a server. This request includes the token.
- The server decodes the token and extracts the user's identity and permissions.
- The server checks whether the user has the necessary permissions for the requested action. This could involve checking the user's roles or other attributes against the requirements for the action.
- If the user has the necessary permissions, the server allows the action. If not, the server denies the action.
This process allows the server to authorize actions without needing to repeatedly look up the user's permissions. It also allows for stateless servers, as the necessary information is included in every request.
By implementing strong authentication and authorization mechanisms, software systems can ensure that only authorized users have access to sensitive data and functionalities, reducing the risk of unauthorized access and potential security breaches.
As the platform has only one entrace point, it is
JWT is a decentralized
The point of entrance of API is the gateway, then as suggested by 1.
"},{"location":"platform/security/concepts/#auth-service","title":"Auth Service","text":" - Responsabilities:
- Registration:
- Authentication:
- Authorization:
Two Maven Projects
-
Interfaces
-
Implemmentation: resource
classDiagram\n namespace Interface {\n class AuthController {\n <<interface>>\n register(RegisterIn)\n authenticate(CredentialIn)\n identify(String)\n }\n class RegisterIn {\n <<record>>\n String firstName\n String lastName\n String email\n String password\n }\n class CredentialIn {\n <<record>>\n String email\n String password\n }\n }\n namespace Resource {\n class AuthResource {\n <<REST API>>\n -authService\n }\n class AuthService {\n <<service>>\n -registerRepository\n -userRepository\n register(Register)\n authenticate(Credential)\n identify(Session)\n }\n class RegisterRepository {\n <<interface>>\n }\n class RegisterEntity {\n <<entity>>\n }\n class UserRepository {\n <<interface>>\n }\n class UserEntity {\n <<entity>>\n }\n }\n AuthController <|-- AuthResource\n AuthResource o-- AuthService\n AuthService o-- RegisterRepository\n AuthService o-- UserRepository\n RegisterRepository \"1\" --> \"0..*\" RegisterEntity\n UserRepository \"1\" --> \"0..*\" UserEntity
"},{"location":"platform/security/concepts/#addtional-material","title":"Addtional Material","text":" -
JSON Web Token
-
Autentica\u00e7\u00e3o e Autoriza\u00e7\u00e3o com Spring Security e JWT Tokens by Fernanda Kipper
-
DELANTHA, R., Spring Cloud Gateway security with JWT, 2023.\u00a0\u21a9
"},{"location":"platform/security/jwt/","title":"JWT","text":""},{"location":"platform/security/jwt/#jwt-json-web-token","title":"JWT - JSON Web Token","text":"JWT stands for JSON Web Token. It is a compact, URL-safe means of representing claims between two parties. JWTs are commonly used to secure the transmission of information between parties in a web environment, typically for authentication and information exchange. The JWT specification is defined by RFC 75191 and it is a decentralized approach for security (which can support horizontal scalability).
Here are the key components and concepts of JWT:
- JSON Format: JWTs are represented as JSON objects that are easy to parse and generate. The JSON format makes them human-readable and easy to work with.
-
Three Parts: JWTs consist of three parts separated by dots (.
): Header, Payload, and Signature.
-
Header: The header typically consists of two parts: the type of the token (JWT) and the signing algorithm being used, such as HMAC SHA256 or RSA.
-
Payload: The payload contains the claims. Claims are statements about an entity (typically, the user) and additional data. There are three types of claims: registered, public, and private claims.
-
Signature: To create the signature part, you take the encoded header, the encoded payload, a secret, the algorithm specified in the header, and sign that.
-
Encoding: Each of the three parts is Base64Url encoded, and the resulting strings are concatenated with periods between them. The final JWT looks like: xxxxx.yyyyy.zzzzz
.
- Stateless and Self-contained: JWTs are stateless, meaning that all the information needed is within the token itself. The server doesn't need to store the user's state. They are also self-contained, meaning that all the information needed is contained within the token.
- Use Cases: JWTs are commonly used for authentication and information exchange between parties. For example, after a user logs in, a server could generate a JWT and send it to the client. The client can then include the JWT in the headers of subsequent requests to access protected resources. The server can verify the authenticity of the JWT using the stored secret key.
- Security Considerations: While JWTs are widely used and versatile, it's important to handle them securely. For instance, the key used to sign the JWT should be kept secret, and HTTPS should be used to transmit JWTs to prevent man-in-the-middle attacks.
Here's a simple example of a JWT created on JWT Builder2:
eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzUxMiJ9.eyJpc3MiOiJJbnNwZXIiLCJpYXQiOjE3MDMwMDgzMzgsImV4cCI6MjAxODU0MTEzOCwiYXVkIjoid3d3Lmluc3Blci5lZHUuYnIiLCJzdWIiOiJodW1iZXJ0b3JzQGluc3Blci5lZHUuYnIiLCJHaXZlbk5hbWUiOiJIdW1iZXJ0byIsIlN1cm5hbWUiOiJTYW5kbWFubiIsIkVtYWlsIjoiaHVtYmVydG9yc0BpbnNwZXIuZWR1LmJyIiwiUm9sZSI6IlByb2Zlc3NvciJ9.SsGdvR5GbYWTRbxY7IGxHt1vSxhkpRueBJWsi0lrPhJVCICp119QjU8F3QvHW0yF5tw-HhQ9RVh0l89t4M0LNw
This JWT consists of three parts, decoded by 3:
HeaderPayloadSignature eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzUxMiJ9
{\n \"typ\": \"JWT\",\n \"alg\": \"HS512\"\n}\n
eyJpc3MiOiJJbnNwZXIiLCJpYXQiOjE3MDMwMDgzMzgsImV4cCI6MjAxODU0MTEzOCwiYXVkIjoid3d3Lmluc3Blci5lZHUuYnIiLCJzdWIiOiJodW1iZXJ0b3JzQGluc3Blci5lZHUuYnIiLCJHaXZlbk5hbWUiOiJIdW1iZXJ0byIsIlN1cm5hbWUiOiJTYW5kbWFubiIsIkVtYWlsIjoiaHVtYmVydG9yc0BpbnNwZXIuZWR1LmJyIiwiUm9sZSI6IlByb2Zlc3NvciJ9
{\n \"iss\": \"Insper\",\n \"iat\": 1703008338,\n \"exp\": 2018541138,\n \"aud\": \"www.insper.edu.br\",\n \"sub\": \"humbertors@insper.edu.br\",\n \"GivenName\": \"Humberto\",\n \"Surname\": \"Sandmann\",\n \"Email\": \"humbertors@insper.edu.br\",\n \"Role\": \"Professor\"\n}\n
SsGdvR5GbYWTRbxY7IGxHt1vSxhkpRueBJWsi0lrPhJVCICp119QjU8F3QvHW0yF5tw-HhQ9RVh0l89t4M0LNw
HMACSHA512(\n base64UrlEncode(header) + \".\" +\n base64UrlEncode(payload),\n qwertyuiopasdfghjklzxcvbnm123456,\n)\n
JWTs are widely used in web development due to their simplicity, flexibility, and support across various programming languages and frameworks. They are commonly used in token-based authentication systems.
"},{"location":"platform/security/jwt/#addtional-material","title":"Addtional Material","text":" -
Spring Cloud Security
-
ByeteByteGo - Why is JWT popular?
-
RFC 7519 - JSON Web Token (JWT), 2015.\u00a0\u21a9
-
JWT - Builder.\u00a0\u21a9
-
jwt.io - JWT Verification.\u00a0\u21a9
-
Unix Time Stamp - Epoch Converter.\u00a0\u21a9
-
DELANTHA, R., Spring Cloud Gateway security with JWT, 2023.\u00a0\u21a9
-
Wikipedia - Pepper (cryptography).\u00a0\u21a9
-
PGzlan, Serve your hash with Salt and Pepper for Stronger Account Security, 2023.\u00a0\u21a9
"},{"location":"platform/security/oauth2/","title":"OAuth2","text":"OAuth2 is an authorization framework that allows applications to obtain limited access to user accounts on an HTTP service, such as Facebook, Google, or GitHub, without exposing the user's credentials. It provides a secure and standardized way for users to grant access to their resources to third-party applications.
The principal process to obtain a valid credential using OAuth2 involves the following steps:
-
Registration: The application developer needs to register their application with the OAuth2 provider (e.g., Google, Facebook) to obtain client credentials, including a client ID and client secret. These credentials are used to identify and authenticate the application.
-
User Authorization: When a user wants to grant access to their resources, the application redirects them to the OAuth2 provider's authorization endpoint. This typically involves the user being presented with a login screen and being asked to grant permission to the application.
-
Authorization Grant: Once the user grants permission, the OAuth2 provider issues an authorization grant to the application. This grant can take various forms, such as an authorization code or an access token.
-
Token Exchange: The application then exchanges the authorization grant for an access token by sending a request to the OAuth2 provider's token endpoint. The access token is a credential that the application can use to access the user's resources on behalf of the user.
-
Accessing Resources: With the access token, the application can make requests to the OAuth2 provider's API endpoints to access the user's resources. The access token is typically included in the request headers or as a query parameter.
-
Refreshing Tokens: Access tokens have a limited lifespan. To continue accessing the user's resources, the application can use a refresh token (if provided) to obtain a new access token without requiring the user to reauthorize the application.
It's important to note that the exact process and terminology may vary slightly depending on the OAuth2 provider and the specific implementation. However, the general flow remains consistent across most OAuth2 implementations.
"},{"location":"versions/2024.1/","title":"2024.1","text":"Info Prof. Humberto Sandmann
humbertors@insper.edu.br
Students
Meetings
Evento Dia In\u00edcio T\u00e9rmino Aula Qua. 09h45 11h45 Aula Sex. 07h30 09h30 Atendimento Seg. 12h00 13h30 Grades
FinalIndividualTeam \\[ \\text{Final Grade} = \\left\\{\\begin{array}{lll} \\text{Individual} \\geq 5 \\bigwedge \\text{Team} \\geq 5 & \\implies & \\displaystyle \\frac{ \\text{Individual} + \\text{Team} } {2} \\\\ \\\\ \\text{Otherwise} & \\implies & \\min\\left(\\text{Individual}, \\text{Team}\\right) \\end{array}\\right. \\] Avalia\u00e7\u00e3o Descri\u00e7\u00e3o Data Nota (%) Roteiros M\u00e9dia aritm\u00e9tica dos 2 roteiros de maiores notas. 60.0 Roteiro 1 Testes - Roteiro 2 Bottlenecks 22.mai Roteiro 3 Cloud 22.mai Participa\u00e7\u00e3o Nota geral atribu\u00edda ao grupo distribu\u00edda aos membros pelo pr\u00f3prio grupo, apenas notas inteiras \\([0; 10]\\) 40.0 Avalia\u00e7\u00e3o Descri\u00e7\u00e3o Data Nota (%) Checkpoints CP1 Montar um Spring Cloud 05.abr 7.5 CP2 Testes e Pipeline 19.abr 7.5 CP3 K8s 10.mai 7.5 CP4 Platform as a Product 22.mai 7.5 Apresenta\u00e7\u00e3o 10.0 Projeto 60.0 Individual
Roteiro 1Roteiro 2Roteiro 3Participa\u00e7\u00e3o Testes
- Roteiros de testes de funcionalidades ou de testes de carga
- Documenta\u00e7\u00e3o dos resultados obtidos
Bottlenecks
-
Implementa\u00e7\u00e3o de um microservi\u00e7o de bottleneck para o projeto:
- Mensageria
- RabbitMQ
- Kafka
- Spring e Kafka, Giuliana Bezerra
- Resili\u00eancia
- Spring Cloud Circuit Breaker
- Configura\u00e7\u00e3o
- Spring Cloud Config
- In-Memory Database
- Redis, Giuliana Bezerra
- Payments (sandboxes)
- PayPal
- Hearland
- Mercado Pago
- Jenkins
- SonarQube
- Dependency Analyzes
Cloud
- Roteiro de publica\u00e7\u00e3o de um microsservi\u00e7o em Cloud
- Contribui\u00e7\u00f5es no GitHub dos participantes
- Documenta\u00e7\u00e3o das reuni\u00f5es (dayly, retro, etc)
- Nota geral atribu\u00edda pelo professor mas dividida pelo grupo
Team
Checkpoint 1Checkpoint 2Checkpoint 3Checkpoint 4Apresenta\u00e7\u00e3oProjeto Desenvolvimento Spring Cloud
- Servi\u00e7o de discovery
- Servi\u00e7o de gateway
- Servi\u00e7o de autentica\u00e7\u00e3o e autoriza\u00e7\u00e3o
- 3 microsservi\u00e7os com persist\u00eancia de dados
- Comunica\u00e7\u00e3o entre, ao menos 2, microsservi\u00e7os, al\u00e9m de: Gateway \\(\\rightarrow\\) Auth \\(\\rightarrow\\) Account
- Monitoramento com dashboard de microsservi\u00e7os
- Documenta\u00e7\u00e3o das APIs padr\u00e3o Swagger
- Cluster em Docker Compose para deploy dos microsservi\u00e7os
Testes e Pipeline
- Plano de testes
- Script Jenkins - Pipeline as Code
K8s
- Release no Minikube
- Scripts declarativos dos servi\u00e7os
Platform as a Service
- Plano de uso da plataforma como um produto (PaaS)
- Vislumbrar uso da plataforma por terceiros
- Storytelling (come\u00e7o, meio, fim)
- Flu\u00eddez
- Qualidade do material apresentado
- Tempo
- Participa\u00e7\u00e3o
- Checkpoint 1
- Checkpoint 2
- Checkpoint 3
- Checkpoint 4
- Planejamento
- Documenta\u00e7\u00e3o (markdown)
- Frontend (funcionalidades b\u00e1sicas: login, registro, dashboard, etc)
Planning
"},{"location":"versions/2024.1/#repositories","title":"Repositories","text":"Dev
Microservice Context Interface Service Discovery Infra platform.241.store.discovery Gateway Infra platform.241.store.gateway Postgres Database platform.241.store.db Account Business platform.241.store.account platform.241.store.account-resource Auth Business platform.241.store.auth platform.241.store.auth-resource Ops
Description Repositories Commands Docker Compose API platform.241.store.docker-api docker compose up --build
docker compose down
Jenkins Pipelines platform.241.store.ops docker compose up --build
docker compose down
http://localhost:9000"}]}
\ No newline at end of file
diff --git a/sitemap.xml b/sitemap.xml
index aa88933..39f354e 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -2,282 +2,282 @@
https://hsandmann.github.io/platform/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/disclaimer/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/api/documentation/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/api/spring-boot-cloud/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/api/testing/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/appendix/ohmyzsh/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/appendix/others/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/appendix/rest-vs-graphql/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/appendix/rsa/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/appendix/tls-for-microservices/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/appendix/versioning-rest-apis/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/business/concepts/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/cloud/aws/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/cloud/azure/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/cloud/gcp/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/cloud/gitactions/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/cloud/terraform/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/devops/concepts/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/devops/docker/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/devops/kubernetes/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/devops/packaging/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/devops/release/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/devops/version-control-system/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/architecture/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/business/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/cloud/aws/cli/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/cloud/aws/eks/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/devops/jenkins/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/devops/kubernetes/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/devops/observability/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/microservices/account/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/microservices/auth/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/microservices/discovery/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/microservices/gateway/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/handout/microservices/roadmap/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/circuit-breaker/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/communcation/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/concepts/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/config/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/discovery/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/gateway/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/load-balancing/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/microservices/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/payment/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/database/caching/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/database/flyway/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/messaging/concepts/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/messaging/kafka/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/messaging/rabbitmq/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/observability/logging/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/observability/monitoring/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/observability/tracing/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/security/concepts/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/security/jwt/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/platform/security/oauth2/
- 2024-06-17
+ 2024-06-19
daily
https://hsandmann.github.io/platform/versions/2024.1/
- 2024-06-17
+ 2024-06-19
daily
\ No newline at end of file
diff --git a/sitemap.xml.gz b/sitemap.xml.gz
index bdd520c..cd4de50 100644
Binary files a/sitemap.xml.gz and b/sitemap.xml.gz differ