diff --git a/.github/workflows/advanced-lb-sanity-ubuntu-22.yml b/.github/workflows/advanced-lb-sanity-ubuntu-22.yml index 4b77bbdd..aa1264a7 100644 --- a/.github/workflows/advanced-lb-sanity-ubuntu-22.yml +++ b/.github/workflows/advanced-lb-sanity-ubuntu-22.yml @@ -1,11 +1,9 @@ name: Adv-LB-Sanity-CI-Ubuntu-22 on: - #push: - # branches: - # - main - #pull_request: - # branches: [ "main" ] + schedule: + # Runs "At 18:00 UTC every day-of-week" + - cron: '0 18 * * *' workflow_dispatch: inputs: logLevel: @@ -14,11 +12,17 @@ on: default: 'warning' tags: description: 'Advanced LB Sanity ubuntu 22' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed jobs: build: name: advanced-lb-sanity-ubuntu-22 runs-on: ubuntu-22.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' steps: - uses: actions/checkout@v2 with: @@ -28,25 +32,7 @@ jobs: with: go-version: '>=1.18.0' - run: sudo apt-get update - - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool lksctp-tools - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - - run: docker pull ghcr.io/loxilb-io/loxilb:latest - - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp - - run: docker exec -dit loxilb mkllb_bpffs - - run: id=`docker ps -f name=loxilb | cut -d " " -f 1 | grep -iv "CONTAINER"` && docker commit $id ghcr.io/loxilb-io/loxilb:latest - - run: docker stop loxilb && docker rm loxilb + - run: sudo apt-get -y install linux-tools-$(uname -r) bridge-utils iperf iproute2 nodejs socat ethtool lksctp-tools - run: | cd cicd/k8slbsim/ ./config.sh @@ -101,3 +87,9 @@ jobs: ./validation.sh ./rmconfig.sh cd - + - run: | + cd cicd/tcplbmaxep/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - diff --git a/.github/workflows/advanced-lb-sanity-ubuntu-24.yml b/.github/workflows/advanced-lb-sanity-ubuntu-24.yml new file mode 100644 index 00000000..254ce6a8 --- /dev/null +++ b/.github/workflows/advanced-lb-sanity-ubuntu-24.yml @@ -0,0 +1,95 @@ +name: Adv-LB-Sanity-CI-Ubuntu-24 + +on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + tags: + description: 'Advanced LB Sanity ubuntu 24' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed + +jobs: + build: + name: advanced-lb-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: actions/setup-python@v2 + - uses: actions/setup-go@v3 + with: + go-version: '>=1.18.0' + - run: sudo apt-get update + - run: sudo apt-get -y install linux-tools-$(uname -r) 
bridge-utils iperf iproute2 nodejs socat ethtool lksctp-tools + - run: | + cd cicd/k8slbsim/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/onearml2/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/ulcltcplb/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/ulclsctplb/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/tcptunlb/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/sctptunlb/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/wrrtcplb1/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/wrrtcplb2/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/nat64tcp/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/tcplbmaxep/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - diff --git a/.github/workflows/advanced-lb-sanity.yml b/.github/workflows/advanced-lb-sanity.yml index 23549a66..0b3e82ab 100644 --- a/.github/workflows/advanced-lb-sanity.yml +++ b/.github/workflows/advanced-lb-sanity.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test @@ -101,15 +92,27 @@ jobs: ./validation.sh ./rmconfig.sh cd - + - run: | + cd cicd/tcplbmaxep/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - - run: | cd cicd/ipmasquerade/ ./config.sh ./validation.sh ./rmconfig.sh cd - - #- run: | - # cd cicd/httpsproxy/ - # ./config.sh - # ./validation.sh - # ./rmconfig.sh - # cd - + - run: | + cd cicd/httpsproxy/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/e2ehttpsproxy/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - diff --git a/.github/workflows/basic-sanity-ubuntu-22.yml b/.github/workflows/basic-sanity-ubuntu-22.yml index 34ceed77..9f4ad7c2 100644 --- a/.github/workflows/basic-sanity-ubuntu-22.yml +++ b/.github/workflows/basic-sanity-ubuntu-22.yml @@ -39,15 +39,6 @@ jobs: - run: | sudo ip netns add test sudo ip netns del test - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/basic-sanity-ubuntu-24.yml b/.github/workflows/basic-sanity-ubuntu-24.yml new file mode 100644 index 00000000..529ebec2 --- /dev/null +++ b/.github/workflows/basic-sanity-ubuntu-24.yml @@ 
-0,0 +1,51 @@ +name: Sanity-CI-Ubuntu-24 +on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + pull_request: + branches: [ "main" ] + workflow_dispatch: + inputs: + testName: + description: 'Test Run-Name' + required: true + default: 'Sanity-BuildCI-u24' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed +jobs: + build: + name: basic-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Login to GitHub Container Registry + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - uses: actions/setup-python@v2 + - uses: actions/setup-go@v3 + with: + go-version: '>=1.18.0' + - run: | + cd cicd/sconnect/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/tcplb/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - diff --git a/.github/workflows/basic-sanity.yml b/.github/workflows/basic-sanity.yml index f4afa8ab..5a9df820 100644 --- a/.github/workflows/basic-sanity.yml +++ b/.github/workflows/basic-sanity.yml @@ -21,15 +21,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev elfutils dwarves git linux-tools-$(uname -r) libbsd-dev bridge-utils unzip build-essential bison flex iproute2 - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/cluster-sanity-ubuntu-22.yml b/.github/workflows/cluster-sanity-ubuntu-22.yml index 95eca4ad..b3e1c92f 100644 --- a/.github/workflows/cluster-sanity-ubuntu-22.yml +++ b/.github/workflows/cluster-sanity-ubuntu-22.yml @@ -1,6 +1,9 @@ name: Cluster-Sanity-CI-Ubuntu-22 on: + # schedule: + # Runs "At 18:00 UTC every day-of-week" + #- cron: '0 18 * * *' workflow_dispatch: inputs: userInput: @@ -9,11 +12,17 @@ on: default: 'Finished' tags: description: 'Cluster Sanity Ubuntu 22' + #workflow_run: + # workflows: ["Docker-Multi-Arch"] + # types: + # - completed jobs: build: name: cluster-sanity-ubuntu-22 runs-on: ubuntu-22.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' steps: - uses: actions/checkout@v2 with: @@ -23,24 +32,7 @@ jobs: with: go-version: '>=1.18.0' - run: sudo apt-get update - - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat lksctp-tools - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - - run: 
loxilb-ebpf/utils/mkllb_bpffs.sh - - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - - run: docker pull ghcr.io/loxilb-io/loxilb:latest - - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp - - run: id=`docker ps -f name=loxilb | cut -d " " -f 1 | grep -iv "CONTAINER"` && docker commit $id ghcr.io/loxilb-io/loxilb:latest - - run: docker stop loxilb && docker rm loxilb + - run: sudo apt-get -y install lksctp-tools - run: | cd cicd/cluster1/ ./config.sh diff --git a/.github/workflows/cluster-sanity-ubuntu-24.yml b/.github/workflows/cluster-sanity-ubuntu-24.yml new file mode 100644 index 00000000..526024db --- /dev/null +++ b/.github/workflows/cluster-sanity-ubuntu-24.yml @@ -0,0 +1,52 @@ +name: Cluster-Sanity-CI-Ubuntu-24 + +on: + #schedule: + # Runs "At 11:00 UTC every day-of-week" + #- cron: '0 11 * * *' + workflow_dispatch: + inputs: + userInput: + description: 'Enter string to print at end' + required: true + default: 'Finished' + tags: + description: 'Cluster Sanity Ubuntu 24' + #workflow_run: + # workflows: ["Docker-Multi-Arch"] + # types: + # - completed + +jobs: + build: + name: cluster-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: actions/setup-python@v2 + - uses: actions/setup-go@v3 + with: + go-version: '>=1.18.0' + - run: sudo apt-get update + - run: sudo apt-get -y install linux-tools-$(uname -r) bridge-utils unzip iperf iproute2 nodejs socat lksctp-tools + - run: | + cd cicd/cluster1/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + cd cicd/cluster2/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + cd cicd/cluster3/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: echo ${{ github.event.inputs.userInput }} diff --git a/.github/workflows/cluster-sanity.yml b/.github/workflows/cluster-sanity.yml index 89d15b88..a23ae569 100644 --- a/.github/workflows/cluster-sanity.yml +++ b/.github/workflows/cluster-sanity.yml @@ -24,15 +24,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/data-store-CI.yml b/.github/workflows/data-store-CI.yml index 9f562437..5d34407f 100644 --- a/.github/workflows/data-store-CI.yml +++ b/.github/workflows/data-store-CI.yml @@ -23,13 +23,14 @@ on: options: - ubuntu-20.04 - ubuntu-22.04 + - ubuntu-24.04 jobs: build: name: data-store runs-on: ${{ github.event.inputs.runsOn }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: submodules: recursive - uses: actions/setup-python@v2 @@ -37,24
+38,7 @@ jobs: with: go-version: '>=1.18.0' - run: sudo apt-get update - - run: if [[ ${{ github.event.inputs.runsOn }} == 'ubuntu-22.04' ]]; then sudo apt-get -y install clang-13 lksctp-tools; else sudo apt-get -y install clang-10; fi - - run: sudo apt-get -y install llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - - run: sudo -E env "PATH=$PATH" make - - run: docker pull ghcr.io/loxilb-io/loxilb:latest - - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp - - run: id=`docker ps -f name=loxilb | cut -d " " -f 1 | grep -iv "CONTAINER"` && docker commit $id ghcr.io/loxilb-io/loxilb:latest - - run: docker stop loxilb && docker rm loxilb + - run: sudo apt-get -y install lksctp-tools linux-tools-$(uname -r) bridge-utils iperf iproute2 nodejs socat - run: | cd cicd/data-store/ ./test.sh ${{ github.event.inputs.testName }} diff --git a/.github/workflows/ipsec-sanity-rh9.yml b/.github/workflows/ipsec-sanity-rh9.yml index dae4a911..826c3640 100644 --- a/.github/workflows/ipsec-sanity-rh9.yml +++ b/.github/workflows/ipsec-sanity-rh9.yml @@ -41,6 +41,12 @@ jobs: ./validation.sh ./rmconfig.sh cd - + - run: | + cd cicd/ipsec-e2e/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - - run: echo ${{ github.event.inputs.userInput }} - name: Clean test-bed if: success() || failure() diff --git a/.github/workflows/ipsec-sanity-ubuntu-22.yml b/.github/workflows/ipsec-sanity-ubuntu-22.yml index af07185c..f90e4304 100644 --- a/.github/workflows/ipsec-sanity-ubuntu-22.yml +++ b/.github/workflows/ipsec-sanity-ubuntu-22.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test @@ -65,4 +56,10 @@ jobs: ./validation.sh ./rmconfig.sh cd - + - run: | + cd cicd/ipsec-e2e/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - - run: echo ${{ github.event.inputs.userInput }} diff --git a/.github/workflows/ipsec-sanity-ubuntu-24.yml b/.github/workflows/ipsec-sanity-ubuntu-24.yml new file mode 100644 index 00000000..bbeb2282 --- /dev/null +++ b/.github/workflows/ipsec-sanity-ubuntu-24.yml @@ -0,0 +1,57 @@ +name: IPsec-Sanity-CI-Ubuntu-24 + 
+on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + workflow_dispatch: + inputs: + userInput: + description: 'Enter string to print at end' + required: true + default: 'Finished' + tags: + description: 'IPSec Sanity Ubuntu 24' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed +jobs: + build: + name: ipsec-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: actions/setup-python@v2 + - uses: actions/setup-go@v3 + with: + go-version: '>=1.18.0' + - run: | + cd cicd/ipsec1/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/ipsec2/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/ipsec3/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/ipsec-e2e/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: echo ${{ github.event.inputs.userInput }} diff --git a/.github/workflows/ipsec-sanity.yml b/.github/workflows/ipsec-sanity.yml index ce467c4b..3ac2517c 100644 --- a/.github/workflows/ipsec-sanity.yml +++ b/.github/workflows/ipsec-sanity.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test @@ -65,4 +56,10 @@ jobs: ./validation.sh ./rmconfig.sh cd - + - run: | + cd cicd/ipsec-e2e/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - - run: echo ${{ github.event.inputs.userInput }} diff --git a/.github/workflows/k3s-calico-ubuntu-24.yml b/.github/workflows/k3s-calico-ubuntu-24.yml new file mode 100644 index 00000000..337d3e76 --- /dev/null +++ b/.github/workflows/k3s-calico-ubuntu-24.yml @@ -0,0 +1,51 @@ +name: K3s-Calico-Sanity-CI-Ubuntu-24 +on: + #schedule: + # Runs "At 11:00 UTC every day-of-week" + #- cron: '0 11 * * *' + workflow_dispatch: + inputs: + testName: + description: 'Test Run-Name' + required: true + default: 'k3s-calico-ubuntu-24' + #workflow_run: + # workflows: ["Docker-Multi-Arch"] + # types: + # - completed +jobs: + build: + name: k3s-calico-sanity + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - run: | + echo "KUBECONFIG=--kubeconfig=/etc/rancher/k3s/k3s.yaml" >> $GITHUB_ENV + - uses: actions/checkout@v2 + with: + submodules: recursive + - run: sudo apt-get -y install lksctp-tools + - run: curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.22.9+k3s1 INSTALL_K3S_EXEC="server --disable traefik --disable servicelb --disable-cloud-controller --kubelet-arg cloud-provider=external --flannel-backend=none --cluster-cidr=10.42.0.0/16" K3S_KUBECONFIG_MODE="644" sh - + - run: | + sleep 10 + kubectl "${{ env.KUBECONFIG }}" create -f 
https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/tigera-operator.yaml + - run: | + sleep 10 + kubectl "${{ env.KUBECONFIG }}" create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/custom-resources.yaml + - run: | + sleep 10 + kubectl "${{ env.KUBECONFIG }}" taint nodes --all node.cloudprovider.kubernetes.io/uninitialized=false:NoSchedule- + sleep 60 + kubectl "${{ env.KUBECONFIG }}" get nodes + kubectl "${{ env.KUBECONFIG }}" get pods -A + wget https://github.com/loxilb-io/loxi-ccm/raw/master/manifests/loxi-ccm-k3s.yaml + kubectl "${{ env.KUBECONFIG }}" apply -f ./loxi-ccm-k3s.yaml + sleep 60 + kubectl "${{ env.KUBECONFIG }}" get pods -A + - run: | + cd cicd/k3s-calico/ + ./config.sh "${{ env.KUBECONFIG }}" + ./validation.sh "${{ env.KUBECONFIG }}" + ./rmconfig.sh "${{ env.KUBECONFIG }}" + cd - diff --git a/.github/workflows/k3s-loxi-ingress.yml b/.github/workflows/k3s-loxi-ingress.yml new file mode 100644 index 00000000..570596c6 --- /dev/null +++ b/.github/workflows/k3s-loxi-ingress.yml @@ -0,0 +1,33 @@ +name: K3s-Loxi-Ingress-Sanity-CI +on: + schedule: + # Runs "At 6:00 UTC every day-of-week" + - cron: '0 6 * * *' + workflow_dispatch: + inputs: + testName: + description: 'Test Run-Name' + required: true + default: 'k3s-loxi-ingress' +jobs: + build: + name: k3s-loxi-ingress-sanity + runs-on: [self-hosted, large] + if: github.repository == 'loxilb-io/loxilb' + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + - name: Run K3s LoxiIngress CICD + run: | + cd cicd/k3s-flannel-loxilb-ingress + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - name: Clean test-bed + if: success() || failure() + run: | + cd cicd/k3s-flannel-loxilb-ingress/ || true + ./rmconfig.sh + cd - diff --git a/.github/workflows/k3s-sctpmh-ubuntu-24.yml b/.github/workflows/k3s-sctpmh-ubuntu-24.yml new file mode 100644 index 00000000..f09818d7 --- /dev/null +++ b/.github/workflows/k3s-sctpmh-ubuntu-24.yml @@ -0,0 +1,58 @@ +name: K3s-SCTPMH-Sanity-CI-Ubuntu-24 +on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + workflow_dispatch: + inputs: + testName: + description: 'Test Run-Name' + required: true + default: 'k3s-sctpmh-ubuntu-24' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed +jobs: + build: + name: k3s-sctpmh-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - run: | + for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do sudo apt-get remove $pkg; done + sudo apt-get update + sudo apt-get install ca-certificates curl gnupg + sudo install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg + sudo chmod a+r /etc/apt/keyrings/docker.gpg + echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + "$(. 
/etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt-get update + sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + - run: sudo apt-get -y install bridge-utils iproute2 lksctp-tools iputils-ping net-tools + - run: | + echo "KUBECONFIG=--kubeconfig=/etc/rancher/k3s/k3s.yaml" >> $GITHUB_ENV + - uses: actions/checkout@v2 + with: + submodules: recursive + - run: curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --disable traefik --disable servicelb --disable-cloud-controller --kubelet-arg cloud-provider=external" K3S_KUBECONFIG_MODE="644" sh - + - run: | + sleep 10 + kubectl "${{ env.KUBECONFIG }}" taint nodes --all node.cloudprovider.kubernetes.io/uninitialized=false:NoSchedule- + sleep 60 + kubectl "${{ env.KUBECONFIG }}" get nodes + kubectl "${{ env.KUBECONFIG }}" get pods -A + wget https://github.com/loxilb-io/loxi-ccm/raw/master/manifests/loxi-ccm-k3s.yaml + kubectl "${{ env.KUBECONFIG }}" apply -f ./loxi-ccm-k3s.yaml + sleep 60 + kubectl "${{ env.KUBECONFIG }}" get pods -A + - run: | + cd cicd/k3s-sctpmh/ + ./config.sh "${{ env.KUBECONFIG }}" + ./validation.sh "${{ env.KUBECONFIG }}" + ./rmconfig.sh "${{ env.KUBECONFIG }}" + cd - diff --git a/.github/workflows/k8s-calico-incluster.yml b/.github/workflows/k8s-calico-incluster.yml new file mode 100644 index 00000000..eb92d5f7 --- /dev/null +++ b/.github/workflows/k8s-calico-incluster.yml @@ -0,0 +1,36 @@ +name: K8s-Calico-Incluster-Sanity-CI +on: + # schedule: + # Runs "At 11:00 UTC every day-of-week" + #- cron: '0 11 * * *' + workflow_dispatch: + inputs: + testName: + description: 'Test Run-Name' + required: true + default: 'k8s-calico-incluster' +jobs: + test-runner: + name: k8s-calico-incluster-sanity + runs-on: [self-hosted, large] + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Run the test + run: | + cd cicd/k8s-calico-incluster + ./config.sh + ./validation.sh + cd - + + - name: Clean test-bed + if: success() || failure() + run: | + cd cicd/k8s-calico-incluster || true + ./rmconfig.sh + cd - diff --git a/.github/workflows/liveness-sanity-ubuntu-22.yml b/.github/workflows/liveness-sanity-ubuntu-22.yml index d5983ab8..24ce44c6 100644 --- a/.github/workflows/liveness-sanity-ubuntu-22.yml +++ b/.github/workflows/liveness-sanity-ubuntu-22.yml @@ -1,11 +1,8 @@ name: Liveness-LB-Sanity-CI-Ubuntu-22 - on: - #push: - # branches: - # - main - #pull_request: - # branches: [ "main" ] + schedule: + # Runs "At 17:00 UTC every day-of-week" + - cron: '0 17 * * *' workflow_dispatch: inputs: logLevel: @@ -14,11 +11,16 @@ on: default: 'warning' tags: description: 'Liveness LB Sanity Ubuntu 22' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed jobs: build: name: liveness-lb-sanity-ubuntu-22 runs-on: ubuntu-22.04 + if: github.repository == 'loxilb-io/loxilb' steps: - uses: actions/checkout@v2 with: @@ -28,25 +30,7 @@ jobs: with: go-version: '>=1.18.0' - run: sudo apt-get update - - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool lksctp-tools - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir 
build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - - run: docker pull ghcr.io/loxilb-io/loxilb:latest - - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp - - run: docker exec -dit loxilb mkllb_bpffs - - run: id=`docker ps -f name=loxilb | cut -d " " -f 1 | grep -iv "CONTAINER"` && docker commit $id ghcr.io/loxilb-io/loxilb:latest - - run: docker stop loxilb && docker rm loxilb + - run: sudo apt-get -y install iperf iproute2 nodejs socat ethtool lksctp-tools - run: | cd cicd/tcplbmon/ ./config.sh diff --git a/.github/workflows/liveness-sanity-ubuntu-24.yml b/.github/workflows/liveness-sanity-ubuntu-24.yml new file mode 100644 index 00000000..3a61c5fc --- /dev/null +++ b/.github/workflows/liveness-sanity-ubuntu-24.yml @@ -0,0 +1,89 @@ +name: Liveness-LB-Sanity-CI-Ubuntu-24 + +on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + tags: + description: 'Liveness LB Sanity Ubuntu 24' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed + +jobs: + build: + name: liveness-lb-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: actions/setup-python@v2 + - uses: actions/setup-go@v3 + with: + go-version: '>=1.18.0' + - run: sudo apt-get update + - run: sudo apt-get -y install linux-tools-$(uname -r) bridge-utils iperf iproute2 nodejs socat ethtool lksctp-tools + - run: | + cd cicd/tcplbmon/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/udplbmon/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/sctplbmon/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/tcplbmon6/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/tcplbepmod/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/lbtimeout/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/lb6timeout/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/httpsep/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/http2ep/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - diff --git a/.github/workflows/liveness-sanity.yml b/.github/workflows/liveness-sanity.yml index ddeadc1d..f1ebcc78 100644 --- a/.github/workflows/liveness-sanity.yml +++ b/.github/workflows/liveness-sanity.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build 
OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/nat66-sanity-ubuntu-22.yml b/.github/workflows/nat66-sanity-ubuntu-22.yml index 3abcd4c9..073a8b52 100644 --- a/.github/workflows/nat66-sanity-ubuntu-22.yml +++ b/.github/workflows/nat66-sanity-ubuntu-22.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/nat66-sanity-ubuntu-24.yml b/.github/workflows/nat66-sanity-ubuntu-24.yml new file mode 100644 index 00000000..9b52b87e --- /dev/null +++ b/.github/workflows/nat66-sanity-ubuntu-24.yml @@ -0,0 +1,53 @@ +name: NAT66-LB-Sanity-CI-Ubuntu-24 + +on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + tags: + description: 'NAT66 LB Sanity Ubuntu 24' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed + +jobs: + build: + name: nat66-lb-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: actions/setup-python@v2 + - uses: actions/setup-go@v3 + with: + go-version: '>=1.18.0' + - run: sudo apt-get update + - run: sudo apt-get -y install linux-tools-$(uname -r) bridge-utils iperf iproute2 nodejs socat ethtool + - run: | + cd cicd/nat66tcp/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/nat66udp/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/nat66sctp/ + #./config.sh + #./validation.sh + #./rmconfig.sh + cd - diff --git a/.github/workflows/nat66-sanity.yml b/.github/workflows/nat66-sanity.yml index 4938899c..8a4351b2 100644 --- a/.github/workflows/nat66-sanity.yml +++ b/.github/workflows/nat66-sanity.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on 
LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/perf.yml b/.github/workflows/perf.yml index 9c3c55cf..4988105f 100644 --- a/.github/workflows/perf.yml +++ b/.github/workflows/perf.yml @@ -23,7 +23,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-20.04, ubuntu-22.04] + os: [ubuntu-20.04, ubuntu-22.04, ubuntu-24.04] steps: - uses: actions/checkout@v2 with: @@ -33,24 +33,7 @@ jobs: with: go-version: '>=1.18.0' - run: sudo apt-get update - - run: if [[ ${{ matrix.os }} == 'ubuntu-22.04' ]]; then sudo apt-get -y install clang-13 lksctp-tools; else sudo apt-get -y install clang-10; fi - - run: sudo apt-get -y install llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat iperf3 - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - - run: sudo -E env "PATH=$PATH" make - - run: docker pull ghcr.io/loxilb-io/loxilb:latest - - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp-ebpf - - run: id=`docker ps -f name=loxilb | cut -d " " -f 1 | grep -iv "CONTAINER"` && docker commit $id ghcr.io/loxilb-io/loxilb:latest - - run: docker stop loxilb && docker rm loxilb + - run: sudo apt-get -y install lksctp-tools linux-tools-$(uname -r) bridge-utils iperf iproute2 nodejs socat iperf3 - run: | cd cicd/tcpsctpperf ./config.sh diff --git a/.github/workflows/scale-sanity-ubuntu-22.yml b/.github/workflows/scale-sanity-ubuntu-22.yml index 50eb2c07..085e5a31 100644 --- a/.github/workflows/scale-sanity-ubuntu-22.yml +++ b/.github/workflows/scale-sanity-ubuntu-22.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat lksctp-tools - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/scale-sanity-ubuntu-24.yml b/.github/workflows/scale-sanity-ubuntu-24.yml new file mode 100644 index 00000000..fff77c7a --- /dev/null +++ b/.github/workflows/scale-sanity-ubuntu-24.yml @@ -0,0 +1,41 @@ +name: Scale-Sanity-CI-Ubuntu-24 + +on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + 
workflow_dispatch: + inputs: + userInput: + description: 'Enter string to print at end' + required: true + default: 'Finished' + tags: + description: 'Scale Sanity Ubuntu 24' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed + +jobs: + build: + name: scale-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: actions/setup-python@v2 + - uses: actions/setup-go@v3 + with: + go-version: '>=1.18.0' + - run: sudo apt-get update + - run: sudo apt-get -y install linux-tools-$(uname -r) bridge-utils iperf iproute2 nodejs socat lksctp-tools + - run: | + cd cicd/tcpepscale/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - diff --git a/.github/workflows/scale-sanity.yml b/.github/workflows/scale-sanity.yml index ca4d8807..35fd189d 100644 --- a/.github/workflows/scale-sanity.yml +++ b/.github/workflows/scale-sanity.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/sctp-sanity-ubuntu-22.yml b/.github/workflows/sctp-sanity-ubuntu-22.yml index 32cb151f..a5b75331 100644 --- a/.github/workflows/sctp-sanity-ubuntu-22.yml +++ b/.github/workflows/sctp-sanity-ubuntu-22.yml @@ -1,11 +1,11 @@ name: SCTP-LB-Sanity-CI-Ubuntu-22 on: - #push: - # branches: - # - main - #pull_request: - # branches: [ "main" ] + push: + branches: + - main + pull_request: + branches: [ "main" ] workflow_dispatch: inputs: logLevel: @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool lksctp-tools - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/sctp-sanity-ubuntu-24.yml b/.github/workflows/sctp-sanity-ubuntu-24.yml new file mode 100644 index 00000000..82b74b01 --- /dev/null +++ b/.github/workflows/sctp-sanity-ubuntu-24.yml @@ -0,0 +1,59 @@ +name: SCTP-LB-Sanity-CI-Ubuntu-24 + +on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + 
required: true + default: 'warning' + tags: + description: 'SCTP LB Sanity Ubuntu 24' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed + +jobs: + build: + name: sctp-lb-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: actions/setup-python@v2 + - uses: actions/setup-go@v3 + with: + go-version: '>=1.18.0' + - run: sudo apt-get update + - run: sudo apt-get -y install linux-tools-$(uname -r) iperf iproute2 nodejs socat ethtool lksctp-tools + - run: | + cd cicd/sctplb/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/sctponearm/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/sctplbdsr/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/sctplblc/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - diff --git a/.github/workflows/sctp-sanity.yml b/.github/workflows/sctp-sanity.yml index 7abdc368..637b0feb 100644 --- a/.github/workflows/sctp-sanity.yml +++ b/.github/workflows/sctp-sanity.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/sctpmh-sanity.yml b/.github/workflows/sctpmh-sanity.yml index 1bfcb7a5..facc8fbb 100644 --- a/.github/workflows/sctpmh-sanity.yml +++ b/.github/workflows/sctpmh-sanity.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool curl lksctp-tools - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/tcp-sanity-ubuntu-22.yml b/.github/workflows/tcp-sanity-ubuntu-22.yml index 0f01a8de..ba91c851 100644 --- a/.github/workflows/tcp-sanity-ubuntu-22.yml +++ b/.github/workflows/tcp-sanity-ubuntu-22.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat 
ethtool - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/tcp-sanity-ubuntu-24.yml b/.github/workflows/tcp-sanity-ubuntu-24.yml new file mode 100644 index 00000000..e0c78eb5 --- /dev/null +++ b/.github/workflows/tcp-sanity-ubuntu-24.yml @@ -0,0 +1,71 @@ +name: TCP-LB-Sanity-CI-Ubuntu-24 + +on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + tags: + description: 'TCP LB Sanity Ubuntu 24' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed + +jobs: + build: + name: tcp-lb-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: actions/setup-python@v2 + - uses: actions/setup-go@v3 + with: + go-version: '>=1.18.0' + - run: sudo apt-get update + - run: sudo apt-get -y install linux-tools-$(uname -r) bridge-utils iperf iproute2 nodejs socat ethtool + - run: | + cd cicd/tcplb/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/tcplbmark/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/tcplbdsr1/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/tcplbdsr2/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/tcplbl3dsr/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - + - run: | + cd cicd/tcplbhash/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - diff --git a/.github/workflows/tcp-sanity.yml b/.github/workflows/tcp-sanity.yml index 6610eeda..083cee68 100644 --- a/.github/workflows/tcp-sanity.yml +++ b/.github/workflows/tcp-sanity.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/.github/workflows/tcpkali-longrun.yml b/.github/workflows/tcpkali-longrun.yml new file mode 100644 index 00000000..971b19b1 --- /dev/null +++ b/.github/workflows/tcpkali-longrun.yml @@ -0,0 +1,37 @@ +name: TCPKALI-Longrun-CI +on: + schedule: + # Runs "At 13:00 UTC every day-of-week" + - cron: '0 13 * * *' + workflow_dispatch: + inputs: + testName: + description: 'Test Run-Name' + required: true + default: 'tcpkali-longrun' 
+jobs: + test-runner: + name: tcpkali-longrun + runs-on: [self-hosted, sb] + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Run the test + run: | + cd cicd/tcpkali + ./config.sh + ./validation.sh + cd - + + - name: Clean test-bed + if: success() || failure() + run: | + cd cicd/tcpkali || true + ./rmconfig.sh + docker images -a | grep "loxilb-io/loxilb" | awk '{print $3}' | xargs docker rmi + cd - diff --git a/.github/workflows/test-scenario.yml b/.github/workflows/test-scenario.yml index e1488dfa..9781d2a5 100644 --- a/.github/workflows/test-scenario.yml +++ b/.github/workflows/test-scenario.yml @@ -15,6 +15,7 @@ on: options: - ubuntu-20.04 - ubuntu-22.04 + - ubuntu-24.04 jobs: build: @@ -29,25 +30,7 @@ jobs: with: go-version: '>=1.18.0' - run: sudo apt-get update - - run: if [[ ${{ github.event.inputs.runsOn }} == 'ubuntu-22.04' ]]; then sudo apt-get -y install clang-13 lksctp-tools; else sudo apt-get -y install clang-10; fi - - run: sudo apt-get -y install llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - - run: sudo -E env "PATH=$PATH" make - - run: docker pull ghcr.io/loxilb-io/loxilb:latest - - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp - - run: docker exec -dit loxilb mkllb_bpffs - - run: id=`docker ps -f name=loxilb | cut -d " " -f 1 | grep -iv "CONTAINER"` && docker commit $id ghcr.io/loxilb-io/loxilb:latest - - run: docker stop loxilb && docker rm loxilb + - run: sudo apt-get -y install lksctp-tools linux-tools-$(uname -r) bridge-utils iperf iproute2 nodejs socat - run: | cd cicd/${{ github.event.inputs.testName }}/ ./config.sh diff --git a/.github/workflows/udp-sanity-ubuntu-22.yml b/.github/workflows/udp-sanity-ubuntu-22.yml index 8ac6ab24..3ea2a3f8 100644 --- a/.github/workflows/udp-sanity-ubuntu-22.yml +++ b/.github/workflows/udp-sanity-ubuntu-22.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git 
a/.github/workflows/udp-sanity-ubuntu-24.yml b/.github/workflows/udp-sanity-ubuntu-24.yml new file mode 100644 index 00000000..e2794439 --- /dev/null +++ b/.github/workflows/udp-sanity-ubuntu-24.yml @@ -0,0 +1,41 @@ +name: UDP-LB-Sanity-CI-Ubuntu-24 + +on: + schedule: + # Runs "At 11:00 UTC every day-of-week" + - cron: '0 11 * * *' + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + tags: + description: 'UDP LB Sanity Ubuntu 24' + workflow_run: + workflows: ["Docker-Multi-Arch"] + types: + - completed + +jobs: + build: + name: udp-lb-sanity-ubuntu-24 + runs-on: ubuntu-24.04 + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: actions/setup-python@v2 + - uses: actions/setup-go@v3 + with: + go-version: '>=1.18.0' + - run: sudo apt-get update + - run: sudo apt-get -y install linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool + - run: | + cd cicd/udplb/ + ./config.sh + ./validation.sh + ./rmconfig.sh + cd - diff --git a/.github/workflows/udp-sanity.yml b/.github/workflows/udp-sanity.yml index 3bcd88b2..d46d4ab2 100644 --- a/.github/workflows/udp-sanity.yml +++ b/.github/workflows/udp-sanity.yml @@ -29,15 +29,6 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - - run: | - git clone --recurse-submodules https://github.com/loxilb-io/iproute2 iproute2-main - cd iproute2-main/libbpf/src/ - sudo make install - mkdir build - DESTDIR=build OBJDIR=build make install - cd - - cd iproute2-main/ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && sudo cp -f tc/tc /usr/local/sbin/ntc && cd - - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test diff --git a/ADOPTERS.md b/ADOPTERS.md new file mode 100644 index 00000000..a2bef1da --- /dev/null +++ b/ADOPTERS.md @@ -0,0 +1,24 @@ +# LoxiLB Adopters + +This list captures the set of organizations that are using LoxiLB within their environments +(in production or at various stages of R&D). If you are an adopter of LoxiLB and not yet on this +list, we encourage you to add your organization here as well! + +The goal for this list is to be the complete and authoritative source for the entire community of +LoxiLB adopters, and to give inspiration to others who are earlier in their LoxiLB journey. + +Contributing to this list is a small effort that has a **big impact** on the project's growth, +maturity, and momentum. Thank you to all adopters and contributors of the LoxiLB project! +Feel free to edit this file and open a Pull-Request to get your organization listed.
+ +## Adopters (in alphabetical order) + +| Organization | Contact/Reference | Status | Description of Use | +| ------------ | ------- | ------ | ------------------ | +| [BPFire](http://www.firebeeos.com/) | @vincentmli | ![deployment](https://img.shields.io/badge/deployment-blue) | eBPF-based “BPFire” Distro | +| [Friedrich-Alexander-Universität](https://www.fau.de/) | [Research Paper](https://arxiv.org/pdf/2405.00078) | ![research](https://img.shields.io/badge/research-orange) | eBPF Runtime Security | +| [KETI](https://www.keti.re.kr/main/main.php) | [JinWon Park](mailto:jwpark9010@keti.re.kr?subject=LoxiLB) | ![deployment](https://img.shields.io/badge/deployment-blue) | Cloud-native LB for on-prem MLOps Deployment | +| [Kookmin University](https://english.kookmin.ac.kr/) | *TBD* | ![testing](https://img.shields.io/badge/development%20&%20testing-green) | Cloud-native LB for on-prem ORAN Testbed | +| [Oracle OCI](https://www.oracle.com/) | @esirame | ![testing](https://img.shields.io/badge/development%20&%20testing-green) | Telco cloud-native LB/Ingress for N2 interface | +| [Samsung](https://www.samsung.com/) | [Conference Presentation](https://blog.naver.com/PostView.naver?blogId=n_cloudplatform&logNo=223518118906&navType=by) | ![testing](https://img.shields.io/badge/development%20&%20testing-green) | Telco cloud-native LB/Ingress for N2/N4 interface | +| [Viettel](https://vietteltelecom.vn/) | @chuhuutiennam | ![testing](https://img.shields.io/badge/development%20&%20testing-green) | SCP for cloud-native telco deployments | diff --git a/Dockerfile b/Dockerfile index afb1ecec..86b5d1b5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -21,27 +21,19 @@ RUN mkdir -p /opt/loxilb && \ apt-get update && apt-get install -y wget && \ arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && echo $arch && if [ "$arch" = "arm64" ] ; then apt-get install -y gcc-multilib-arm-linux-gnueabihf; else apt-get update && apt-get install -y gcc-multilib;fi && \ # Arch specific packages - GoLang - wget https://go.dev/dl/go1.22.0.linux-${arch}.tar.gz && tar -xzf go1.22.0.linux-${arch}.tar.gz --directory /usr/local/ && rm go1.22.0.linux-${arch}.tar.gz && \ + wget https://go.dev/dl/go1.23.0.linux-${arch}.tar.gz && tar -xzf go1.23.0.linux-${arch}.tar.gz --directory /usr/local/ && rm go1.23.0.linux-${arch}.tar.gz && \ # Dev and util packages - apt-get install -y clang llvm libelf-dev libpcap-dev vim net-tools \ + apt-get install -y clang llvm libelf-dev libpcap-dev vim net-tools ca-certificates \ elfutils dwarves git libbsd-dev bridge-utils wget unzip build-essential \ bison flex sudo iproute2 pkg-config tcpdump iputils-ping curl bash-completion && \ - # Install openssl-3.0.0 - wget https://www.openssl.org/source/openssl-3.0.0.tar.gz && tar -xvzf openssl-3.0.0.tar.gz && \ - cd openssl-3.0.0 && ./Configure enable-ktls '-Wl,-rpath,$(LIBRPATH)' --prefix=/usr/local/build && \ + # Install openssl-3.3.1 + wget https://github.com/openssl/openssl/releases/download/openssl-3.3.1/openssl-3.3.1.tar.gz && tar -xvzf openssl-3.3.1.tar.gz && \ + cd openssl-3.3.1 && ./Configure enable-ktls '-Wl,-rpath,$(LIBRPATH)' --prefix=/usr/local/build && \ make -j$(nproc) && make install_dev install_modules && cd - && \ cp -a /usr/local/build/include/openssl /usr/include/ && \ if [ -d /usr/local/build/lib64 ] ; then mv /usr/local/build/lib64 /usr/local/build/lib; fi && \ cp -fr /usr/local/build/lib/* /usr/lib/ && ldconfig && \ - rm -fr openssl-3.0.0* && \ - # Install loxilb's custom ntc tool - wget
https://github.com/loxilb-io/iproute2/archive/refs/heads/main.zip && \ - unzip main.zip && cd iproute2-main/ && rm -fr libbpf && wget https://github.com/loxilb-io/libbpf/archive/refs/heads/main.zip && \ - unzip main.zip && mv libbpf-main libbpf && cd libbpf/src/ && mkdir build && \ - make install && DESTDIR=build OBJDIR=build make install && cd - && \ - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:`pwd`/libbpf/src/ && \ - LIBBPF_FORCE=on LIBBPF_DIR=`pwd`/libbpf/src/build ./configure && make && \ - cp -f tc/tc /usr/local/sbin/ntc && cd .. && rm -fr main.zip iproute2-main && \ + rm -fr openssl-3.3.1* && \ # Install bpftool wget https://github.com/libbpf/bpftool/releases/download/v7.2.0/bpftool-libbpf-v7.2.0-sources.tar.gz && \ tar -xvzf bpftool-libbpf-v7.2.0-sources.tar.gz && cd bpftool/src/ && \ @@ -65,8 +57,8 @@ RUN mkdir -p /opt/loxilb && \ rm -fr /root/loxilb-io/loxilb/.github && mkdir -p /root/loxilb-io/loxilb/ && \ cp /usr/local/sbin/loxilb /root/loxilb-io/loxilb/loxilb && rm /usr/local/sbin/loxilb && \ # Install gobgp - wget https://github.com/osrg/gobgp/releases/download/v3.5.0/gobgp_3.5.0_linux_amd64.tar.gz && \ - tar -xzf gobgp_3.5.0_linux_amd64.tar.gz && rm gobgp_3.5.0_linux_amd64.tar.gz && \ + wget https://github.com/osrg/gobgp/releases/download/v3.29.0/gobgp_3.29.0_linux_${arch}.tar.gz && \ + tar -xzf gobgp_3.29.0_linux_${arch}.tar.gz && rm gobgp_3.29.0_linux_${arch}.tar.gz && \ mv gobgp* /usr/sbin/ && rm LICENSE README.md && \ apt-get purge -y clang llvm libelf-dev libpcap-dev libbsd-dev build-essential \ elfutils dwarves git bison flex wget unzip && apt-get -y autoremove && \ @@ -96,7 +88,7 @@ ENV PATH="${PATH}:/usr/local/go/bin" ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib64/" RUN apt-get update && apt-get install -y --no-install-recommends sudo \ - libbsd-dev iproute2 tcpdump bridge-utils net-tools libllvm10 && \ + libbsd-dev iproute2 tcpdump bridge-utils net-tools libllvm10 ca-certificates && \ rm -rf /var/lib/apt/lists/* && apt clean COPY --from=build /usr/lib64/libbpf* /usr/lib64/ @@ -108,7 +100,6 @@ COPY --from=build /usr/local/sbin/loxilb_dp_debug /usr/local/sbin/loxilb_dp_debu COPY --from=build /usr/local/sbin/loxicmd /usr/local/sbin/loxicmd COPY --from=build /opt/loxilb /opt/loxilb COPY --from=build /root/loxilb-io/loxilb/loxilb /root/loxilb-io/loxilb/loxilb -COPY --from=build /usr/local/sbin/ntc /usr/local/sbin/ntc COPY --from=build /usr/local/sbin/bpftool /usr/local/sbin/bpftool COPY --from=build /usr/sbin/gobgp* /usr/sbin/ COPY --from=build /root/.bashrc /root/.bashrc diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 9dbf90ed..427b122a 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1,12 +1,13 @@ # LoxiLB Maintainers -This is the list of LoxiLB project Maintainers. + +This is the current list of LoxiLB project Maintainers. 
| | Name | Company | |:---------------------------------------------------------------------------:|:-----------------------------------------------------:|:------------:| -| | [PacketCrunch](https://github.com/PacketCrunch) | Netlox | | | [UltraInstinct14](https://github.com/UltraInstinct14) | Netlox | | | [Trekkie](https://github.com/TrekkieCoder) | Netlox | | | [Nikhil Malik](https://github.com/nik-netlox) | Netlox | | | [BackGuyn Jung](https://github.com/backguynn) | Netlox | | | [Inho Gog](https://github.com/inhogog2) | Netlox | -| | [SeokHwan Kong](https://github.com/NLX-SeokHwanKong) | Netlox | + +If any person or organization wants to become a maintainer of LoxiLB, please feel free to go through the LoxiLB [Governance Guide](https://github.com/loxilb-io/loxilb/blob/main/GOVERNANCE.md) for further details. diff --git a/README-KOR.md b/README-KOR.md new file mode 100644 index 00000000..ae54f67e --- /dev/null +++ b/README-KOR.md @@ -0,0 +1,176 @@ +![image](https://github.com/loxilb-io/loxilb/assets/75648333/87da0183-1a65-493f-b6fe-5bc738ba5468) + + +[![Website](https://img.shields.io/static/v1?label=www&message=loxilb.io&color=blue?style=for-the-badge&logo=appveyor)](https://www.loxilb.io) [![eBPF Emerging Project](https://img.shields.io/badge/ebpf.io-Emerging--App-success)](https://ebpf.io/projects#loxilb) [![Go Report Card](https://goreportcard.com/badge/github.com/loxilb-io/loxilb)](https://goreportcard.com/report/github.com/loxilb-io/loxilb) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8472/badge)](https://www.bestpractices.dev/projects/8472) ![build workflow](https://github.com/loxilb-io/loxilb/actions/workflows/docker-image.yml/badge.svg) ![sanity workflow](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity.yml/badge.svg) +![apache](https://img.shields.io/badge/license-Apache-blue.svg) [![Info][docs-shield]][docs-url] [![Slack](https://img.shields.io/badge/community-join%20slack-blue)](https://join.slack.com/t/loxilb/shared_invite/zt-2b3xx14wg-P7WHj5C~OEON_jviF0ghcQ) + +## loxilb란 무엇인가? +loxilb는 GoLang/eBPF를 기반으로 한 오픈 소스 클라우드 네이티브 로드 밸런서로, 온-프레미스, 퍼블릭 클라우드 또는 하이브리드 K8s 환경 전반에 걸쳐 호환성을 달성하는 것을 목표로 합니다. loxilb는 텔코 클라우드(5G/6G), 모빌리티 및 엣지 컴퓨팅에서 클라우드 네이티브 기술 채택을 지원하기 위해 개발되고 있습니다. + +## loxilb와 함께하는 Kubernetes + +Kubernetes는 ClusterIP, NodePort, LoadBalancer, Ingress 등 여러 서비스 구조를 정의하여 파드에서 파드로, 파드에서 서비스로, 외부에서 서비스로의 통신을 가능하게 합니다. + +![LoxiLB Cover](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/photos/loxilb-cover.png) + +이 모든 서비스는 Layer4/Layer7에서 작동하는 로드 밸런서/프록시가 제공합니다. Kubernetes는 매우 모듈화되어 있으며, 다양한 소프트웨어 모듈이 이러한 서비스를 제공할 수 있습니다. 예를 들어, kube-proxy는 기본적으로 ClusterIP 와 NodePort 서비스를 제공하지만, LoadBalancer 와 Ingress 같은 일부 서비스는 기본적으로 제공되지 않습니다. + +로드 밸런서 서비스는 일반적으로 퍼블릭 클라우드 제공자가 관리 구성 요소로 함께 제공합니다. 그러나 온프레미스 및 자체 관리 클러스터의 경우 사용할 수 있는 옵션이 제한적입니다. 매니지드 K8S 서비스(예: EKS)의 경우에도 로드 밸런서를 클러스터 어디서나 가져오려는 사람들이 많습니다. 추가적으로, 텔코 5G/6G 및 엣지 서비스는 GTP, SCTP, SRv6, DTLS와 같은 범용적이지 않은 프로토콜 사용으로 인해 기존 K8S 서비스에서의 원활한 통합이 특히 어렵습니다. loxilb는 로드 밸런서 서비스 유형 기능을 주요 사용 사례로 제공합니다. loxilb는 사용자의 필요에 따라 클러스터 내 또는 클러스터 외부에서 실행할 수 있습니다. + +loxilb는 기본적으로 L4 로드 밸런서/서비스 프록시로 작동합니다. L4 로드 밸런싱이 우수한 성능과 기능을 제공하지만, 다양한 사용 사례를 위해 K8s에서 동일하게 성능이 뛰어난 L7 로드 밸런서도 필요합니다. loxilb는 또한 eBPF SOCKMAP Helper를 사용하여 향상된 Kubernetes Ingress 구현 형태로 L7 로드 밸런싱을 지원합니다. 이는 동일한 환경에서 L4와 L7 로드 밸런싱이 필요한 사용자에게도 유리합니다.
+ +추가적으로 loxilb는 다음을 지원합니다: +- [x] eBPF를 통한 kube-proxy 교체(Kubernetes의 전체 클러스터 메쉬 구현) +- [x] 인그레스 지원 +- [x] Kubernetes Gateway API +- [ ] Kubernetes 네트워크 정책 + +## loxilb와 함께하는 텔코 클라우드 +클라우드 네이티브 기능으로 텔코-클라우드를 배포하려면 loxilb를 SCP(Service Communication Proxy: 서비스 통신 프록시)로 사용할 수 있습니다. SCP는 [3GPP](https://www.etsi.org/deliver/etsi_ts/129500_129599/129500/16.04.00_60/ts_129500v160400p.pdf)에서 정의한 통신 프록시로, 클라우드 네이티브 환경에서 실행되는 텔코 마이크로 서비스에 목적을 두고 있습니다. 자세한 내용은 이 [블로그](https://dev.to/nikhilmalik/5g-service-communication-proxy-with-loxilb-4242)를 참조하십시오. +![image](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/photos/scp.svg) + +텔코-클라우드는 N2, N4, E2(ORAN), S6x, 5GLAN, GTP 등 다양한 인터페이스와 표준을 통한 로드 밸런싱 및 통신을 필요로 합니다. 각각 고유한 챌린지를 요구하며, loxilb는 이를 해결하는 것을 목표로 합니다. 예를 들어: +- N4는 PFCP 수준의 세션 인텔리전스를 요구합니다. +- N2는 NGAP 파싱 기능이 필요합니다(관련 블로그 - [블로그-1](https://www.loxilb.io/post/ngap-load-balancing-with-loxilb), [블로그-2](https://futuredon.medium.com/5g-sctp-loadbalancer-using-loxilb-b525198a9103), [블로그-3](https://medium.com/@ben0978327139/5g-sctp-loadbalancer-using-loxilb-applying-on-free5gc-b5c05bb723f0)). +- S6x는 Diameter/SCTP 멀티-호밍 LB 지원이 필요합니다(관련 [블로그](https://www.loxilb.io/post/k8s-introducing-sctp-multihoming-functionality-with-loxilb)). +- MEC 사용 사례는 UL-CL 이해가 필요할 수 있습니다(관련 [블로그](https://futuredon.medium.com/5g-uplink-classifier-using-loxilb-7593a4d66f4c)). +- 미션 크리티컬 애플리케이션을 위해 히트리스 장애 조치 지원이 필수적일 수 있습니다. +- E2는 OpenVPN과 번들된 SCTP-LB가 필요할 수 있습니다. +- 클라우드 네이티브 VOIP를 가능하게 하는 SIP 지원이 필요합니다. + +## loxilb를 선택해야 하는 이유? + +- 다양한 아키텍처 전반에서 경쟁자보다 ```성능```이 훨씬 뛰어납니다. + * [싱글 노드 성능](https://loxilb-io.github.io/loxilbdocs/perf-single/) + * [멀티 노드 성능](https://loxilb-io.github.io/loxilbdocs/perf-multi/) + * [ARM에서의 성능](https://www.loxilb.io/post/running-loxilb-on-aws-graviton2-based-ec2-instance) + * [성능 관련 데모](https://www.youtube.com/watch?v=MJXcM0x6IeQ) +- ebpf를 활용하여 ```유연```하고 ```사용자 정의```가 가능합니다. +- 워크로드에 대한 고급 ```서비스 품질```(LB별, 엔드포인트별 또는 클라이언트별) +- ```어떤``` Kubernetes 배포판/CNI와도 호환 - k8s/k3s/k0s/kind/OpenShift + Calico/Flannel/Cilium/Weave/Multus 등 +- loxilb를 사용한 kube-proxy 교체는 ```간단한 플러그인```으로 기존에 배포된 파드 네트워킹 소프트웨어와 통합이 가능합니다. 
+- K8s에서 ```SCTP 워크로드```(멀티-호밍 포함)에 대한 광범위한 지원 +- ```NAT66, NAT64```를 지원하는 듀얼 스택 K8s +- ```멀티 클러스터``` K8s 지원 (계획 중 🚧) +- ```어떤``` 클라우드(퍼블릭 클라우드/온프레미스) 또는 ```독립형``` 환경에서도 실행 가능 + +## loxilb의 전반적인 기능 +- L4/NAT 상태 저장 로드밸런서 + * NAT44, NAT66, NAT64를 지원하며 One-ARM, FullNAT, DSR 등 다양한 모드 제공 + * TCP, UDP, SCTP(멀티-호밍 포함), QUIC, FTP, TFTP 등 지원 +- Hitless/maglev/cgnat 클러스터링을 위한 BFD 감지로 고가용성 지원 +- 클라우드 네이티브 환경을 위한 광범위하고 확장 가능한 엔드포인트 라이브니스 프로브 +- 상태 저장 방화벽 및 IPSEC/Wireguard 지원 +- [Conntrack](https://thermalcircle.de/doku.php?id=blog:linux:connection_tracking_1_modules_and_hooks), QoS 등 기능의 최적화된 구현 +- ipvs와 완전 호환(ipvs 정책 자동 상속 가능) +- 정책 지향 L7 프록시 지원 - HTTP1.0, 1.1, 2.0, 3.0 + +## loxilb의 구성 요소 +- GoLang 기반의 제어 평면 구성 요소 +- 확장 가능하고 효율적인 [eBPF](https://ebpf.io/) 기반 데이터 경로 구현 +- 통합된 goBGP 기반 라우팅 스택 +- Go로 작성된 Kubernetes 오퍼레이터 [kube-loxilb](https://github.com/loxilb-io/kube-loxilb) +- Kubernetes 인그레스 구현 + +## 아키텍처 고려 사항 +- [kube-loxilb와 함께하는 loxilb 모드 및 배포 이해하기](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/kube-loxilb.md) +- [loxilb와 함께하는 고가용성 이해하기](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/ha-deploy.md) + +## 시작하기 +#### 클러스터 외부에서 loxilb 실행 +- [K3s : flannel & loxilb](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k3s_quick_start_flannel.md) +- [K3s : calico & loxilb](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k3s_quick_start_calico.md) +- [K3s : cilium & loxilb](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/quick_start_with_cilium.md) +- [K0s : kube-router & loxilb](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k0s_quick_start.md) +- [EKS : loxilb 외부 모드](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/eks-external.md) + +#### 클러스터 내에서 loxilb 실행 +- [K3s : loxilb 인-클러스터](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k3s_quick_start_incluster.md) +- [K0s : loxilb 인-클러스터](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k0s_quick_start_incluster.md) +- [MicroK8s : loxilb 인-클러스터](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/microk8s_quick_start_incluster.md) +- [EKS : loxilb 인-클러스터](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/eks-incluster.md) + +#### 서비스 프록시로서의 loxilb(kube-proxy 대체) +- [K3s : flannel 서비스 프록시 & loxilb](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/service-proxy-flannel.md) +- [K3s : calico 서비스 프록시 & loxilb](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/service-proxy-calico.md) + +#### Kubernetes 인그레스로서의 loxilb +- [K3s: loxilb-ingress 실행 방법](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/loxilb-ingress.md) + +#### 독립형 모드에서 loxilb 실행 +- [독립형 모드에서 loxilb 실행](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/standalone.md) + +## 고급 가이드 +- [How-To : loxilb와 함께하는 서비스 그룹 존 설정](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/service-zones.md) +- [How-To : K8s 외부의 엔드포인트에 접근하기](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/ext-ep.md) +- [How-To : loxilb를 사용한 멀티 서버 K3s HA 배포](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k3s-multi-master.md) +- [How-To : AWS에서 멀티-AZ HA 지원과 함께 loxilb 배포](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/aws-multi-az.md) +- [How-To : ingress-nginx와 함께 loxilb 배포](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/loxilb-nginx-ingress.md) + +## 배경 지식 +- [eBPF란 무엇인가](ebpf.md) +- [k8s 서비스 - 로드 밸런서란 무엇인가](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/lb.md) +- [간단한 아키텍처](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/arch.md) +- [코드
조직](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/code.md) +- [loxilb의 eBPF 내부](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/loxilbebpf.md) +- [loxilb NAT 모드란 무엇인가](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/nat.md) +- [loxilb 로드 밸런서 알고리즘](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/lb-algo.md) +- [수동 빌드/실행 단계](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/run.md) +- [loxilb 디버깅](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/debugging.md) +- [loxicmd 커맨드 사용법](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/cmd.md) +- [loxicmd 개발자 가이드](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/cmd-dev.md) +- [loxilb API 개발자 가이드](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/api-dev.md) +- [API 참조 - loxilb 웹 API](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/api.md) +- [성능 보고서](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/perf.md) +- [개발 로드맵](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/roadmap.md) +- [기여하기](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/contribute.md) +- [시스템 요구 사항](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/requirements.md) +- [자주 묻는 질문(FAQ)](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/faq.md) +- [블로그](https://www.loxilb.io/blog) +- [데모 비디오](https://www.youtube.com/@loxilb697) + +## 커뮤니티 + +### Slack +loxilb 개발자 및 다른 loxilb 사용자와 채팅을 하려면 loxilb [Slack](https://www.loxilb.io/members) 채널에 가입하세요. 이곳은 loxilb에 대해 배우고, 질문을 하고, 협력작업을 하기에 좋은 장소입니다. + +### 일반 토론 +GitHub [토론](https://github.com/loxilb-io/loxilb/discussions)에 자유롭게 질문을 게시하세요. 문제나 버그가 발견되면 GitHub에서 [이슈](https://github.com/loxilb-io/loxilb/issues)를 제기해 주세요. loxilb 커뮤니티의 멤버들이 도와드릴 것입니다. + +## CICD 워크플로우 상태 + +| 기능(Ubuntu20.04) | 기능(Ubuntu22.04)| 기능(RedHat9)| +|:----------|:-------------|:-------------| +| ![build workflow](https://github.com/loxilb-io/loxilb/actions/workflows/docker-image.yml/badge.svg) | [![Docker-Multi-Arch](https://github.com/loxilb-io/loxilb/actions/workflows/docker-multiarch.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/docker-multiarch.yml) | [![SCTP-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity-rh9.yml) | +| ![simple workflow](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity.yml/badge.svg) | [![Sanity-CI-Ubuntu-22](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-ubuntu-22.yml) | [![Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-rh9.yml) | +| [![tcp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity.yml) | [![tcp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-ubuntu-22.yml) | [![TCP-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-rh9.yml) | +| [![udp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity.yml) | 
[![udp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-ubuntu-22.yml) | [![UDP-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-rh9.yml) | +| [![sctp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity.yml) | ![ipsec-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-ubuntu-22.yml/badge.svg) | [![IPsec-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-rh9.yml) | +| ![extlb workflow](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity.yml/badge.svg) | ![nat66-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-ubuntu-22.yml/badge.svg) | [![NAT66-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-rh9.yml) | +| ![ipsec-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity.yml/badge.svg) | [![Scale-Sanity-CI-Ubuntu-22](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity-ubuntu-22.yml) | [![Adv-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity-rh9.yml) | +| ![scale-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity.yml/badge.svg) | [![perf-CI](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml) | | +| [![liveness-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity.yml) | | | +| ![nat66-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity.yml/badge.svg) | | | +| [![perf-CI](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml) | | | + +| K3s 테스트 | K8s 클러스터 테스트 | EKS 테스트 | +|:-------------|:-------------|:-------------| +|[![K3s-Base-Sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-base-sanity.yml/badge.svg?branch=main)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-base-sanity.yml) | [![K8s-Calico-Cluster-IPVS-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k8s-calico-ipvs.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k8s-calico-ipvs.yml) | ![EKS](https://github.com/loxilb-io/loxilb/actions/workflows/eks.yaml/badge.svg?branch=main) | +| [![k3s-flannel-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-flannel.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-flannel.yml) | [![K8s-Calico-Cluster-IPVS2-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k8s-calico-ipvs2.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k8s-calico-ipvs2.yml) | | +| 
[![k3s-flannel-ubuntu22-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-flannel-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-flannel-ubuntu-22.yml) | [![K8s-Calico-Cluster-IPVS3-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k8s-calico-ipvs3.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k8s-calico-ipvs3.yml) | | +|[![k3s-flannel-cluster-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-flannel-cluster.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-flannel-cluster.yml) | [![K8s-Calico-Cluster-IPVS3-HA-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k8s-calico-ipvs3-ha.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k8s-calico-ipvs3-ha.yml) | | +| [![k3s-flannel-incluster-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-flannel-incluster.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-flannel-incluster.yml) | | | +|[![k3s-flannel-incluster-l2-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-flannel-incluster-l2.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-flannel-incluster-l2.yml) | | | +| [![k3s-calico-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-calico.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-calico.yml) | | | +| [![k3s-cilium-cluster-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-cilium-cluster.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-cilium-cluster.yml) | | +| [![k3s-sctpmh-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-sctpmh.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-sctpmh.yml) | | | +| [![k3s-sctpmh-ubuntu22-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-sctpmh-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-sctpmh-ubuntu22.yml) | | | +| [![k3s-sctpmh-2-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-sctpmh-2.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-sctpmh-2.yml) | | | + + +## 📚 자세한 정보는 loxilb [웹사이트](https://www.loxilb.io)를 확인하십시오. + +[docs-shield]: https://img.shields.io/badge/info-docs-blue +[docs-url]: https://loxilb-io.github.io/loxilbdocs/ +[slack=shield]: https://img.shields.io/badge/Community-Join%20Slack-blue +[slack-url]: https://www.loxilb.io/members diff --git a/README.md b/README.md index d6263a00..c0b15b75 100644 --- a/README.md +++ b/README.md @@ -5,20 +5,39 @@ ![apache](https://img.shields.io/badge/license-Apache-blue.svg) [![Info][docs-shield]][docs-url] [![Slack](https://img.shields.io/badge/community-join%20slack-blue)](https://join.slack.com/t/loxilb/shared_invite/zt-2b3xx14wg-P7WHj5C~OEON_jviF0ghcQ) ## What is loxilb -loxilb is an open source cloud-native load-balancer based on GoLang/eBPF with the goal of achieving cross-compatibility across a wide range of on-prem, public-cloud or hybrid K8s environments. +loxilb is an open source cloud-native load-balancer based on GoLang/eBPF with the goal of achieving cross-compatibility across a wide range of on-prem, public-cloud or hybrid K8s environments. loxilb is being developed to support the adoption of cloud-native tech in telco, mobility, and edge computing. ## Kubernetes with loxilb -Kubernetes defines many service constructs like cluster-ip, node-port, load-balancer etc for pod to pod, pod to service and service from outside communication. -
- -
+Kubernetes defines many service constructs like cluster-ip, node-port, load-balancer, ingress etc for pod to pod, pod to service and outside-world to service communication. -All these services are provided by load-balancers/proxies operating at Layer4/Layer7. Since Kubernetes's is highly modular, these services can be provided by different software modules. For example, kube-proxy is used by default to provide cluster-ip and node-port services. +![LoxiLB Cover](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/photos/loxilb-cover.png) -Service type load-balancer is usually provided by public cloud-provider(s) as a managed entity. But for on-prem and self-managed clusters, there are only a few good options available. Even for provider-managed K8s like EKS, there are many who would want to bring their own LB to clusters running anywhere. loxilb provides service type load-balancer as its main use-case. loxilb can be run in-cluster or ext-to-cluster as per user need. +All these services are provided by load-balancers/proxies operating at Layer4/Layer7. Since Kubernetes is highly modular, these services can be provided by different software modules. For example, kube-proxy is used by default to provide cluster-ip and node-port services. For some services like LB and Ingress, no default is usually provided. -Additionally, loxilb can also support cluster-ip and node-port services, thereby providing full cluster-mesh implementation for Kubernetes (replacment of kube-proxy). +Service type load-balancer is usually provided by public cloud-provider(s) as a managed entity. But for on-prem and self-managed clusters, there are only a few good options available. Even for provider-managed K8s like EKS, there are many who would want to bring their own LB to clusters running anywhere. Additionally, Telco 5G and edge services introduce unique challenges due to the variety of exotic protocols involved, including GTP, SCTP, SRv6, SEPP, and DTLS, making seamless integration particularly challenging. loxilb provides service type load-balancer as its main use-case. loxilb can be run in-cluster or ext-to-cluster as per user need. + +loxilb works as an L4 load-balancer/service-proxy by default. Although L4 load-balancing provides great performance and functionality, an equally performant L7 load-balancer is also necessary in K8s for various use-cases. loxilb also supports L7 load-balancing in the form of a Kubernetes Ingress implementation which is enhanced with eBPF sockmap helpers. This also benefits users who need L4 and L7 load-balancing under the same hood. + +Additionally, loxilb also supports: +- [x] kube-proxy replacement with eBPF(full cluster-mesh implementation for Kubernetes) +- [x] Ingress Support +- [x] Kubernetes Gateway API +- [ ] Kubernetes Network Policies + +## Telco-Cloud with loxilb +For deploying telco-cloud with cloud-native functions, loxilb can be used as an enhanced SCP(service communication proxy). SCP is a communication proxy defined by [3GPP](https://www.etsi.org/deliver/etsi_ts/129500_129599/129500/16.04.00_60/ts_129500v160400p.pdf) and aimed at telco micro-services running in a cloud-native environment. Read more in this [blog](https://dev.to/nikhilmalik/5g-service-communication-proxy-with-loxilb-4242). +![image](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/photos/scp.svg) + +Telco-cloud requires load-balancing and communication across various interfaces/standards like N2, N4, E2(ORAN), S6x, 5GLAN, GTP etc.
Each of these presents its own unique challenges which loxilb aims to solve e.g.: +- N4 requires PFCP level session-intelligence +- N2 requires NGAP parsing capability(Related Blogs - [Blog-1](https://www.loxilb.io/post/ngap-load-balancing-with-loxilb), [Blog-2](https://futuredon.medium.com/5g-sctp-loadbalancer-using-loxilb-b525198a9103), [Blog-3](https://medium.com/@ben0978327139/5g-sctp-loadbalancer-using-loxilb-applying-on-free5gc-b5c05bb723f0)) +- S6x requires Diameter/SCTP multi-homing LB support(Related [Blog](https://www.loxilb.io/post/k8s-introducing-sctp-multihoming-functionality-with-loxilb)) +- MEC use-cases might require UL-CL understanding(Related [Blog](https://futuredon.medium.com/5g-uplink-classifier-using-loxilb-7593a4d66f4c)) +- Hitless failover support might be essential for mission-critical applications +- E2 might require SCTP-LB with OpenVPN bundled together +- SIP support is needed to enable cloud-native VOIP +- N32 requires support for Security Edge Protection Proxy(SEPP) ## Why choose loxilb? @@ -45,26 +64,14 @@ Additionally, loxilb can also support cluster-ip and node-port services, thereby - Stateful firewalling and IPSEC/Wireguard support - Optimized implementation for features like [Conntrack](https://thermalcircle.de/doku.php?id=blog:linux:connection_tracking_1_modules_and_hooks), QoS etc - Full compatibility for ipvs (ipvs policies can be auto inherited) -- Policy oriented L7 proxy support - HTTP1.0, 1.1, 2.0 etc (planned 🚧) +- Policy oriented L7 proxy support - HTTP1.0, 1.1, 2.0, 3.0 ## Components of loxilb - GoLang based control plane components - A scalable/efficient [eBPF](https://ebpf.io/) based data-path implementation - Integrated goBGP based routing stack -- A kubernetes agent [kube-loxilb](https://github.com/loxilb-io/kube-loxilb) written in Go - -## Layer4 Vs Layer7 -loxilb works as a L4 load-balancer/service-proxy by default. Although it provides great performance, at times, L7 load-balancing might become necessary in K8s. There are many good L7 proxies already available for K8s. Still, we are working on providing a great L7 solution natively in eBPF. It is a tough endeavor one which should reap great benefits once completed. Please keep an eye for updates on this. - -## Telco-Cloud with loxilb -For deploying telco-cloud with cloud-native functions, loxilb can be used as a SCP(service communication proxy). SCP is a communication proxy defined by [3GPP](https://www.etsi.org/deliver/etsi_ts/129500_129599/129500/16.04.00_60/ts_129500v160400p.pdf) and aimed at telco micro-services running in cloud-native environment. Telco-cloud requires load-balancing and communication across various interfaces/standards like N2, N4, E2(ORAN), S6x, 5GLAN, GTP etc.
Each of these present its own unique challenges which loxilb aims to solve e.g.: -- N4 requires PFCP level session-intelligence -- N2 requires NGAP parsing capability -- S6x requires Diameter/SCTP multi-homing LB support -- MEC use-cases might require UL-CL understanding -- Hitless failover support might be essential for mission-critical applications -- E2 might require SCTP-LB with OpenVPN bundled together -- SIP support is needed to enable cloud-native VOIP +- A kubernetes operator [kube-loxilb](https://github.com/loxilb-io/kube-loxilb) written in Go +- A kubernetes ingress [implementation](https://github.com/loxilb-io/loxilb-ingress) ## Architectural Considerations - [Understanding loxilb modes and deployment in K8s with kube-loxilb](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/kube-loxilb.md) @@ -88,6 +95,9 @@ For deploying telco-cloud with cloud-native functions, loxilb can be used as a S - [K3s : loxilb service-proxy with flannel](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/service-proxy-flannel.md) - [K3s : loxilb service-proxy with calico](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/service-proxy-calico.md) +#### loxilb as Kubernetes Ingress +- [K3s: How to run loxilb-ingress](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/loxilb-ingress.md) + #### loxilb in standalone mode - [Run loxilb standalone](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/standalone.md) @@ -96,7 +106,8 @@ For deploying telco-cloud with cloud-native functions, loxilb can be used as a S - [How-To : Access end-points outside K8s](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/ext-ep.md) - [How-To : Deploy multi-server K3s HA with loxilb](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/k3s-multi-master.md) - [How-To : Deploy loxilb with multi-AZ HA support in AWS](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/aws-multi-az.md) -- [How-To : Deploy loxilb with Ingress](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/loxilb-nginx-ingress.md) +- [How-To : Deploy loxilb with multi-cloud HA support](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/multi-cloud-ha.md) +- [How-To : Deploy loxilb with ingress-nginx](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/loxilb-nginx-ingress.md) ## Knowledge-Base - [What is eBPF](ebpf.md) @@ -118,6 +129,7 @@ For deploying telco-cloud with cloud-native functions, loxilb can be used as a S - [System Requirements](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/requirements.md) - [Frequenctly Asked Questions- FAQs](https://github.com/loxilb-io/loxilbdocs/blob/main/docs/faq.md) - [Blogs](https://www.loxilb.io/blog) +- [Demo Videos](https://www.youtube.com/@loxilb697) ## Community @@ -127,21 +139,29 @@ Join the loxilb [Slack](https://www.loxilb.io/members) channel to chat with loxi ### General Discussion Feel free to post your queries in github [discussion](https://github.com/loxilb-io/loxilb/discussions). If you find any issue/bugs, please raise an [issue](https://github.com/loxilb-io/loxilb/issues) in github and members from loxilb community will be happy to help. 
+### Community Posts +- [5G SCTP Load Balancer using LoxiLB](https://futuredon.medium.com/5g-sctp-loadbalancer-using-loxilb-b525198a9103) +- [5G Uplink Classifier using LoxiLB](https://futuredon.medium.com/5g-uplink-classifier-using-loxilb-7593a4d66f4c) +- [5G SCTP Load Balancer with free5gc](https://medium.com/@ben0978327139/5g-sctp-loadbalancer-using-loxilb-applying-on-free5gc-b5c05bb723f0) +- [K8s - Bring load balancing to Multus workloads with LoxiLB](https://cloudybytes.medium.com/k8s-bringing-load-balancing-to-multus-workloads-with-loxilb-a0746f270abe) +- [K3s - Using LoxiLB as External Service Load Balancer](https://cloudybytes.medium.com/k3s-using-loxilb-as-external-service-lb-2ea4ce61e159) +- [Kubernetes Services - Achieving Optimal performance is elusive](https://cloudybytes.medium.com/kubernetes-services-achieving-optimal-performance-is-elusive-5def5183c281) + ## CICD Workflow Status -| Features(Ubuntu20.04) | Features(Ubuntu22.04)| Features(RedHat9)| -|:----------|:-------------|:-------------| -| ![build workflow](https://github.com/loxilb-io/loxilb/actions/workflows/docker-image.yml/badge.svg) | [![Docker-Multi-Arch](https://github.com/loxilb-io/loxilb/actions/workflows/docker-multiarch.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/docker-multiarch.yml) | [![SCTP-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity-rh9.yml) | -| ![simple workflow](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity.yml/badge.svg) | [![Sanity-CI-Ubuntu-22](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-ubuntu-22.yml) | [![Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-rh9.yml) | -| [![tcp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity.yml) | [![tcp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-ubuntu-22.yml) | [![TCP-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-rh9.yml) | -| [![udp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity.yml) | [![udp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-ubuntu-22.yml) | [![UDP-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-rh9.yml) | -| [![sctp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity.yml) | ![ipsec-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-ubuntu-22.yml/badge.svg) | 
[![IPsec-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-rh9.yml) | -| ![extlb workflow](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity.yml/badge.svg) | ![nat66-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-ubuntu-22.yml/badge.svg) | [![NAT66-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-rh9.yml) | -| ![ipsec-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity.yml/badge.svg) | [![Scale-Sanity-CI-Ubuntu-22](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity-ubuntu-22.yml) | [![Adv-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity-rh9.yml) | -| ![scale-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity.yml/badge.svg) | [![perf-CI](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml) | | -| [![liveness-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity.yml) | | | -| ![nat66-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity.yml/badge.svg) | | | -| [![perf-CI](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml) | | | +| Features(Ubuntu20.04) | Features(Ubuntu22.04)| Features(Ubuntu24.04)| Features(RedHat9)| +|:----------|:-------------|:-------------|:-------------| +| [![build workflow](https://github.com/loxilb-io/loxilb/actions/workflows/docker-image.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/docker-image.yml) | [![Docker-Multi-Arch](https://github.com/loxilb-io/loxilb/actions/workflows/docker-multiarch.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/docker-multiarch.yml) | [![Docker-Multi-Arch](https://github.com/loxilb-io/loxilb/actions/workflows/docker-multiarch.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/docker-multiarch.yml) | [![Docker-Multi-Arch](https://github.com/loxilb-io/loxilb/actions/workflows/docker-multiarch.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/docker-multiarch.yml) | +| [![simple workflow](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity.yml) | [![Sanity-CI-Ubuntu-22](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-ubuntu-22.yml) | [![Sanity-CI-Ubuntu-24](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-ubuntu-24.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-ubuntu-24.yml) | [![Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/basic-sanity-rh9.yml) | +| 
[![tcp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity.yml) | [![tcp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-ubuntu-22.yml) | [![tcp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-ubuntu-24.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-ubuntu-24.yml) | [![TCP-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/tcp-sanity-rh9.yml) | +| [![udp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity.yml) | [![udp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-ubuntu-22.yml) | [![udp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-ubuntu-24.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-ubuntu-24.yml) | [![UDP-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/udp-sanity-rh9.yml) | +| [![sctp-lb-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity.yml) | [![SCTP-LB-Sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity-ubuntu-22.yml) | [![SCTP-LB-Sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity-ubuntu-24.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity-ubuntu-24.yml) |[![SCTP-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/sctp-sanity-rh9.yml) | +| [![extlb workflow](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity.yml)| [![extlb workflow](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity-ubuntu-22.yml) | [![extlb workflow](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity-ubuntu-24.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity-ubuntu-24.yml) | [![Adv-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/advanced-lb-sanity-rh9.yml)| +| [![nat66-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity.yml) | [![nat66-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-ubuntu-22.yml) | 
[![nat66-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-ubuntu-24.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-ubuntu-24.yml) | [![NAT66-LB-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/nat66-sanity-rh9.yml) | +| [![ipsec-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity.yml) | [![ipsec-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-ubuntu-22.yml) | [![ipsec-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-ubuntu-24.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-ubuntu-24.yml) | [![IPsec-Sanity-CI-RH9](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/ipsec-sanity-rh9.yml) | +| [![liveness-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity.yml) | [![liveness-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity-ubuntu-22.yml) | [![liveness-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity-ubuntu-24.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity-ubuntu-24.yml) | [![liveness-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity-rh9.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/liveness-sanity-rh9.yml) | +|![scale-sanity-CI](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity.yml/badge.svg) | [![Scale-Sanity-CI-Ubuntu-22](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity-ubuntu-22.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity-ubuntu-22.yml) | [![Scale-Sanity-CI-Ubuntu-24](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity-ubuntu-24.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/scale-sanity-ubuntu-24.yml) | | +|[![perf-CI](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml) | [![perf-CI](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml) |[![perf-CI](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/perf.yml) | | | K3s Tests | K8s Cluster Tests | EKS Test | |:-------------|:-------------|:-------------| @@ -158,9 +178,6 @@ Feel free to post your queries in github [discussion](https://github.com/loxilb- | [![k3s-sctpmh-2-CI](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-sctpmh-2.yml/badge.svg)](https://github.com/loxilb-io/loxilb/actions/workflows/k3s-sctpmh-2.yml) | | | - - - ## 📚 Please check loxilb [website](https://www.loxilb.io) for more detailed info. 
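The `api/loxinlp/nlp.go` diff that follows introduces an interface whitelist alongside the existing blacklist: when a whitelist pattern other than "none" is configured, `NlpIsBlackListedIntf` inverts a whitelist-regex match and the blacklist is not consulted. A minimal Go sketch of that matching rule (the `masterIdx` handling is omitted, and the sample patterns and interface names are illustrative only, not from the repo):

```go
package main

import (
	"fmt"
	"regexp"
)

// isFiltered mirrors the NlpIsBlackListedIntf logic from the diff below:
// an active (non-"none") whitelist takes precedence and keeps only matching
// interfaces; otherwise "lo" and blacklist matches are filtered out.
func isFiltered(name, whiteList string, wlRgx, blRgx *regexp.Regexp) bool {
	if whiteList != "none" {
		return !wlRgx.MatchString(name) // keep only whitelisted interfaces
	}
	if name == "lo" {
		return true
	}
	return blRgx.MatchString(name)
}

func main() {
	wl := "eth.*|ens.*" // illustrative whitelist pattern
	wlRgx := regexp.MustCompile(wl)
	blRgx := regexp.MustCompile("docker.*") // illustrative blacklist pattern
	for _, intf := range []string{"eth0", "docker0", "lo"} {
		fmt.Printf("%-8s filtered=%v\n", intf, isFiltered(intf, wl, wlRgx, blRgx))
	}
}
```

With the whitelist active, only `eth0` passes; setting `whiteList` to "none" falls back to the loopback and blacklist checks, matching the pre-existing behaviour.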
[docs-shield]: https://img.shields.io/badge/info-docs-blue diff --git a/api/loxinlp/nlp.go b/api/loxinlp/nlp.go index 4bdce6cf..bcc15c16 100644 --- a/api/loxinlp/nlp.go +++ b/api/loxinlp/nlp.go @@ -86,6 +86,8 @@ type NlH struct { IMap map[string]Intf BlackList string BLRgx *regexp.Regexp + WhiteList string + WLRgx *regexp.Regexp } var ( @@ -97,7 +99,12 @@ func NlpRegister(hook cmn.NetHookInterface) { hooks = hook } -func iSBlackListedIntf(name string, masterIdx int) bool { +func NlpIsBlackListedIntf(name string, masterIdx int) bool { + if nNl.WhiteList != "none" { + filter := nNl.WLRgx.MatchString(name) + return !filter + } + if name == "lo" { return true } @@ -1189,8 +1196,18 @@ func AddRoute(route nlp.Route) int { ipNet = *route.Dst } - ret, err := hooks.NetRouteAdd(&cmn.RouteMod{Protocol: int(route.Protocol), Flags: route.Flags, - Gw: route.Gw, LinkIndex: route.LinkIndex, Dst: ipNet}) + var gws []cmn.GWInfo + + if len(route.MultiPath) <= 0 { + gw := cmn.GWInfo{Gw: route.Gw, LinkIndex: route.LinkIndex} + gws = append(gws, gw) + } else { + for i := range route.MultiPath { + gws = append(gws, cmn.GWInfo{Gw: route.MultiPath[i].Gw, LinkIndex: route.MultiPath[i].LinkIndex}) + } + } + + ret, err := hooks.NetRouteAdd(&cmn.RouteMod{Protocol: int(route.Protocol), Flags: route.Flags, Dst: ipNet, GWs: gws}) if err != nil { if route.Gw != nil { tk.LogIt(tk.LogError, "[NLP] RT %s via %s proto %d add failed-%s\n", ipNet.String(), @@ -1279,7 +1296,7 @@ func DelRoute(route nlp.Route) int { func LUWorkSingle(m nlp.LinkUpdate) int { var ret int - if iSBlackListedIntf(m.Link.Attrs().Name, m.Link.Attrs().MasterIndex) { + if NlpIsBlackListedIntf(m.Link.Attrs().Name, m.Link.Attrs().MasterIndex) { return -1 } @@ -1332,7 +1349,7 @@ func NUWorkSingle(m nlp.NeighUpdate) int { return -1 } - if iSBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { + if NlpIsBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { return -1 } @@ -1350,14 +1367,28 @@ func NUWorkSingle(m nlp.NeighUpdate) int { func RUWorkSingle(m nlp.RouteUpdate) int { var ret int - link, err := nlp.LinkByIndex(m.LinkIndex) - if err != nil { - fmt.Println(err) - return -1 - } + if len(m.MultiPath) <= 0 { + link, err := nlp.LinkByIndex(m.LinkIndex) + if err != nil { + tk.LogIt(tk.LogError, "RUWorkSingle: link find error %s\n", err) + return -1 + } - if iSBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { - return -1 + if NlpIsBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { + return -1 + } + } else { + for _, path := range m.MultiPath { + link, err := nlp.LinkByIndex(path.LinkIndex) + if err != nil { + tk.LogIt(tk.LogError, "RUWorkSingle: link find error %s\n", err) + return -1 + } + + if NlpIsBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { + return -1 + } + } } if skipIfRoute { @@ -1430,7 +1461,7 @@ func NLWorker(nNl *NlH, bgpPeerMode bool, ch chan bool, wch chan bool) { defer func() { if e := recover(); e != nil { - tk.LogIt(tk.LogCritical, "%s: %s", e, debug.Stack()) + tk.LogIt(tk.LogCritical, "%s: %s\n", e, debug.Stack()) } hooks.NetHandlePanic() os.Exit(1) @@ -1458,7 +1489,7 @@ func GetBridges() { return } for _, link := range links { - if iSBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { + if NlpIsBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { continue } switch link.(type) { @@ -1484,7 +1515,7 @@ func NlpGet(ch chan bool) int { for _, link := range links { - if iSBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { + if 
NlpIsBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { continue } @@ -1496,7 +1527,7 @@ func NlpGet(ch chan bool) int { for _, link := range links { - if iSBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { + if NlpIsBlackListedIntf(link.Attrs().Name, link.Attrs().MasterIndex) { // Need addresss to work with addrs, err := nlp.AddrList(link, nlp.FAMILY_ALL) if err != nil { @@ -1559,25 +1590,28 @@ func NlpGet(ch chan bool) int { AddNeigh(neigh, link) } } + } - /* Get Routes */ - routes, err := nlp.RouteList(link, nlp.FAMILY_ALL) - if err != nil { - tk.LogIt(tk.LogError, "[NLP] Error getting route list %v\n", err) - } + /* Get Routes */ + routes, err := nlp.RouteList(nil, nlp.FAMILY_ALL) + if err != nil { + tk.LogIt(tk.LogError, "[NLP] Error getting route list %v\n", err) + } - if len(routes) == 0 { - tk.LogIt(tk.LogDebug, "[NLP] No STATIC routes found for intf %s\n", link.Attrs().Name) - } else { - for _, route := range routes { - if skipIfRoute { - if route.Scope.String() == "link" && tk.IsNetIPv4(route.Dst.IP.String()) { - continue - } + if len(routes) == 0 { + tk.LogIt(tk.LogDebug, "[NLP] No STATIC routes found\n") + } else { + for _, route := range routes { + var m nlp.RouteUpdate + if skipIfRoute { + if route.Scope.String() == "link" && tk.IsNetIPv4(route.Dst.IP.String()) { + continue } - - AddRoute(route) } + m.Type = syscall.RTM_NEWROUTE + m.Route = route + + RUWorkSingle(m) } } tk.LogIt(tk.LogInfo, "[NLP] nlp get done\n") @@ -1642,12 +1676,14 @@ func LbSessionGet(done bool) int { return 0 } -func NlpInit(bgpPeerMode bool, blackList string, ipvsCompat bool) *NlH { +func NlpInit(bgpPeerMode bool, blackList, whitelist string, ipvsCompat bool) *NlH { nNl = new(NlH) nNl.BlackList = blackList nNl.BLRgx = regexp.MustCompile(blackList) + nNl.WhiteList = whitelist + nNl.WLRgx = regexp.MustCompile(whitelist) checkInit := make(chan bool) waitInit := make(chan bool) diff --git a/api/models/loadbalance_entry.go b/api/models/loadbalance_entry.go index 79cb9f18..a7920530 100644 --- a/api/models/loadbalance_entry.go +++ b/api/models/loadbalance_entry.go @@ -315,9 +315,12 @@ type LoadbalanceEntryServiceArguments struct { // block-number if any of this LB entry Block uint16 `json:"block,omitempty"` - // IP address for externel access + // IP address for external access ExternalIP string `json:"externalIP,omitempty"` + // Ingress specific host URL path + Host string `json:"host,omitempty"` + // value for inactivity timeout (in seconds) InactiveTimeOut int32 `json:"inactiveTimeOut,omitempty"` @@ -339,6 +342,9 @@ type LoadbalanceEntryServiceArguments struct { // port number for the access Port int64 `json:"port,omitempty"` + // private IP (NAT'd) address for external access + PrivateIP string `json:"privateIP,omitempty"` + // value for probe retries ProbeRetries int32 `json:"probeRetries,omitempty"` diff --git a/api/models/route_get_entry.go b/api/models/route_get_entry.go index f83534b4..e0c59c4c 100644 --- a/api/models/route_get_entry.go +++ b/api/models/route_get_entry.go @@ -32,7 +32,7 @@ type RouteGetEntry struct { HardwareMark int64 `json:"hardwareMark,omitempty"` // Route protocol - Protocol int64 `json:"protocol,omitempty"` + Protocol string `json:"protocol,omitempty"` // statistic Statistic *RouteGetEntryStatistic `json:"statistic,omitempty"` diff --git a/api/restapi/configure_loxilb_rest_api.go b/api/restapi/configure_loxilb_rest_api.go index df3fd873..8d2735c4 100644 --- a/api/restapi/configure_loxilb_rest_api.go +++ b/api/restapi/configure_loxilb_rest_api.go @@ -60,7 
+60,8 @@ func configureAPI(api *operations.LoxilbRestAPIAPI) http.Handler { // Load balancer add and delete and get api.PostConfigLoadbalancerHandler = operations.PostConfigLoadbalancerHandlerFunc(handler.ConfigPostLoadbalancer) - api.DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandler = operations.DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandlerFunc(handler.ConfigDeleteLoadbalancer) + api.DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler = operations.DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandlerFunc(handler.ConfigDeleteLoadbalancer) + api.DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandler = operations.DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandlerFunc(handler.ConfigDeleteLoadbalancerWithoutPath) api.GetConfigLoadbalancerAllHandler = operations.GetConfigLoadbalancerAllHandlerFunc(handler.ConfigGetLoadbalancer) api.DeleteConfigLoadbalancerAllHandler = operations.DeleteConfigLoadbalancerAllHandlerFunc(handler.ConfigDeleteAllLoadbalancer) api.DeleteConfigLoadbalancerNameLbNameHandler = operations.DeleteConfigLoadbalancerNameLbNameHandlerFunc(handler.ConfigDeleteLoadbalancerByName) diff --git a/api/restapi/embedded_spec.go b/api/restapi/embedded_spec.go index ffc261a1..2ea83877 100644 --- a/api/restapi/embedded_spec.go +++ b/api/restapi/embedded_spec.go @@ -2124,6 +2124,101 @@ func init() { } } }, + "/config/loadbalancer/hosturl/{hosturl}/externalipaddress/{ip_address}/port/{port}/protocol/{proto}": { + "delete": { + "description": "Delete an existing load balancer service with .", + "summary": "Delete an existing Load balancer service", + "parameters": [ + { + "type": "string", + "description": "Attributes for load balance service", + "name": "hosturl", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Attributes for load balance service", + "name": "ip_address", + "in": "path", + "required": true + }, + { + "type": "number", + "description": "Attributes for load balance service", + "name": "port", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Attributes for load balance service", + "name": "proto", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "option for BGP enable", + "name": "bgp", + "in": "query" + }, + { + "type": "number", + "description": "block value if any", + "name": "block", + "in": "query" + } + ], + "responses": { + "204": { + "description": "OK" + }, + "400": { + "description": "Malformed arguments for API call", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "401": { + "description": "Invalid authentication credentials", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "403": { + "description": "Capacity insufficient", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "404": { + "description": "Resource not found", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "409": { + "description": "Resource Conflict. 
VLAN already exists OR dependency VRF/VNET not found", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "500": { + "description": "Internal service error", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "503": { + "description": "Maintanence mode", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + } + }, "/config/loadbalancer/name/{lb_name}": { "delete": { "description": "Delete an existing load balancer service with name.", @@ -4920,7 +5015,11 @@ func init() { "format": "uint16" }, "externalIP": { - "description": "IP address for externel access", + "description": "IP address for external access", + "type": "string" + }, + "host": { + "description": "Ingress specific host URL path", "type": "string" }, "inactiveTimeOut": { @@ -4954,6 +5053,10 @@ func init() { "description": "port number for the access", "type": "integer" }, + "privateIP": { + "description": "private IP (NAT'd) address for external access", + "type": "string" + }, "probeRetries": { "description": "value for probe retries", "type": "integer", @@ -5441,7 +5544,7 @@ func init() { }, "protocol": { "description": "Route protocol", - "type": "integer" + "type": "string" }, "statistic": { "type": "object", @@ -7732,6 +7835,101 @@ func init() { } } }, + "/config/loadbalancer/hosturl/{hosturl}/externalipaddress/{ip_address}/port/{port}/protocol/{proto}": { + "delete": { + "description": "Delete an existing load balancer service with .", + "summary": "Delete an existing Load balancer service", + "parameters": [ + { + "type": "string", + "description": "Attributes for load balance service", + "name": "hosturl", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Attributes for load balance service", + "name": "ip_address", + "in": "path", + "required": true + }, + { + "type": "number", + "description": "Attributes for load balance service", + "name": "port", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Attributes for load balance service", + "name": "proto", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "option for BGP enable", + "name": "bgp", + "in": "query" + }, + { + "type": "number", + "description": "block value if any", + "name": "block", + "in": "query" + } + ], + "responses": { + "204": { + "description": "OK" + }, + "400": { + "description": "Malformed arguments for API call", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "401": { + "description": "Invalid authentication credentials", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "403": { + "description": "Capacity insufficient", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "404": { + "description": "Resource not found", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "409": { + "description": "Resource Conflict. 
VLAN already exists OR dependency VRF/VNET not found", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "500": { + "description": "Internal service error", + "schema": { + "$ref": "#/definitions/Error" + } + }, + "503": { + "description": "Maintanence mode", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + } + }, "/config/loadbalancer/name/{lb_name}": { "delete": { "description": "Delete an existing load balancer service with name.", @@ -10957,7 +11155,11 @@ func init() { "format": "uint16" }, "externalIP": { - "description": "IP address for externel access", + "description": "IP address for external access", + "type": "string" + }, + "host": { + "description": "Ingress specific host URL path", "type": "string" }, "inactiveTimeOut": { @@ -10991,6 +11193,10 @@ func init() { "description": "port number for the access", "type": "integer" }, + "privateIP": { + "description": "private IP (NAT'd) address for external access", + "type": "string" + }, "probeRetries": { "description": "value for probe retries", "type": "integer", @@ -11084,7 +11290,11 @@ func init() { "format": "uint16" }, "externalIP": { - "description": "IP address for externel access", + "description": "IP address for external access", + "type": "string" + }, + "host": { + "description": "Ingress specific host URL path", "type": "string" }, "inactiveTimeOut": { @@ -11118,6 +11328,10 @@ func init() { "description": "port number for the access", "type": "integer" }, + "privateIP": { + "description": "private IP (NAT'd) address for external access", + "type": "string" + }, "probeRetries": { "description": "value for probe retries", "type": "integer", @@ -11859,7 +12073,7 @@ func init() { }, "protocol": { "description": "Route protocol", - "type": "integer" + "type": "string" }, "statistic": { "type": "object", diff --git a/api/restapi/handler/loadbalancer.go b/api/restapi/handler/loadbalancer.go index c1110b0a..585abdfc 100644 --- a/api/restapi/handler/loadbalancer.go +++ b/api/restapi/handler/loadbalancer.go @@ -29,6 +29,7 @@ func ConfigPostLoadbalancer(params operations.PostConfigLoadbalancerParams) midd var lbRules cmn.LbRuleMod lbRules.Serv.ServIP = params.Attr.ServiceArguments.ExternalIP + lbRules.Serv.PrivateIP = params.Attr.ServiceArguments.PrivateIP lbRules.Serv.ServPort = uint16(params.Attr.ServiceArguments.Port) lbRules.Serv.Proto = params.Attr.ServiceArguments.Protocol lbRules.Serv.BlockNum = params.Attr.ServiceArguments.Block @@ -47,6 +48,7 @@ func ConfigPostLoadbalancer(params operations.PostConfigLoadbalancerParams) midd lbRules.Serv.ProbeRetries = int(params.Attr.ServiceArguments.ProbeRetries) lbRules.Serv.Name = params.Attr.ServiceArguments.Name lbRules.Serv.Oper = cmn.LBOp(params.Attr.ServiceArguments.Oper) + lbRules.Serv.HostUrl = params.Attr.ServiceArguments.Host if lbRules.Serv.Proto == "sctp" { for _, data := range params.Attr.SecondaryIPs { @@ -77,7 +79,7 @@ func ConfigPostLoadbalancer(params operations.PostConfigLoadbalancerParams) midd return &ResultResponse{Result: "Success"} } -func ConfigDeleteLoadbalancer(params operations.DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoParams) middleware.Responder { +func ConfigDeleteLoadbalancer(params operations.DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) middleware.Responder { tk.LogIt(tk.LogDebug, "[API] Load balancer %s API called. 
url : %s\n", params.HTTPRequest.Method, params.HTTPRequest.URL) var lbServ cmn.LbServiceArg @@ -85,6 +87,11 @@ func ConfigDeleteLoadbalancer(params operations.DeleteConfigLoadbalancerExternal lbServ.ServIP = params.IPAddress lbServ.ServPort = uint16(params.Port) lbServ.Proto = params.Proto + if params.Hosturl == "any" { + lbServ.HostUrl = "" + } else { + lbServ.HostUrl = params.Hosturl + } if params.Block != nil { lbServ.BlockNum = uint16(*params.Block) } @@ -102,6 +109,32 @@ func ConfigDeleteLoadbalancer(params operations.DeleteConfigLoadbalancerExternal return &ResultResponse{Result: "Success"} } +func ConfigDeleteLoadbalancerWithoutPath(params operations.DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoParams) middleware.Responder { + tk.LogIt(tk.LogDebug, "[API] Load balancer %s API called. url : %s\n", params.HTTPRequest.Method, params.HTTPRequest.URL) + + var lbServ cmn.LbServiceArg + var lbRules cmn.LbRuleMod + lbServ.ServIP = params.IPAddress + lbServ.ServPort = uint16(params.Port) + lbServ.Proto = params.Proto + lbServ.HostUrl = "" + if params.Block != nil { + lbServ.BlockNum = uint16(*params.Block) + } + if params.Bgp != nil { + lbServ.Bgp = *params.Bgp + } + + lbRules.Serv = lbServ + tk.LogIt(tk.LogDebug, "[API] lbRules (w/o Path): %v\n", lbRules) + _, err := ApiHooks.NetLbRuleDel(&lbRules) + if err != nil { + tk.LogIt(tk.LogDebug, "[API] Error occurred : %v\n", err) + return &ResultResponse{Result: err.Error()} + } + return &ResultResponse{Result: "Success"} +} + func ConfigGetLoadbalancer(params operations.GetConfigLoadbalancerAllParams) middleware.Responder { // Get LB rules tk.LogIt(tk.LogDebug, "[API] Load balancer %s API called. url : %s\n", params.HTTPRequest.Method, params.HTTPRequest.URL) @@ -133,6 +166,7 @@ func ConfigGetLoadbalancer(params operations.GetConfigLoadbalancerAllParams) mid tmpSvc.Probeport = lb.Serv.ProbePort tmpSvc.Name = lb.Serv.Name tmpSvc.Snat = lb.Serv.Snat + tmpSvc.Host = lb.Serv.HostUrl tmpLB.ServiceArguments = &tmpSvc diff --git a/api/restapi/handler/route.go b/api/restapi/handler/route.go index c911bfe7..7d4515f8 100644 --- a/api/restapi/handler/route.go +++ b/api/restapi/handler/route.go @@ -17,13 +17,13 @@ package handler import ( "fmt" - "strings" - "github.com/go-openapi/runtime/middleware" "github.com/loxilb-io/loxilb/api/loxinlp" "github.com/loxilb-io/loxilb/api/models" "github.com/loxilb-io/loxilb/api/restapi/operations" tk "github.com/loxilb-io/loxilib" + "strconv" + "strings" ) func ConfigPostRoute(params operations.PostConfigRouteParams) middleware.Responder { @@ -58,7 +58,21 @@ func ConfigGetRoute(params operations.GetConfigRouteAllParams) middleware.Respon tmpResult.Flags = strings.TrimSpace(route.Flags) tmpResult.Gateway = route.Gw tmpResult.HardwareMark = int64(route.HardwareMark) - tmpResult.Protocol = int64(route.Protocol) + // Map numeric kernel route protocol values to their names; unknown values keep the numeric string. + protoStr := strconv.Itoa(route.Protocol) + switch route.Protocol { + case 0: + protoStr = "unspec" + case 1: + protoStr = "redirect" + case 2: + protoStr = "kernel" + case 3: + protoStr = "boot" + case 4: + protoStr = "static" + } + tmpResult.Protocol = protoStr tmpResult.Sync = int64(route.Sync) tmpStats := new(models.RouteGetEntryStatistic) diff --git a/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto.go b/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto.go new file mode 100644 index 00000000..d8f7773b --- /dev/null +++ 
b/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" +) + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandlerFunc turns a function with the right signature into a delete config loadbalancer hosturl hosturl externalipaddress IP address port port protocol proto handler +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandlerFunc func(DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandlerFunc) Handle(params DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) middleware.Responder { + return fn(params) +} + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler interface for that can handle valid delete config loadbalancer hosturl hosturl externalipaddress IP address port port protocol proto params +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler interface { + Handle(DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) middleware.Responder +} + +// NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProto creates a new http.Handler for the delete config loadbalancer hosturl hosturl externalipaddress IP address port port protocol proto operation +func NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProto(ctx *middleware.Context, handler DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler) *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProto { + return &DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProto{Context: ctx, Handler: handler} +} + +/* + DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProto swagger:route DELETE /config/loadbalancer/hosturl/{hosturl}/externalipaddress/{ip_address}/port/{port}/protocol/{proto} deleteConfigLoadbalancerHosturlHosturlExternalipaddressIpAddressPortPortProtocolProto + +# Delete an existing Load balancer service + +Delete an existing load balancer service with . 
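[Editor's note] A minimal usage sketch for this route, assuming a loxilb instance serving the REST API under the default base path /netlox/v1 (the listen port 11111, VIP, service port and protocol below are illustrative). As the ConfigDeleteLoadbalancer handler earlier in this patch shows, the literal segment "any" clears the host match, so it addresses the plain L4 rule:

  # delete the rule scoped to host loxilb.io
  curl -X DELETE http://127.0.0.1:11111/netlox/v1/config/loadbalancer/hosturl/loxilb.io/externalipaddress/20.20.20.1/port/2020/protocol/tcp
  # delete the host-agnostic rule ("any" maps to an empty host match)
  curl -X DELETE http://127.0.0.1:11111/netlox/v1/config/loadbalancer/hosturl/any/externalipaddress/20.20.20.1/port/2020/protocol/tcp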
+*/ +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProto struct { + Context *middleware.Context + Handler DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler +} + +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProto) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams() + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto_parameters.go b/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto_parameters.go new file mode 100644 index 00000000..4caef334 --- /dev/null +++ b/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto_parameters.go @@ -0,0 +1,216 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams creates a new DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams object +// +// There are no default values defined in the spec. 
+func NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams() DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams { + + return DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams{} +} + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams contains all the bound params for the delete config loadbalancer hosturl hosturl externalipaddress IP address port port protocol proto operation +// typically these are obtained from a http.Request +// +// swagger:parameters DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProto +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*option for BGP enable + In: query + */ + Bgp *bool + /*block value if any + In: query + */ + Block *float64 + /*Attributes for load balance service + Required: true + In: path + */ + Hosturl string + /*Attributes for load balance service + Required: true + In: path + */ + IPAddress string + /*Attributes for load balance service + Required: true + In: path + */ + Port float64 + /*Attributes for load balance service + Required: true + In: path + */ + Proto string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams() beforehand. +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + qBgp, qhkBgp, _ := qs.GetOK("bgp") + if err := o.bindBgp(qBgp, qhkBgp, route.Formats); err != nil { + res = append(res, err) + } + + qBlock, qhkBlock, _ := qs.GetOK("block") + if err := o.bindBlock(qBlock, qhkBlock, route.Formats); err != nil { + res = append(res, err) + } + + rHosturl, rhkHosturl, _ := route.Params.GetOK("hosturl") + if err := o.bindHosturl(rHosturl, rhkHosturl, route.Formats); err != nil { + res = append(res, err) + } + + rIPAddress, rhkIPAddress, _ := route.Params.GetOK("ip_address") + if err := o.bindIPAddress(rIPAddress, rhkIPAddress, route.Formats); err != nil { + res = append(res, err) + } + + rPort, rhkPort, _ := route.Params.GetOK("port") + if err := o.bindPort(rPort, rhkPort, route.Formats); err != nil { + res = append(res, err) + } + + rProto, rhkProto, _ := route.Params.GetOK("proto") + if err := o.bindProto(rProto, rhkProto, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindBgp binds and validates parameter Bgp from query. 
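// [Editor's note] bgp and block are this operation's optional query parameters; a
// hedged sketch of supplying them on the same DELETE (all values illustrative):
//
//	curl -X DELETE "http://127.0.0.1:11111/netlox/v1/config/loadbalancer/hosturl/any/externalipaddress/20.20.20.1/port/2020/protocol/tcp?bgp=true&block=1"
//
// An empty raw value is treated as absent, per the raw == "" early return below.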
+func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) bindBgp(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("bgp", "query", "bool", raw) + } + o.Bgp = &value + + return nil +} + +// bindBlock binds and validates parameter Block from query. +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) bindBlock(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + + value, err := swag.ConvertFloat64(raw) + if err != nil { + return errors.InvalidType("block", "query", "float64", raw) + } + o.Block = &value + + return nil +} + +// bindHosturl binds and validates parameter Hosturl from path. +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) bindHosturl(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.Hosturl = raw + + return nil +} + +// bindIPAddress binds and validates parameter IPAddress from path. +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) bindIPAddress(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.IPAddress = raw + + return nil +} + +// bindPort binds and validates parameter Port from path. +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) bindPort(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + value, err := swag.ConvertFloat64(raw) + if err != nil { + return errors.InvalidType("port", "path", "float64", raw) + } + o.Port = value + + return nil +} + +// bindProto binds and validates parameter Proto from path. 
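// [Editor's note] Because the spec types {port} as "number", bindPort above parses it
// with swag.ConvertFloat64, and a non-numeric value is rejected as a 400 InvalidType
// error; a sketch (exact message wording depends on the go-openapi version):
//
//	curl -X DELETE http://127.0.0.1:11111/netlox/v1/config/loadbalancer/hosturl/any/externalipaddress/20.20.20.1/port/abc/protocol/tcp
//	# => "port in path must be of type float64: \"abc\""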
+func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) bindProto(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.Proto = raw + + return nil +} diff --git a/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto_responses.go b/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto_responses.go new file mode 100644 index 00000000..682cbbf7 --- /dev/null +++ b/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto_responses.go @@ -0,0 +1,354 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/loxilb-io/loxilb/api/models" +) + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContentCode is the HTTP code returned for type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContent +const DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContentCode int = 204 + +/* +DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContent OK + +swagger:response deleteConfigLoadbalancerHosturlHosturlExternalipaddressIpAddressPortPortProtocolProtoNoContent +*/ +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContent struct { +} + +// NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContent creates DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContent with default headers values +func NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContent() *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContent { + + return &DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContent{} +} + +// WriteResponse to the client +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequestCode is the HTTP code returned for type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest +const DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequestCode int = 400 + +/* +DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest Malformed arguments for API call + +swagger:response deleteConfigLoadbalancerHosturlHosturlExternalipaddressIpAddressPortPortProtocolProtoBadRequest +*/ +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// 
NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest creates DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest with default headers values +func NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest() *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest { + + return &DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest{} +} + +// WithPayload adds the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto bad request response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest) WithPayload(payload *models.Error) *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto bad request response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorizedCode is the HTTP code returned for type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized +const DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorizedCode int = 401 + +/* +DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized Invalid authentication credentials + +swagger:response deleteConfigLoadbalancerHosturlHosturlExternalipaddressIpAddressPortPortProtocolProtoUnauthorized +*/ +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized creates DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized with default headers values +func NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized() *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized { + + return &DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized{} +} + +// WithPayload adds the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto unauthorized response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized) WithPayload(payload *models.Error) *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized { + o.Payload = payload + return o +} + +// SetPayload 
sets the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto unauthorized response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(401) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbiddenCode is the HTTP code returned for type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden +const DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbiddenCode int = 403 + +/* +DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden Capacity insufficient + +swagger:response deleteConfigLoadbalancerHosturlHosturlExternalipaddressIpAddressPortPortProtocolProtoForbidden +*/ +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden creates DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden with default headers values +func NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden() *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden { + + return &DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden{} +} + +// WithPayload adds the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto forbidden response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden) WithPayload(payload *models.Error) *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto forbidden response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFoundCode is the HTTP code returned for type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound +const DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFoundCode int = 
404 + +/* +DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound Resource not found + +swagger:response deleteConfigLoadbalancerHosturlHosturlExternalipaddressIpAddressPortPortProtocolProtoNotFound +*/ +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound creates DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound with default headers values +func NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound() *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound { + + return &DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound{} +} + +// WithPayload adds the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto not found response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound) WithPayload(payload *models.Error) *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto not found response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflictCode is the HTTP code returned for type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict +const DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflictCode int = 409 + +/* +DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict Resource Conflict. 
VLAN already exists OR dependency VRF/VNET not found + +swagger:response deleteConfigLoadbalancerHosturlHosturlExternalipaddressIpAddressPortPortProtocolProtoConflict +*/ +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict creates DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict with default headers values +func NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict() *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict { + + return &DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict{} +} + +// WithPayload adds the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto conflict response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict) WithPayload(payload *models.Error) *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto conflict response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(409) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerErrorCode is the HTTP code returned for type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError +const DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerErrorCode int = 500 + +/* +DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError Internal service error + +swagger:response deleteConfigLoadbalancerHosturlHosturlExternalipaddressIpAddressPortPortProtocolProtoInternalServerError +*/ +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError creates DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError with default headers values +func NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError() *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError { + + return &DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError{} +} + +// WithPayload adds the payload to the delete config 
loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto internal server error response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError) WithPayload(payload *models.Error) *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto internal server error response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailableCode is the HTTP code returned for type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable +const DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailableCode int = 503 + +/* +DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable Maintanence mode + +swagger:response deleteConfigLoadbalancerHosturlHosturlExternalipaddressIpAddressPortPortProtocolProtoServiceUnavailable +*/ +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable creates DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable with default headers values +func NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable() *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable { + + return &DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable{} +} + +// WithPayload adds the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto service unavailable response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable) WithPayload(payload *models.Error) *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete config loadbalancer hosturl hosturl externalipaddress Ip address port port protocol proto service unavailable response +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoServiceUnavailable) 
WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(503) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto_urlbuilder.go b/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto_urlbuilder.go new file mode 100644 index 00000000..f6e9f09e --- /dev/null +++ b/api/restapi/operations/delete_config_loadbalancer_hosturl_hosturl_externalipaddress_ip_address_port_port_protocol_proto_urlbuilder.go @@ -0,0 +1,148 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/swag" +) + +// DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL generates an URL for the delete config loadbalancer hosturl hosturl externalipaddress IP address port port protocol proto operation +type DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL struct { + Hosturl string + IPAddress string + Port float64 + Proto string + + Bgp *bool + Block *float64 + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL) WithBasePath(bp string) *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/config/loadbalancer/hosturl/{hosturl}/externalipaddress/{ip_address}/port/{port}/protocol/{proto}" + + hosturl := o.Hosturl + if hosturl != "" { + _path = strings.Replace(_path, "{hosturl}", hosturl, -1) + } else { + return nil, errors.New("hosturl is required on DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL") + } + + iPAddress := o.IPAddress + if iPAddress != "" { + _path = strings.Replace(_path, "{ip_address}", iPAddress, -1) + } else { + return nil, errors.New("ipAddress is required on DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL") + } + + port := swag.FormatFloat64(o.Port) + if port != "" { + _path = strings.Replace(_path, "{port}", port, -1) + } else { + return nil, errors.New("port is required on DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL") + } + + proto := o.Proto + if proto != "" { + _path = strings.Replace(_path, "{proto}", proto, -1) + } else { + return nil, errors.New("proto is required on DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/netlox/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var bgpQ string + if o.Bgp != nil { + bgpQ = swag.FormatBool(*o.Bgp) + } + if bgpQ != "" { + qs.Set("bgp", bgpQ) + } + + var blockQ string + if o.Block != nil { + blockQ = swag.FormatFloat64(*o.Block) + } + if blockQ != "" { + qs.Set("block", blockQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, 
host)).String() +} diff --git a/api/restapi/operations/loxilb_rest_api_api.go b/api/restapi/operations/loxilb_rest_api_api.go index 088e4243..4d0909b7 100644 --- a/api/restapi/operations/loxilb_rest_api_api.go +++ b/api/restapi/operations/loxilb_rest_api_api.go @@ -75,6 +75,9 @@ func NewLoxilbRestAPIAPI(spec *loads.Document) *LoxilbRestAPIAPI { DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandler: DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandlerFunc(func(params DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoParams) middleware.Responder { return middleware.NotImplemented("operation DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProto has not yet been implemented") }), + DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler: DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandlerFunc(func(params DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoParams) middleware.Responder { + return middleware.NotImplemented("operation DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProto has not yet been implemented") + }), DeleteConfigLoadbalancerNameLbNameHandler: DeleteConfigLoadbalancerNameLbNameHandlerFunc(func(params DeleteConfigLoadbalancerNameLbNameParams) middleware.Responder { return middleware.NotImplemented("operation DeleteConfigLoadbalancerNameLbName has not yet been implemented") }), @@ -310,6 +313,8 @@ type LoxilbRestAPIAPI struct { DeleteConfigLoadbalancerAllHandler DeleteConfigLoadbalancerAllHandler // DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandler sets the operation handler for the delete config loadbalancer externalipaddress IP address port port protocol proto operation DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandler DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandler + // DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler sets the operation handler for the delete config loadbalancer hosturl hosturl externalipaddress IP address port port protocol proto operation + DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler // DeleteConfigLoadbalancerNameLbNameHandler sets the operation handler for the delete config loadbalancer name lb name operation DeleteConfigLoadbalancerNameLbNameHandler DeleteConfigLoadbalancerNameLbNameHandler // DeleteConfigMirrorIdentIdentHandler sets the operation handler for the delete config mirror ident ident operation @@ -538,6 +543,9 @@ func (o *LoxilbRestAPIAPI) Validate() error { if o.DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandler == nil { unregistered = append(unregistered, "DeleteConfigLoadbalancerExternalipaddressIPAddressPortPortProtocolProtoHandler") } + if o.DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler == nil { + unregistered = append(unregistered, "DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler") + } if o.DeleteConfigLoadbalancerNameLbNameHandler == nil { unregistered = append(unregistered, "DeleteConfigLoadbalancerNameLbNameHandler") } @@ -850,6 +858,10 @@ func (o *LoxilbRestAPIAPI) initHandlerCache() { if o.handlers["DELETE"] == 
nil { o.handlers["DELETE"] = make(map[string]http.Handler) } + o.handlers["DELETE"]["/config/loadbalancer/hosturl/{hosturl}/externalipaddress/{ip_address}/port/{port}/protocol/{proto}"] = NewDeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProto(o.context, o.DeleteConfigLoadbalancerHosturlHosturlExternalipaddressIPAddressPortPortProtocolProtoHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } o.handlers["DELETE"]["/config/loadbalancer/name/{lb_name}"] = NewDeleteConfigLoadbalancerNameLbName(o.context, o.DeleteConfigLoadbalancerNameLbNameHandler) if o.handlers["DELETE"] == nil { o.handlers["DELETE"] = make(map[string]http.Handler) } diff --git a/api/restapi/server.go b/api/restapi/server.go index c61df9e2..97f5e78d 100644 --- a/api/restapi/server.go +++ b/api/restapi/server.go @@ -8,7 +8,6 @@ import ( "crypto/x509" "errors" "fmt" - "github.com/loxilb-io/loxilb/options" "log" "net" "net/http" @@ -26,6 +25,7 @@ import ( "golang.org/x/net/netutil" "github.com/loxilb-io/loxilb/api/restapi/operations" + "github.com/loxilb-io/loxilb/options" ) const ( @@ -82,7 +82,7 @@ type Server struct { ListenLimit int `long:"listen-limit" description:"limit the number of outstanding requests"` KeepAlive time.Duration `long:"keep-alive" description:"sets the TCP keep-alive timeouts on accepted connections. It prunes dead TCP connections ( e.g. closing laptop mid-download)" default:"3m"` ReadTimeout time.Duration `long:"read-timeout" description:"maximum duration before timing out read of the request" default:"30s"` - WriteTimeout time.Duration `long:"write-timeout" description:"maximum duration before timing out write of the response" default:"60s"` + WriteTimeout time.Duration `long:"write-timeout" description:"maximum duration before timing out write of the response" default:"30s"` httpServerL net.Listener TLSHost string `long:"tls-host" description:"the IP to listen on for tls, when not specified it's the same as --host" env:"TLS_HOST"` diff --git a/api/swagger.yml b/api/swagger.yml index adca60e6..2e930bfe 100644 --- a/api/swagger.yml +++ b/api/swagger.yml @@ -234,6 +234,71 @@ paths: description: Maintanence mode schema: $ref: '#/definitions/Error' + '/config/loadbalancer/hosturl/{hosturl}/externalipaddress/{ip_address}/port/{port}/protocol/{proto}': + delete: + summary: Delete an existing Load balancer service + description: Delete an existing load balancer service with hosturl. 
+ parameters: + - name: hosturl + in: path + type: string + required: true + description: Attributes for load balance service + - name: ip_address + in: path + type: string + required: true + description: Attributes for load balance service + - name: port + in: path + type: number + required: true + description: Attributes for load balance service + - name: proto + in: path + type: string + required: true + description: Attributes for load balance service + - name: bgp + in: query + type: boolean + description: option for BGP enable + - name: block + in: query + type: number + required: false + description: block value if any + responses: + '204': + description: OK + '400': + description: Malformed arguments for API call + schema: + $ref: '#/definitions/Error' + '401': + description: Invalid authentication credentials + schema: + $ref: '#/definitions/Error' + '403': + description: Capacity insufficient + schema: + $ref: '#/definitions/Error' + '404': + description: Resource not found + schema: + $ref: '#/definitions/Error' + '409': + description: Resource Conflict. VLAN already exists OR dependency VRF/VNET not found + schema: + $ref: '#/definitions/Error' + '500': + description: Internal service error + schema: + $ref: '#/definitions/Error' + '503': + description: Maintenance mode + schema: + $ref: '#/definitions/Error' #---------------------------------------------- # Conntrack #---------------------------------------------- @@ -2835,7 +2900,10 @@ definitions: properties: externalIP: type: string - description: IP address for externel access + description: IP address for external access + privateIP: + type: string + description: private IP (NAT'd) address for external access port: type: integer description: port number for the access @@ -2901,6 +2969,9 @@ type: integer format: int32 description: end-point specific op (0-create, 1-attachEP, 2-detachEP) + host: + type: string + description: Ingress specific host URL path endpoints: type: array @@ -2958,7 +3029,7 @@ type: integer description: index of the route protocol: - type: integer + type: string description: Route protocol flags: type: string diff --git a/cicd/common.sh b/cicd/common.sh index fcf7ea8e..619a1c36 100644 --- a/cicd/common.sh +++ b/cicd/common.sh @@ -16,6 +16,7 @@ hostdocker="ghcr.io/loxilb-io/nettest:latest" cluster_opts="" extra_opts="" ka_opts="" +docker_extra_opts="" #var=$(lsb_release -r | cut -f2) #if [[ $var == *"22.04"* ]];then # lxdocker="ghcr.io/loxilb-io/loxilb:latestu22" @@ -76,7 +77,7 @@ spawn_docker_host() { fi shift 2 ;; - -d | --ka-config ) + -n | --ka-config ) kpath="$2" if [[ -z ${ka+x} ]]; then ka="in" @@ -87,6 +88,10 @@ extra_opts="$2" shift 2 ;; + -x | --docker-args) + docker_extra_opts="$2" + shift 2 + ;; -*|--*) echo "Unknown option $1" exit @@ -109,11 +114,11 @@ fi if [[ ! 
-z ${ka+x} ]]; then sudo mkdir -p /etc/shared/$dname/ - docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dt --pid=host --cgroupns=host --entrypoint /bin/bash $bgp_conf -v /dev/log:/dev/log -v /etc/shared/$dname:/etc/shared $loxilb_config --name $dname $lxdocker + docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dt $docker_extra_opts --entrypoint /bin/bash $bgp_conf -v /dev/log:/dev/log -v /etc/shared/$dname:/etc/shared $loxilb_config --name $dname $lxdocker get_llb_peerIP $dname docker exec -dt $dname /root/loxilb-io/loxilb/loxilb $bgp_opts $cluster_opts $ka_opts $extra_opts else - docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dt --pid=host --cgroupns=host --entrypoint /bin/bash $bgp_conf -v /dev/log:/dev/log $loxilb_config --name $dname $lxdocker $bgp_opts + docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dt $docker_extra_opts --entrypoint /bin/bash $bgp_conf -v /dev/log:/dev/log $loxilb_config --name $dname $lxdocker $bgp_opts docker exec -dt $dname /root/loxilb-io/loxilb/loxilb $bgp_opts $cluster_opts $extra_opts fi elif [[ "$dtype" == "host" ]]; then @@ -552,8 +557,12 @@ function create_lb_rule() { echo "$1: loxicmd create lb ${args[*]}" $dexec $1 loxicmd create lb ${args[*]} - hook=$($dexec llb1 ntc filter show dev eth0 ingress | grep tc_packet_hook) - if [[ $hook != *"tc_packet_hook"* ]]; then + if [[ ${args[*]} == *"--mode=fullproxy"* ]]; then + return + fi + + hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func) + if [[ $hook != *"tc_packet_func"* ]]; then echo "ERROR : No hook point found"; exit 1 fi diff --git a/cicd/common/tcp_https_server.js b/cicd/common/tcp_https_server.js new file mode 100644 index 00000000..678063cf --- /dev/null +++ b/cicd/common/tcp_https_server.js @@ -0,0 +1,17 @@ +// tcp_https_server.js + +var certdir = "./" +if (process.argv[3]) { + certdir = process.argv[3] +} +const https = require('https'); +const fs = require('fs'); + +https.createServer({ + cert: fs.readFileSync(certdir + '/server.crt'), + key: fs.readFileSync(certdir + '/server.key') +}, (req, res) => { + res.writeHead(200); + res.end(process.argv[2]); +}).listen(8080); +console.log("Server listening on https://localhost:8080/"); diff --git a/cicd/docker-k0s-lb/common.sh b/cicd/docker-k0s-lb/common.sh index 87d08bd5..01c5a55a 100755 --- a/cicd/docker-k0s-lb/common.sh +++ b/cicd/docker-k0s-lb/common.sh @@ -532,8 +532,8 @@ function create_lb_rule() { echo "$1: loxicmd create lb ${args[*]}" $dexec $1 loxicmd create lb ${args[*]} - hook=$($dexec llb1 ntc filter show dev eth0 ingress | grep tc_packet_hook) - if [[ $hook != *"tc_packet_hook"* ]]; then + hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func) + if [[ $hook != *"tc_packet_func"* ]]; then echo "ERROR : No hook point found"; exit 1 fi diff --git a/cicd/docker-k3s-calico/common.sh b/cicd/docker-k3s-calico/common.sh index b82f6e0e..ea119602 100644 --- a/cicd/docker-k3s-calico/common.sh +++ b/cicd/docker-k3s-calico/common.sh @@ -536,8 +536,8 @@ function create_lb_rule() { echo "$1: loxicmd create lb ${args[*]}" $dexec $1 loxicmd create lb ${args[*]} - hook=$($dexec llb1 ntc filter show dev eth0 ingress | grep tc_packet_hook) - if [[ $hook != *"tc_packet_hook"* ]]; then + hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func) + if [[ $hook != *"tc_packet_func"* ]]; then echo "ERROR : No hook point found"; exit 1 fi diff --git a/cicd/docker-k3s-cilium/common.sh 
b/cicd/docker-k3s-cilium/common.sh index b82f6e0e..ea119602 100644 --- a/cicd/docker-k3s-cilium/common.sh +++ b/cicd/docker-k3s-cilium/common.sh @@ -536,8 +536,8 @@ function create_lb_rule() { echo "$1: loxicmd create lb ${args[*]}" $dexec $1 loxicmd create lb ${args[*]} - hook=$($dexec llb1 ntc filter show dev eth0 ingress | grep tc_packet_hook) - if [[ $hook != *"tc_packet_hook"* ]]; then + hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func) + if [[ $hook != *"tc_packet_func"* ]]; then echo "ERROR : No hook point found"; exit 1 fi diff --git a/cicd/docker-k3s-lb/common.sh b/cicd/docker-k3s-lb/common.sh index b82f6e0e..ea119602 100644 --- a/cicd/docker-k3s-lb/common.sh +++ b/cicd/docker-k3s-lb/common.sh @@ -536,8 +536,8 @@ function create_lb_rule() { echo "$1: loxicmd create lb ${args[*]}" $dexec $1 loxicmd create lb ${args[*]} - hook=$($dexec llb1 ntc filter show dev eth0 ingress | grep tc_packet_hook) - if [[ $hook != *"tc_packet_hook"* ]]; then + hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func) + if [[ $hook != *"tc_packet_func"* ]]; then echo "ERROR : No hook point found"; exit 1 fi diff --git a/cicd/e2ehttpsproxy/config.sh b/cicd/e2ehttpsproxy/config.sh new file mode 100755 index 00000000..50ad10c8 --- /dev/null +++ b/cicd/e2ehttpsproxy/config.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +source ../common.sh + +echo "#########################################" +echo "Spawning all hosts" +echo "#########################################" + +spawn_docker_host --dock-type loxilb --dock-name llb1 --extra-args "--proxyonlymode" +spawn_docker_host --dock-type host --dock-name l3h1 +spawn_docker_host --dock-type host --dock-name l3ep1 +spawn_docker_host --dock-type host --dock-name l3ep2 +spawn_docker_host --dock-type host --dock-name l3ep3 + +echo "#########################################" +echo "Connecting and configuring hosts" +echo "#########################################" + + +connect_docker_hosts l3h1 llb1 +connect_docker_hosts l3ep1 llb1 +connect_docker_hosts l3ep2 llb1 +connect_docker_hosts l3ep3 llb1 + +sleep 5 + +#L3 config +config_docker_host --host1 l3h1 --host2 llb1 --ptype phy --addr 10.10.10.1/24 --gw 10.10.10.254 +config_docker_host --host1 l3ep1 --host2 llb1 --ptype phy --addr 31.31.31.1/24 --gw 31.31.31.254 +config_docker_host --host1 l3ep2 --host2 llb1 --ptype phy --addr 32.32.32.1/24 --gw 32.32.32.254 +config_docker_host --host1 l3ep3 --host2 llb1 --ptype phy --addr 33.33.33.1/24 --gw 33.33.33.254 +config_docker_host --host1 llb1 --host2 l3h1 --ptype phy --addr 10.10.10.254/24 +config_docker_host --host1 llb1 --host2 l3ep1 --ptype phy --addr 31.31.31.254/24 +config_docker_host --host1 llb1 --host2 l3ep2 --ptype phy --addr 32.32.32.254/24 +config_docker_host --host1 llb1 --host2 l3ep3 --ptype phy --addr 33.33.33.254/24 + +$dexec llb1 ip addr add 10.10.10.3/32 dev lo + +#Prepare certificates +rm -fr 10.10.10.254 +rm -fr loxilb.io +rm -fr minica*.pem +./minica -ip-addresses 10.10.10.254 +./minica -domains loxilb.io +mv loxilb.io/cert.pem loxilb.io/server.crt +mv loxilb.io/key.pem loxilb.io/server.key + +docker cp minica.pem llb1:/opt/loxilb/cert/rootCA.crt +docker cp 10.10.10.254/cert.pem llb1:/opt/loxilb/cert/server.crt +docker cp 10.10.10.254/key.pem llb1:/opt/loxilb/cert/server.key + +sleep 5 +create_lb_rule llb1 10.10.10.254 --tcp=2020:8080 --endpoints=31.31.31.1:1,32.32.32.1:1,33.33.33.1:1 --mode=fullproxy --security=e2ehttps --host=loxilb.io diff --git a/cicd/e2ehttpsproxy/minica b/cicd/e2ehttpsproxy/minica new file mode 100755 
index 00000000..a152b166 Binary files /dev/null and b/cicd/e2ehttpsproxy/minica differ diff --git a/cicd/e2ehttpsproxy/rmconfig.sh b/cicd/e2ehttpsproxy/rmconfig.sh new file mode 100755 index 00000000..9ea80577 --- /dev/null +++ b/cicd/e2ehttpsproxy/rmconfig.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +source ../common.sh + +disconnect_docker_hosts l3h1 llb1 +disconnect_docker_hosts l3ep1 llb1 +disconnect_docker_hosts l3ep2 llb1 +disconnect_docker_hosts l3ep3 llb1 + +delete_docker_host llb1 +delete_docker_host l3h1 +delete_docker_host l3ep1 +delete_docker_host l3ep2 +delete_docker_host l3ep3 + +echo "#########################################" +echo "Deleted testbed" +echo "#########################################" diff --git a/cicd/e2ehttpsproxy/validation.sh b/cicd/e2ehttpsproxy/validation.sh new file mode 100755 index 00000000..c9a3d00e --- /dev/null +++ b/cicd/e2ehttpsproxy/validation.sh @@ -0,0 +1,43 @@ +#!/bin/bash +source ../common.sh +echo SCENARIO-e2ehttps-tcplb +$hexec l3ep1 node ../common/tcp_https_server.js server1 loxilb.io & +$hexec l3ep2 node ../common/tcp_https_server.js server2 loxilb.io & +$hexec l3ep3 node ../common/tcp_https_server.js server3 loxilb.io & + +sleep 5 +code=0 +servIP=( "10.10.10.254" ) +servArr=( "server1" "server2" "server3" ) +ep=( "31.31.31.1" "32.32.32.1" "33.33.33.1" ) +j=0 +waitCount=0 + +for k in {0..0} +do +echo "Testing Service IP: ${servIP[k]}" +lcode=0 +for i in {1..4} +do +for j in {0..2} +do + res=$($hexec l3h1 curl --max-time 10 -H "Application/json" -H "Content-type: application/json" -H "HOST: loxilb.io" --insecure -s https://${servIP[k]}:2020) + echo $res + if [[ $res != "${servArr[j]}" ]] + then + lcode=1 + fi + sleep 1 +done +done +if [[ $lcode == 0 ]] +then + echo SCENARIO-e2ehttps-tcplb with ${servIP[k]} [OK] +else + echo SCENARIO-e2ehttps-tcplb with ${servIP[k]} [FAILED] + code=1 +fi +done + +sudo killall -9 node 2>&1 > /dev/null +exit $code diff --git a/cicd/httpproxy/config.sh b/cicd/httpproxy/config.sh new file mode 100644 index 00000000..51ef2cea --- /dev/null +++ b/cicd/httpproxy/config.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +source ../common.sh + +echo "#########################################" +echo "Spawning all hosts" +echo "#########################################" + +spawn_docker_host --dock-type loxilb --dock-name llb1 +spawn_docker_host --dock-type host --dock-name l3h1 +spawn_docker_host --dock-type host --dock-name l3ep1 +spawn_docker_host --dock-type host --dock-name l3ep2 +spawn_docker_host --dock-type host --dock-name l3ep3 + +echo "#########################################" +echo "Connecting and configuring hosts" +echo "#########################################" + + +connect_docker_hosts l3h1 llb1 +connect_docker_hosts l3ep1 llb1 +connect_docker_hosts l3ep2 llb1 +connect_docker_hosts l3ep3 llb1 + +sleep 5 + +#L3 config +config_docker_host --host1 l3h1 --host2 llb1 --ptype phy --addr 10.10.10.1/24 --gw 10.10.10.254 +config_docker_host --host1 l3ep1 --host2 llb1 --ptype phy --addr 31.31.31.1/24 --gw 31.31.31.254 +config_docker_host --host1 l3ep2 --host2 llb1 --ptype phy --addr 32.32.32.1/24 --gw 32.32.32.254 +config_docker_host --host1 l3ep3 --host2 llb1 --ptype phy --addr 33.33.33.1/24 --gw 33.33.33.254 +config_docker_host --host1 llb1 --host2 l3h1 --ptype phy --addr 10.10.10.254/24 +config_docker_host --host1 llb1 --host2 l3ep1 --ptype phy --addr 31.31.31.254/24 +config_docker_host --host1 llb1 --host2 l3ep2 --ptype phy --addr 32.32.32.254/24 +config_docker_host --host1 llb1 --host2 l3ep3 --ptype phy --addr 33.33.33.254/24 + 
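With --mode=fullproxy, llb1 terminates the client's TCP connection on the service address and re-originates it toward one of the three endpoints on port 8080, which is also why the common.sh change above skips the tc hook-point check for this mode. A minimal spot-check sketch for the rule created just below, assuming loxicmd's `get lb` listing (the scenario's validation.sh further down performs the full round-robin check):

# List the programmed rule inside llb1, then take one round-robin pass
# from the client namespace (the $dexec/$hexec helpers come from common.sh):
$dexec llb1 loxicmd get lb -o wide
for i in 1 2 3; do
  $hexec l3h1 curl --max-time 10 -s http://10.10.10.254:2020
done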
+sleep 5 +create_lb_rule llb1 10.10.10.254 --tcp=2020:8080 --endpoints=31.31.31.1:1,32.32.32.1:1,33.33.33.1:1 --mode=fullproxy diff --git a/cicd/httpproxy/rmconfig.sh b/cicd/httpproxy/rmconfig.sh new file mode 100644 index 00000000..9ea80577 --- /dev/null +++ b/cicd/httpproxy/rmconfig.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +source ../common.sh + +disconnect_docker_hosts l3h1 llb1 +disconnect_docker_hosts l3ep1 llb1 +disconnect_docker_hosts l3ep2 llb1 +disconnect_docker_hosts l3ep3 llb1 + +delete_docker_host llb1 +delete_docker_host l3h1 +delete_docker_host l3ep1 +delete_docker_host l3ep2 +delete_docker_host l3ep3 + +echo "#########################################" +echo "Deleted testbed" +echo "#########################################" diff --git a/cicd/httpproxy/validation.sh b/cicd/httpproxy/validation.sh new file mode 100644 index 00000000..356e0755 --- /dev/null +++ b/cicd/httpproxy/validation.sh @@ -0,0 +1,64 @@ +#!/bin/bash +source ../common.sh +echo SCENARIO-http-tcplb +$hexec l3ep1 node ../common/tcp_server.js server1 & +$hexec l3ep2 node ../common/tcp_server.js server2 & +$hexec l3ep3 node ../common/tcp_server.js server3 & + +sleep 5 +code=0 +servIP=( "10.10.10.254" ) +servArr=( "server1" "server2" "server3" ) +ep=( "31.31.31.1" "32.32.32.1" "33.33.33.1" ) +j=0 +waitCount=0 +while [ $j -le 2 ] +do + res=$($hexec l3h1 curl --max-time 10 -s ${ep[j]}:8080) + #echo $res + if [[ $res == "${servArr[j]}" ]] + then + echo "$res UP" + j=$(( $j + 1 )) + else + echo "Waiting for ${servArr[j]}(${ep[j]})" + waitCount=$(( $waitCount + 1 )) + if [[ $waitCount == 10 ]]; + then + echo "All Servers are not UP" + echo SCENARIO-http-tcplb [FAILED] + sudo killall -9 node 2>&1 > /dev/null + exit 1 + fi + fi + sleep 1 +done + +for k in {0..0} +do +echo "Testing Service IP: ${servIP[k]}" +lcode=0 +for i in {1..4} +do +for j in {0..2} +do + res=$($hexec l3h1 curl --max-time 10 -s http://${servIP[k]}:2020) + echo $res + if [[ $res != "${servArr[j]}" ]] + then + lcode=1 + fi + sleep 1 +done +done +if [[ $lcode == 0 ]] +then + echo SCENARIO-http-tcplb with ${servIP[k]} [OK] +else + echo SCENARIO-http-tcplb with ${servIP[k]} [FAILED] + code=1 +fi +done + +sudo killall -9 node 2>&1 > /dev/null +exit $code diff --git a/cicd/httpshostproxy/config.sh b/cicd/httpshostproxy/config.sh new file mode 100755 index 00000000..192e9058 --- /dev/null +++ b/cicd/httpshostproxy/config.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +source ../common.sh + +echo "#########################################" +echo "Spawning all hosts" +echo "#########################################" + +spawn_docker_host --dock-type loxilb --dock-name llb1 --extra-args "--proxyonlymode" +spawn_docker_host --dock-type host --dock-name l3h1 +spawn_docker_host --dock-type host --dock-name l3ep1 +spawn_docker_host --dock-type host --dock-name l3ep2 +spawn_docker_host --dock-type host --dock-name l3ep3 + +echo "#########################################" +echo "Connecting and configuring hosts" +echo "#########################################" + + +connect_docker_hosts l3h1 llb1 +connect_docker_hosts l3ep1 llb1 +connect_docker_hosts l3ep2 llb1 +connect_docker_hosts l3ep3 llb1 + +sleep 5 + +#L3 config +config_docker_host --host1 l3h1 --host2 llb1 --ptype phy --addr 10.10.10.1/24 --gw 10.10.10.254 +config_docker_host --host1 l3ep1 --host2 llb1 --ptype phy --addr 31.31.31.1/24 --gw 31.31.31.254 +config_docker_host --host1 l3ep2 --host2 llb1 --ptype phy --addr 32.32.32.1/24 --gw 32.32.32.254 +config_docker_host --host1 l3ep3 --host2 llb1 --ptype phy --addr 33.33.33.1/24 
--gw 33.33.33.254 +config_docker_host --host1 llb1 --host2 l3h1 --ptype phy --addr 10.10.10.254/24 +config_docker_host --host1 llb1 --host2 l3ep1 --ptype phy --addr 31.31.31.254/24 +config_docker_host --host1 llb1 --host2 l3ep2 --ptype phy --addr 32.32.32.254/24 +config_docker_host --host1 llb1 --host2 l3ep3 --ptype phy --addr 33.33.33.254/24 + +$dexec llb1 ip addr add 10.10.10.3/32 dev lo +./minica -ip-addresses 10.10.10.254 + +docker cp minica.pem llb1:/opt/loxilb/cert/rootCA.crt +docker cp 10.10.10.254/cert.pem llb1:/opt/loxilb/cert/server.crt +docker cp 10.10.10.254/key.pem llb1:/opt/loxilb/cert/server.key + +sleep 5 +create_lb_rule llb1 10.10.10.254 --tcp=2020:8080 --endpoints=31.31.31.1:1,32.32.32.1:1,33.33.33.1:1 --mode=fullproxy --security=https --host=loxilb.io diff --git a/cicd/httpshostproxy/minica b/cicd/httpshostproxy/minica new file mode 100755 index 00000000..a152b166 Binary files /dev/null and b/cicd/httpshostproxy/minica differ diff --git a/cicd/httpshostproxy/rmconfig.sh b/cicd/httpshostproxy/rmconfig.sh new file mode 100755 index 00000000..9ea80577 --- /dev/null +++ b/cicd/httpshostproxy/rmconfig.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +source ../common.sh + +disconnect_docker_hosts l3h1 llb1 +disconnect_docker_hosts l3ep1 llb1 +disconnect_docker_hosts l3ep2 llb1 +disconnect_docker_hosts l3ep3 llb1 + +delete_docker_host llb1 +delete_docker_host l3h1 +delete_docker_host l3ep1 +delete_docker_host l3ep2 +delete_docker_host l3ep3 + +echo "#########################################" +echo "Deleted testbed" +echo "#########################################" diff --git a/cicd/httpshostproxy/validation.sh b/cicd/httpshostproxy/validation.sh new file mode 100755 index 00000000..199d80d2 --- /dev/null +++ b/cicd/httpshostproxy/validation.sh @@ -0,0 +1,64 @@ +#!/bin/bash +source ../common.sh +echo SCENARIO-https-tcplb +$hexec l3ep1 node ../common/tcp_server.js server1 & +$hexec l3ep2 node ../common/tcp_server.js server2 & +$hexec l3ep3 node ../common/tcp_server.js server3 & + +sleep 5 +code=0 +servIP=( "10.10.10.254" ) +servArr=( "server1" "server2" "server3" ) +ep=( "31.31.31.1" "32.32.32.1" "33.33.33.1" ) +j=0 +waitCount=0 +while [ $j -le 2 ] +do + res=$($hexec l3h1 curl --max-time 10 -s ${ep[j]}:8080) + #echo $res + if [[ $res == "${servArr[j]}" ]] + then + echo "$res UP" + j=$(( $j + 1 )) + else + echo "Waiting for ${servArr[j]}(${ep[j]})" + waitCount=$(( $waitCount + 1 )) + if [[ $waitCount == 10 ]]; + then + echo "All Servers are not UP" + echo SCENARIO-tcplb [FAILED] + sudo killall -9 node 2>&1 > /dev/null + exit 1 + fi + fi + sleep 1 +done + +for k in {0..0} +do +echo "Testing Service IP: ${servIP[k]}" +lcode=0 +for i in {1..4} +do +for j in {0..2} +do + res=$($hexec l3h1 curl --max-time 10 -H "Application/json" -H "Content-type: application/json" -H "HOST: loxilb.io" --insecure -s https://${servIP[k]}:2020) + echo $res + if [[ $res != "${servArr[j]}" ]] + then + lcode=1 + fi + sleep 1 +done +done +if [[ $lcode == 0 ]] +then + echo SCENARIO-https-tcplb with ${servIP[k]} [OK] +else + echo SCENARIO-https-tcplb with ${servIP[k]} [FAILED] + code=1 +fi +done + +sudo killall -9 node 2>&1 > /dev/null +exit $code diff --git a/cicd/httpsproxy/config.sh b/cicd/httpsproxy/config.sh index 0d3ce4b2..b0b60342 100755 --- a/cicd/httpsproxy/config.sh +++ b/cicd/httpsproxy/config.sh @@ -6,7 +6,7 @@ echo "#########################################" echo "Spawning all hosts" echo "#########################################" -spawn_docker_host --dock-type loxilb --dock-name llb1 +spawn_docker_host 
--dock-type loxilb --dock-name llb1 --extra-args "--proxyonlymode" spawn_docker_host --dock-type host --dock-name l3h1 spawn_docker_host --dock-type host --dock-name l3ep1 spawn_docker_host --dock-type host --dock-name l3ep2 diff --git a/cicd/ipsec-e2e/config.sh b/cicd/ipsec-e2e/config.sh new file mode 100755 index 00000000..4994d916 --- /dev/null +++ b/cicd/ipsec-e2e/config.sh @@ -0,0 +1,149 @@ +#!/bin/bash +source ../common.sh + +echo "#########################################" +echo "Spawning all hosts" +echo "#########################################" + +spawn_docker_host --dock-type loxilb --dock-name lgw1 +spawn_docker_host --dock-type loxilb --dock-name llb1 +spawn_docker_host --dock-type loxilb --dock-name rgw1 +spawn_docker_host --dock-type loxilb --dock-name rgw2 +spawn_docker_host --dock-type host --dock-name lh1 +spawn_docker_host --dock-type host --dock-name rh1 +spawn_docker_host --dock-type host --dock-name rh2 + +$dexec lgw1 bash -c "apt-get update && apt-get install -y iputils-ping curl" +$dexec llb1 bash -c "apt-get update && apt-get install -y iputils-ping curl" +$dexec rgw1 bash -c "apt-get update && apt-get install -y iputils-ping curl" +$dexec rgw2 bash -c "apt-get update && apt-get install -y iputils-ping curl" + +echo "#########################################" +echo "Connecting and configuring hosts" +echo "#########################################" + +connect_docker_hosts lh1 lgw1 +connect_docker_hosts lgw1 llb1 +connect_docker_hosts llb1 rgw1 +connect_docker_hosts llb1 rgw2 +connect_docker_hosts rh1 rgw1 +connect_docker_hosts rh2 rgw2 + +config_docker_host --host1 lh1 --host2 lgw1 --ptype phy --addr 192.168.10.175/24 --gw 192.168.10.1 +config_docker_host --host1 lgw1 --host2 lh1 --ptype phy --addr 192.168.10.1/24 +config_docker_host --host1 lgw1 --host2 llb1 --ptype phy --addr 7.7.7.1/24 +config_docker_host --host1 llb1 --host2 lgw1 --ptype phy --addr 7.7.7.254/24 + +#Tunnel 1 +#xfrm Config(Left) +$dexec lgw1 ip link add vti100 type vti key 100 remote 7.7.7.254 local 7.7.7.1 +$dexec lgw1 ip link set vti100 up +$dexec lgw1 ip addr add 77.77.77.1/24 remote 77.77.77.254/24 dev vti100 +$dexec lgw1 sysctl -w "net.ipv4.conf.vti100.disable_policy=1" +$dexec lgw1 sysctl -w "net.ipv4.conf.elgw1lh1.proxy_arp=1" + +$dexec lgw1 ip route add 192.168.10.200/32 via 77.77.77.254 + +#xfrm Config(Right) +$dexec llb1 ip link add vti100 type vti key 100 remote 7.7.7.1 local 7.7.7.254 +$dexec llb1 ip link set vti100 up +$dexec llb1 ip addr add 77.77.77.254/24 remote 77.77.77.1/24 dev vti100 +$dexec llb1 sysctl -w "net.ipv4.conf.vti100.disable_policy=1" +#$dexec llb1 sysctl -w "net.ipv4.conf.ellb1lgw1.proxy_arp=1" + +$dexec llb1 ip addr add 192.168.10.200/32 dev lo +$dexec llb1 ip route add 192.168.10.175/32 via 77.77.77.1 dev vti100 +$dexec llb1 loxicmd create lb 192.168.10.200 --tcp=2020:8080 --endpoints=192.168.10.10:1,192.168.10.11:1 --mode=fullnat +$dexec llb1 loxicmd create ep 192.168.10.10 --name=192.168.10.10_tcp_2020 --probetype=none +$dexec llb1 loxicmd create ep 192.168.10.11 --name=192.168.10.11_tcp_2020 --probetype=none + +#Route towards Host(lh1) +$dexec llb1 ip route add 192.168.10.175/32 via 77.77.77.1 dev vti100 + + + +create_docker_host_vlan --host1 llb1 --host2 rgw1 --id 1000 --ptype untagged +create_docker_host_vlan --host1 llb1 --host2 rgw2 --id 1000 --ptype untagged + +config_docker_host --host1 rgw1 --host2 llb1 --ptype phy --addr 8.7.7.1/24 +config_docker_host --host1 rgw2 --host2 llb1 --ptype phy --addr 8.7.7.2/24 + +config_docker_host --host1 llb1 --host2 
rgw1 --ptype vlan --id 1000 --addr 8.7.7.254/24 + +#Tunnel-2 + +#xfrm Config(Right) +$dexec llb1 ip link add vti200 type vti key 200 remote 8.7.7.1 local 8.7.7.254 +$dexec llb1 ip link set vti200 up +$dexec llb1 ip addr add 8.7.200.254/24 remote 8.7.200.1/24 dev vti200 +$dexec llb1 sysctl -w "net.ipv4.conf.vti200.disable_policy=1" + +#Route towards EP(rh1) +$dexec llb1 ip route add 192.168.10.10/32 via 8.7.200.1 dev vti200 + + +#xfrm Config(Left) +$dexec rgw1 ip link add vti200 type vti key 200 remote 8.7.7.254 local 8.7.7.1 +$dexec rgw1 ip link set vti200 up +$dexec rgw1 ip addr add 8.7.200.1/24 remote 8.7.200.254/24 dev vti200 +$dexec rgw1 sysctl -w "net.ipv4.conf.vti200.disable_policy=1" +$dexec rgw1 sysctl -w "net.ipv4.conf.ergw1rh1.proxy_arp=1" +#Route towards llb1 +$dexec rgw1 ip route add 192.168.10.200/32 via 8.7.200.254 + + +#Tunnel-3 + +#xfrm Config(Right) +$dexec llb1 ip link add vti201 type vti key 201 remote 8.7.7.2 local 8.7.7.254 +$dexec llb1 ip link set vti201 up +$dexec llb1 ip addr add 8.7.201.254/24 remote 8.7.201.1/24 dev vti201 +$dexec llb1 sysctl -w "net.ipv4.conf.vti201.disable_policy=1" + +#Route towards EP(rh2) +$dexec llb1 ip route add 192.168.10.11/32 via 8.7.201.1 dev vti201 + +$dexec rgw2 ip link add vti201 type vti key 201 remote 8.7.7.254 local 8.7.7.2 +$dexec rgw2 ip link set vti201 up +$dexec rgw2 ip addr add 8.7.201.1/24 remote 8.7.201.254/24 dev vti201 +$dexec rgw2 sysctl -w "net.ipv4.conf.vti201.disable_policy=1" +$dexec rgw2 sysctl -w "net.ipv4.conf.ergw2rh2.proxy_arp=1" +#Route towards llb1 +$dexec rgw2 ip route add 192.168.10.200/32 via 8.7.201.254 + + + +config_docker_host --host1 rgw1 --host2 rh1 --ptype phy --addr 192.168.10.2/24 +config_docker_host --host1 rh1 --host2 rgw1 --ptype phy --addr 192.168.10.10/24 --gw 192.168.10.2 + +config_docker_host --host1 rgw2 --host2 rh2 --ptype phy --addr 192.168.10.3/24 +config_docker_host --host1 rh2 --host2 rgw2 --ptype phy --addr 192.168.10.11/24 --gw 192.168.10.3 + +#$dexec lgw1 apt-get update +$dexec lgw1 apt-get install -y iptables strongswan strongswan-swanctl systemctl +docker cp lgw1_ipsec_config/ipsec.conf lgw1:/etc/ +docker cp lgw1_ipsec_config/ipsec.secrets lgw1:/etc/ +docker cp lgw1_ipsec_config/charon.conf lgw1:/etc/strongswan.d/ +$dexec lgw1 systemctl restart strongswan-starter + +#$dexec llb1 apt-get update +$dexec llb1 apt-get install -y strongswan strongswan-swanctl systemctl +docker cp llb1_ipsec_config/ipsec.conf llb1:/etc/ +docker cp llb1_ipsec_config/ipsec.secrets llb1:/etc/ +docker cp llb1_ipsec_config/charon.conf llb1:/etc/strongswan.d/ +$dexec llb1 systemctl restart strongswan-starter + +#$dexec rgw1 apt-get update +$dexec rgw1 apt-get install -y iptables strongswan strongswan-swanctl systemctl +docker cp rgw1_ipsec_config/ipsec.conf rgw1:/etc/ +docker cp rgw1_ipsec_config/ipsec.secrets rgw1:/etc/ +docker cp rgw1_ipsec_config/charon.conf rgw1:/etc/strongswan.d/ +$dexec rgw1 systemctl restart strongswan-starter + +#$dexec rgw2 apt-get update +$dexec rgw2 apt-get install -y iptables strongswan strongswan-swanctl systemctl +docker cp rgw2_ipsec_config/ipsec.conf rgw2:/etc/ +docker cp rgw2_ipsec_config/ipsec.secrets rgw2:/etc/ +docker cp rgw2_ipsec_config/charon.conf rgw2:/etc/strongswan.d/ +$dexec rgw2 systemctl restart strongswan-starter + diff --git a/cicd/ipsec-e2e/lgw1_ipsec_config/charon.conf b/cicd/ipsec-e2e/lgw1_ipsec_config/charon.conf new file mode 100644 index 00000000..926ae24a --- /dev/null +++ b/cicd/ipsec-e2e/lgw1_ipsec_config/charon.conf @@ -0,0 +1,376 @@ +# Options for the 
charon IKE daemon. +charon { + + # Accept unencrypted ID and HASH payloads in IKEv1 Main Mode. + # accept_unencrypted_mainmode_messages = no + + # Maximum number of half-open IKE_SAs for a single peer IP. + # block_threshold = 5 + + # Whether Certificate Revocation Lists (CRLs) fetched via HTTP or LDAP + # should be saved under a unique file name derived from the public key of + # the Certification Authority (CA) to /etc/ipsec.d/crls (stroke) or + # /etc/swanctl/x509crl (vici), respectively. + # cache_crls = no + + # Whether relations in validated certificate chains should be cached in + # memory. + # cert_cache = yes + + # Send Cisco Unity vendor ID payload (IKEv1 only). + # cisco_unity = no + + # Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed. + # close_ike_on_child_failure = no + + # Number of half-open IKE_SAs that activate the cookie mechanism. + # cookie_threshold = 10 + + # Delete CHILD_SAs right after they got successfully rekeyed (IKEv1 only). + # delete_rekeyed = no + + # Delay in seconds until inbound IPsec SAs are deleted after rekeyings + # (IKEv2 only). + # delete_rekeyed_delay = 5 + + # Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic + # strength. + # dh_exponent_ansi_x9_42 = yes + + # Use RTLD_NOW with dlopen when loading plugins and IMV/IMCs to reveal + # missing symbols immediately. + # dlopen_use_rtld_now = no + + # DNS server assigned to peer via configuration payload (CP). + # dns1 = + + # DNS server assigned to peer via configuration payload (CP). + # dns2 = + + # Enable Denial of Service protection using cookies and aggressiveness + # checks. + # dos_protection = yes + + # Compliance with the errata for RFC 4753. + # ecp_x_coordinate_only = yes + + # Free objects during authentication (might conflict with plugins). + # flush_auth_cfg = no + + # Whether to follow IKEv2 redirects (RFC 5685). + # follow_redirects = yes + + # Maximum size (complete IP datagram size in bytes) of a sent IKE fragment + # when using proprietary IKEv1 or standardized IKEv2 fragmentation, defaults + # to 1280 (use 0 for address family specific default values, which uses a + # lower value for IPv4). If specified this limit is used for both IPv4 and + # IPv6. + # fragment_size = 1280 + + # Name of the group the daemon changes to after startup. + # group = + + # Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING). + # half_open_timeout = 30 + + # Enable hash and URL support. + # hash_and_url = no + + # Allow IKEv1 Aggressive Mode with pre-shared keys as responder. + # i_dont_care_about_security_and_use_aggressive_mode_psk = no + + # Whether to ignore the traffic selectors from the kernel's acquire events + # for IKEv2 connections (they are not used for IKEv1). + # ignore_acquire_ts = no + + # A space-separated list of routing tables to be excluded from route + # lookups. + # ignore_routing_tables = + + # Maximum number of IKE_SAs that can be established at the same time before + # new connection attempts are blocked. + # ikesa_limit = 0 + + # Number of exclusively locked segments in the hash table. + # ikesa_table_segments = 1 + + # Size of the IKE_SA hash table. + # ikesa_table_size = 1 + + # Whether to close IKE_SA if the only CHILD_SA closed due to inactivity. + # inactivity_close_ike = no + + # Limit new connections based on the current number of half open IKE_SAs, + # see IKE_SA_INIT DROPPING in strongswan.conf(5). + # init_limit_half_open = 0 + + # Limit new connections based on the number of queued jobs. 
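Each gateway in this scenario ships the same charon.conf, with every option left at its commented default except install_routes and install_virtual_ip (both set to no a few lines below): route installation is done by hand in config.sh over the vti devices, so charon must not add its own. For reference, the route-based VTI plumbing from config.sh above (lgw1 side), where the vti key equals the conn's mark value:

# Restated from config.sh: charon only negotiates the SA; the kernel vti
# device and manually added routes steer the traffic.
ip link add vti100 type vti key 100 remote 7.7.7.254 local 7.7.7.1
ip link set vti100 up
ip addr add 77.77.77.1/24 remote 77.77.77.254/24 dev vti100
sysctl -w net.ipv4.conf.vti100.disable_policy=1
ip route add 192.168.10.200/32 via 77.77.77.254   # VIP reached via the tunnel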
+ # init_limit_job_load = 0 + + # Causes charon daemon to ignore IKE initiation requests. + # initiator_only = no + + # Install routes into a separate routing table for established IPsec + # tunnels. + install_routes = no + + # Install virtual IP addresses. + install_virtual_ip = no + + # The name of the interface on which virtual IP addresses should be + # installed. + # install_virtual_ip_on = + + # Check daemon, libstrongswan and plugin integrity at startup. + # integrity_test = no + + # A comma-separated list of network interfaces that should be ignored, if + # interfaces_use is specified this option has no effect. + # interfaces_ignore = + + # A comma-separated list of network interfaces that should be used by + # charon. All other interfaces are ignored. + # interfaces_use = + + # NAT keep alive interval. + # keep_alive = 20s + + # Plugins to load in the IKE daemon charon. + # load = + + # Determine plugins to load via each plugin's load option. + # load_modular = no + + # Initiate IKEv2 reauthentication with a make-before-break scheme. + # make_before_break = no + + # Maximum number of IKEv1 phase 2 exchanges per IKE_SA to keep state about + # and track concurrently. + # max_ikev1_exchanges = 3 + + # Maximum packet size accepted by charon. + # max_packet = 10000 + + # Enable multiple authentication exchanges (RFC 4739). + # multiple_authentication = yes + + # WINS servers assigned to peer via configuration payload (CP). + # nbns1 = + + # WINS servers assigned to peer via configuration payload (CP). + # nbns2 = + + # UDP port used locally. If set to 0 a random port will be allocated. + # port = 500 + + # UDP port used locally in case of NAT-T. If set to 0 a random port will be + # allocated. Has to be different from charon.port, otherwise a random port + # will be allocated. + # port_nat_t = 4500 + + # Whether to prefer updating SAs to the path with the best route. + # prefer_best_path = no + + # Prefer locally configured proposals for IKE/IPsec over supplied ones as + # responder (disabling this can avoid keying retries due to + # INVALID_KE_PAYLOAD notifies). + # prefer_configured_proposals = yes + + # Controls whether permanent or temporary IPv6 addresses are used as source, + # or announced as additional addresses if MOBIKE is used. + # prefer_temporary_addrs = no + + # Process RTM_NEWROUTE and RTM_DELROUTE events. + # process_route = yes + + # How RDNs in subject DNs of certificates are matched against configured + # identities (strict, reordered, or relaxed). + # rdn_matching = strict + + # Delay in ms for receiving packets, to simulate larger RTT. + # receive_delay = 0 + + # Delay request messages. + # receive_delay_request = yes + + # Delay response messages. + # receive_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # receive_delay_type = 0 + + # Size of the AH/ESP replay window, in packets. + # replay_window = 32 + + # Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION + # in strongswan.conf(5). + # retransmit_base = 1.8 + + # Maximum jitter in percent to apply randomly to calculated retransmission + # timeout (0 to disable). + # retransmit_jitter = 0 + + # Upper limit in seconds for calculated retransmission timeout (0 to + # disable). + # retransmit_limit = 0 + + # Timeout in seconds before sending first retransmit. + # retransmit_timeout = 4.0 + + # Number of times to retransmit a packet before giving up. + # retransmit_tries = 5 + + # Interval in seconds to use when retrying to initiate an IKE_SA (e.g. 
if + # DNS resolution failed), 0 to disable retries. + # retry_initiate_interval = 0 + + # Initiate CHILD_SA within existing IKE_SAs (always enabled for IKEv1). + # reuse_ikesa = yes + + # Numerical routing table to install routes to. + # routing_table = + + # Priority of the routing table. + # routing_table_prio = + + # Whether to use RSA with PSS padding instead of PKCS#1 padding by default. + # rsa_pss = no + + # Delay in ms for sending packets, to simulate larger RTT. + # send_delay = 0 + + # Delay request messages. + # send_delay_request = yes + + # Delay response messages. + # send_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # send_delay_type = 0 + + # Send strongSwan vendor ID payload + # send_vendor_id = no + + # Whether to enable Signature Authentication as per RFC 7427. + # signature_authentication = yes + + # Whether to enable constraints against IKEv2 signature schemes. + # signature_authentication_constraints = yes + + # Value mixed into the local IKE SPIs after applying spi_mask. + # spi_label = 0x0000000000000000 + + # Mask applied to local IKE SPIs before mixing in spi_label (bits set will + # be replaced with spi_label). + # spi_mask = 0x0000000000000000 + + # The upper limit for SPIs requested from the kernel for IPsec SAs. + # spi_max = 0xcfffffff + + # The lower limit for SPIs requested from the kernel for IPsec SAs. + # spi_min = 0xc0000000 + + # Number of worker threads in charon. + # threads = 16 + + # Name of the user the daemon changes to after startup. + # user = + + crypto_test { + + # Benchmark crypto algorithms and order them by efficiency. + # bench = no + + # Buffer size used for crypto benchmark. + # bench_size = 1024 + + # Time in ms during which crypto algorithm performance is measured. + # bench_time = 50 + + # Test crypto algorithms during registration (requires test vectors + # provided by the test-vectors plugin). + # on_add = no + + # Test crypto algorithms on each crypto primitive instantiation. + # on_create = no + + # Strictly require at least one test vector to enable an algorithm. + # required = no + + # Whether to test RNG with TRUE quality; requires a lot of entropy. + # rng_true = no + + } + + host_resolver { + + # Maximum number of concurrent resolver threads (they are terminated if + # unused). + # max_threads = 3 + + # Minimum number of resolver threads to keep around. + # min_threads = 0 + + } + + leak_detective { + + # Includes source file names and line numbers in leak detective output. + # detailed = yes + + # Threshold in bytes for leaks to be reported (0 to report all). + # usage_threshold = 10240 + + # Threshold in number of allocations for leaks to be reported (0 to + # report all). + # usage_threshold_count = 0 + + } + + processor { + + # Section to configure the number of reserved threads per priority class + # see JOB PRIORITY MANAGEMENT in strongswan.conf(5). + priority_threads { + + } + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is started. + start-scripts { + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is terminated. + stop-scripts { + + } + + tls { + + # List of TLS encryption ciphers. + # cipher = + + # List of TLS key exchange methods. + # key_exchange = + + # List of TLS MAC algorithms. + # mac = + + # List of TLS cipher suites. + # suites = + + } + + x509 { + + # Discard certificates with unsupported or unknown critical extensions. 
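The ipsec.conf that follows defines a single IKEv2 tunnel (conn default) authenticated by PSK, whose mark=100 ties it to the vti100 device above. A minimal sketch, assuming strongswan-starter is up, to confirm the SA established and that traffic actually rides the vti:

$dexec lgw1 ipsec statusall | grep -E 'ESTABLISHED|INSTALLED'   # IKE and CHILD SA state
$dexec lgw1 ip xfrm state                                       # kernel SAs with their marks
$dexec lgw1 ip -s link show vti100                              # per-tunnel packet counters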
+ # enforce_critical = yes + + } + +} + diff --git a/cicd/ipsec-e2e/lgw1_ipsec_config/ipsec.conf b/cicd/ipsec-e2e/lgw1_ipsec_config/ipsec.conf new file mode 100644 index 00000000..c1314eb9 --- /dev/null +++ b/cicd/ipsec-e2e/lgw1_ipsec_config/ipsec.conf @@ -0,0 +1,78 @@ +#@ /etc/strongswan/ipsec.conf (Centos) or /etc/ipsec.conf (Ubuntu) + +# ipsec.conf - strongSwan IPsec configuration file + +# basic configuration + +config setup + charondebug="cfg 2, ike 3" +# strictcrlpolicy=yes +# uniqueids = no + +# Add connections here. + +# Sample VPN connections + +#conn sample-self-signed +# leftsubnet=10.1.0.0/16 +# leftcert=selfCert.der +# leftsendcert=never +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightcert=peerCert.der +# auto=start + +#conn sample-with-ca-cert +# leftsubnet=10.1.0.0/16 +# leftcert=myCert.pem +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightid="C=CH, O=Linux strongSwan CN=peer name" +# auto=start + + +conn default + leftauth=psk + rightauth=psk + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : sha1 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 28800 seconds + ikelifetime=28800s + # Phase 1 Negotiation Mode : main + aggressive=no + # Protocol : esp + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : hmac-sha1-96 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 3600 seconds + lifetime=3600s + # Mode : tunnel + type=tunnel + # DPD Interval : 10 + dpddelay=10s + # DPD Retries : 3 + dpdtimeout=30s + # Tuning Parameters for AWS Virtual Private Gateway: + keyexchange=ikev2 + #keyingtries=%forever + rekey=yes + reauth=no + dpdaction=restart + closeaction=restart + #left=%defaultroute + #leftsubnet=0.0.0.0/0,::/0 + #rightsubnet=0.0.0.0/0,::/0 + leftsubnet=192.168.10.175 + rightsubnet=192.168.10.200 + #leftupdown=/etc/strongswan/ipsec-vti.sh + left=7.7.7.1 + right=7.7.7.254 + installpolicy=yes + compress=no + mobike=no + #VTI Key + mark=100 + auto=start diff --git a/cicd/ipsec-e2e/lgw1_ipsec_config/ipsec.secrets b/cicd/ipsec-e2e/lgw1_ipsec_config/ipsec.secrets new file mode 100644 index 00000000..318cc964 --- /dev/null +++ b/cicd/ipsec-e2e/lgw1_ipsec_config/ipsec.secrets @@ -0,0 +1,3 @@ +#@ /etc/strongswan/ipsec.secrets (Centos) or /etc/ipsec.secrets (Ubuntu) + +7.7.7.1 7.7.7.254 : PSK "loxilb@1234!" diff --git a/cicd/ipsec-e2e/llb1_ipsec_config/charon.conf b/cicd/ipsec-e2e/llb1_ipsec_config/charon.conf new file mode 100644 index 00000000..926ae24a --- /dev/null +++ b/cicd/ipsec-e2e/llb1_ipsec_config/charon.conf @@ -0,0 +1,376 @@ +# Options for the charon IKE daemon. +charon { + + # Accept unencrypted ID and HASH payloads in IKEv1 Main Mode. + # accept_unencrypted_mainmode_messages = no + + # Maximum number of half-open IKE_SAs for a single peer IP. + # block_threshold = 5 + + # Whether Certificate Revocation Lists (CRLs) fetched via HTTP or LDAP + # should be saved under a unique file name derived from the public key of + # the Certification Authority (CA) to /etc/ipsec.d/crls (stroke) or + # /etc/swanctl/x509crl (vici), respectively. + # cache_crls = no + + # Whether relations in validated certificate chains should be cached in + # memory. + # cert_cache = yes + + # Send Cisco Unity vendor ID payload (IKEv1 only). + # cisco_unity = no + + # Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed. 
+ # close_ike_on_child_failure = no + + # Number of half-open IKE_SAs that activate the cookie mechanism. + # cookie_threshold = 10 + + # Delete CHILD_SAs right after they got successfully rekeyed (IKEv1 only). + # delete_rekeyed = no + + # Delay in seconds until inbound IPsec SAs are deleted after rekeyings + # (IKEv2 only). + # delete_rekeyed_delay = 5 + + # Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic + # strength. + # dh_exponent_ansi_x9_42 = yes + + # Use RTLD_NOW with dlopen when loading plugins and IMV/IMCs to reveal + # missing symbols immediately. + # dlopen_use_rtld_now = no + + # DNS server assigned to peer via configuration payload (CP). + # dns1 = + + # DNS server assigned to peer via configuration payload (CP). + # dns2 = + + # Enable Denial of Service protection using cookies and aggressiveness + # checks. + # dos_protection = yes + + # Compliance with the errata for RFC 4753. + # ecp_x_coordinate_only = yes + + # Free objects during authentication (might conflict with plugins). + # flush_auth_cfg = no + + # Whether to follow IKEv2 redirects (RFC 5685). + # follow_redirects = yes + + # Maximum size (complete IP datagram size in bytes) of a sent IKE fragment + # when using proprietary IKEv1 or standardized IKEv2 fragmentation, defaults + # to 1280 (use 0 for address family specific default values, which uses a + # lower value for IPv4). If specified this limit is used for both IPv4 and + # IPv6. + # fragment_size = 1280 + + # Name of the group the daemon changes to after startup. + # group = + + # Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING). + # half_open_timeout = 30 + + # Enable hash and URL support. + # hash_and_url = no + + # Allow IKEv1 Aggressive Mode with pre-shared keys as responder. + # i_dont_care_about_security_and_use_aggressive_mode_psk = no + + # Whether to ignore the traffic selectors from the kernel's acquire events + # for IKEv2 connections (they are not used for IKEv1). + # ignore_acquire_ts = no + + # A space-separated list of routing tables to be excluded from route + # lookups. + # ignore_routing_tables = + + # Maximum number of IKE_SAs that can be established at the same time before + # new connection attempts are blocked. + # ikesa_limit = 0 + + # Number of exclusively locked segments in the hash table. + # ikesa_table_segments = 1 + + # Size of the IKE_SA hash table. + # ikesa_table_size = 1 + + # Whether to close IKE_SA if the only CHILD_SA closed due to inactivity. + # inactivity_close_ike = no + + # Limit new connections based on the current number of half open IKE_SAs, + # see IKE_SA_INIT DROPPING in strongswan.conf(5). + # init_limit_half_open = 0 + + # Limit new connections based on the number of queued jobs. + # init_limit_job_load = 0 + + # Causes charon daemon to ignore IKE initiation requests. + # initiator_only = no + + # Install routes into a separate routing table for established IPsec + # tunnels. + install_routes = no + + # Install virtual IP addresses. + install_virtual_ip = no + + # The name of the interface on which virtual IP addresses should be + # installed. + # install_virtual_ip_on = + + # Check daemon, libstrongswan and plugin integrity at startup. + # integrity_test = no + + # A comma-separated list of network interfaces that should be ignored, if + # interfaces_use is specified this option has no effect. + # interfaces_ignore = + + # A comma-separated list of network interfaces that should be used by + # charon. All other interfaces are ignored. 
+ # interfaces_use = + + # NAT keep alive interval. + # keep_alive = 20s + + # Plugins to load in the IKE daemon charon. + # load = + + # Determine plugins to load via each plugin's load option. + # load_modular = no + + # Initiate IKEv2 reauthentication with a make-before-break scheme. + # make_before_break = no + + # Maximum number of IKEv1 phase 2 exchanges per IKE_SA to keep state about + # and track concurrently. + # max_ikev1_exchanges = 3 + + # Maximum packet size accepted by charon. + # max_packet = 10000 + + # Enable multiple authentication exchanges (RFC 4739). + # multiple_authentication = yes + + # WINS servers assigned to peer via configuration payload (CP). + # nbns1 = + + # WINS servers assigned to peer via configuration payload (CP). + # nbns2 = + + # UDP port used locally. If set to 0 a random port will be allocated. + # port = 500 + + # UDP port used locally in case of NAT-T. If set to 0 a random port will be + # allocated. Has to be different from charon.port, otherwise a random port + # will be allocated. + # port_nat_t = 4500 + + # Whether to prefer updating SAs to the path with the best route. + # prefer_best_path = no + + # Prefer locally configured proposals for IKE/IPsec over supplied ones as + # responder (disabling this can avoid keying retries due to + # INVALID_KE_PAYLOAD notifies). + # prefer_configured_proposals = yes + + # Controls whether permanent or temporary IPv6 addresses are used as source, + # or announced as additional addresses if MOBIKE is used. + # prefer_temporary_addrs = no + + # Process RTM_NEWROUTE and RTM_DELROUTE events. + # process_route = yes + + # How RDNs in subject DNs of certificates are matched against configured + # identities (strict, reordered, or relaxed). + # rdn_matching = strict + + # Delay in ms for receiving packets, to simulate larger RTT. + # receive_delay = 0 + + # Delay request messages. + # receive_delay_request = yes + + # Delay response messages. + # receive_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # receive_delay_type = 0 + + # Size of the AH/ESP replay window, in packets. + # replay_window = 32 + + # Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION + # in strongswan.conf(5). + # retransmit_base = 1.8 + + # Maximum jitter in percent to apply randomly to calculated retransmission + # timeout (0 to disable). + # retransmit_jitter = 0 + + # Upper limit in seconds for calculated retransmission timeout (0 to + # disable). + # retransmit_limit = 0 + + # Timeout in seconds before sending first retransmit. + # retransmit_timeout = 4.0 + + # Number of times to retransmit a packet before giving up. + # retransmit_tries = 5 + + # Interval in seconds to use when retrying to initiate an IKE_SA (e.g. if + # DNS resolution failed), 0 to disable retries. + # retry_initiate_interval = 0 + + # Initiate CHILD_SA within existing IKE_SAs (always enabled for IKEv1). + # reuse_ikesa = yes + + # Numerical routing table to install routes to. + # routing_table = + + # Priority of the routing table. + # routing_table_prio = + + # Whether to use RSA with PSS padding instead of PKCS#1 padding by default. + # rsa_pss = no + + # Delay in ms for sending packets, to simulate larger RTT. + # send_delay = 0 + + # Delay request messages. + # send_delay_request = yes + + # Delay response messages. + # send_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. 
+ # send_delay_type = 0 + + # Send strongSwan vendor ID payload + # send_vendor_id = no + + # Whether to enable Signature Authentication as per RFC 7427. + # signature_authentication = yes + + # Whether to enable constraints against IKEv2 signature schemes. + # signature_authentication_constraints = yes + + # Value mixed into the local IKE SPIs after applying spi_mask. + # spi_label = 0x0000000000000000 + + # Mask applied to local IKE SPIs before mixing in spi_label (bits set will + # be replaced with spi_label). + # spi_mask = 0x0000000000000000 + + # The upper limit for SPIs requested from the kernel for IPsec SAs. + # spi_max = 0xcfffffff + + # The lower limit for SPIs requested from the kernel for IPsec SAs. + # spi_min = 0xc0000000 + + # Number of worker threads in charon. + # threads = 16 + + # Name of the user the daemon changes to after startup. + # user = + + crypto_test { + + # Benchmark crypto algorithms and order them by efficiency. + # bench = no + + # Buffer size used for crypto benchmark. + # bench_size = 1024 + + # Time in ms during which crypto algorithm performance is measured. + # bench_time = 50 + + # Test crypto algorithms during registration (requires test vectors + # provided by the test-vectors plugin). + # on_add = no + + # Test crypto algorithms on each crypto primitive instantiation. + # on_create = no + + # Strictly require at least one test vector to enable an algorithm. + # required = no + + # Whether to test RNG with TRUE quality; requires a lot of entropy. + # rng_true = no + + } + + host_resolver { + + # Maximum number of concurrent resolver threads (they are terminated if + # unused). + # max_threads = 3 + + # Minimum number of resolver threads to keep around. + # min_threads = 0 + + } + + leak_detective { + + # Includes source file names and line numbers in leak detective output. + # detailed = yes + + # Threshold in bytes for leaks to be reported (0 to report all). + # usage_threshold = 10240 + + # Threshold in number of allocations for leaks to be reported (0 to + # report all). + # usage_threshold_count = 0 + + } + + processor { + + # Section to configure the number of reserved threads per priority class + # see JOB PRIORITY MANAGEMENT in strongswan.conf(5). + priority_threads { + + } + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is started. + start-scripts { + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is terminated. + stop-scripts { + + } + + tls { + + # List of TLS encryption ciphers. + # cipher = + + # List of TLS key exchange methods. + # key_exchange = + + # List of TLS MAC algorithms. + # mac = + + # List of TLS cipher suites. + # suites = + + } + + x509 { + + # Discard certificates with unsupported or unknown critical extensions. + # enforce_critical = yes + + } + +} + diff --git a/cicd/ipsec-e2e/llb1_ipsec_config/ipsec.conf b/cicd/ipsec-e2e/llb1_ipsec_config/ipsec.conf new file mode 100644 index 00000000..785532f9 --- /dev/null +++ b/cicd/ipsec-e2e/llb1_ipsec_config/ipsec.conf @@ -0,0 +1,169 @@ +#@ /etc/strongswan/ipsec.conf (Centos) or /etc/ipsec.conf (Ubuntu) + +# ipsec.conf - strongSwan IPsec configuration file + +# basic configuration + +config setup + charondebug="cfg 2, ike 3" +# strictcrlpolicy=yes +# uniqueids = no + +# Add connections here. 
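Three conn blocks follow, one per peer; each mark pairs a tunnel with the matching vti device and the routes added in config.sh above, as summarized in this sketch:

# llb1-to-lgw1 : mark=100 -> vti100, client side (192.168.10.175)
# llb1-to-rgw1 : mark=200 -> vti200, endpoint rh1 (192.168.10.10)
# llb1-to-rgw2 : mark=201 -> vti201, endpoint rh2 (192.168.10.11)
$dexec llb1 swanctl --list-sas          # all three SAs via the vici interface
$dexec llb1 ip route show | grep vti    # per-endpoint routes over the vtis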
+ +# Sample VPN connections + +#conn sample-self-signed +# leftsubnet=10.1.0.0/16 +# leftcert=selfCert.der +# leftsendcert=never +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightcert=peerCert.der +# auto=start + +#conn sample-with-ca-cert +# leftsubnet=10.1.0.0/16 +# leftcert=myCert.pem +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightid="C=CH, O=Linux strongSwan CN=peer name" +# auto=start + +conn llb1-to-lgw1 + leftauth=psk + rightauth=psk + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : sha1 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 28800 seconds + ikelifetime=28800s + # Phase 1 Negotiation Mode : main + aggressive=no + # Protocol : esp + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : hmac-sha1-96 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 3600 seconds + lifetime=3600s + # Mode : tunnel + type=tunnel + # DPD Interval : 10 + dpddelay=10s + # DPD Retries : 3 + dpdtimeout=30s + # Tuning Parameters for AWS Virtual Private Gateway: + keyexchange=ikev2 + #keyingtries=%forever + rekey=yes + reauth=no + dpdaction=restart + closeaction=restart + #left=%defaultroute + #leftsubnet=0.0.0.0/0,::/0 + #rightsubnet=0.0.0.0/0,::/0 + leftsubnet=192.168.10.200 + rightsubnet=192.168.10.175 + #leftupdown=/etc/strongswan/ipsec-vti.sh + left=7.7.7.254 + right=7.7.7.1 + installpolicy=yes + compress=no + mobike=no + #VTI Key + mark=100 + auto=start + +conn llb1-to-rgw1 + leftauth=psk + rightauth=psk + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : sha1 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 28800 seconds + ikelifetime=28800s + # Phase 1 Negotiation Mode : main + aggressive=no + # Protocol : esp + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : hmac-sha1-96 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 3600 seconds + lifetime=3600s + # Mode : tunnel + type=tunnel + # DPD Interval : 10 + dpddelay=10s + # DPD Retries : 3 + dpdtimeout=30s + # Tuning Parameters for AWS Virtual Private Gateway: + keyexchange=ikev2 + #keyingtries=%forever + rekey=yes + reauth=no + dpdaction=restart + closeaction=restart + #left=%defaultroute + #leftsubnet=0.0.0.0/0,::/0 + #rightsubnet=0.0.0.0/0,::/0 + leftsubnet=192.168.10.200 + rightsubnet=192.168.10.10 + #leftupdown=/etc/strongswan/ipsec-vti.sh + left=8.7.7.254 + right=8.7.7.1 + installpolicy=yes + compress=no + mobike=no + #VTI Key + mark=200 + auto=start + +conn llb1-to-rgw2 + leftauth=psk + rightauth=psk + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : sha1 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 28800 seconds + ikelifetime=28800s + # Phase 1 Negotiation Mode : main + aggressive=no + # Protocol : esp + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : hmac-sha1-96 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024! 
+ # Lifetime : 3600 seconds + lifetime=3600s + # Mode : tunnel + type=tunnel + # DPD Interval : 10 + dpddelay=10s + # DPD Retries : 3 + dpdtimeout=30s + # Tuning Parameters for AWS Virtual Private Gateway: + keyexchange=ikev2 + #keyingtries=%forever + rekey=yes + reauth=no + dpdaction=restart + closeaction=restart + #left=%defaultroute + #leftsubnet=0.0.0.0/0,::/0 + #rightsubnet=0.0.0.0/0,::/0 + leftsubnet=192.168.10.200 + rightsubnet=192.168.10.11 + #leftupdown=/etc/strongswan/ipsec-vti.sh + left=8.7.7.254 + right=8.7.7.2 + installpolicy=yes + compress=no + mobike=no + #VTI Key + mark=201 + auto=start diff --git a/cicd/ipsec-e2e/llb1_ipsec_config/ipsec.secrets b/cicd/ipsec-e2e/llb1_ipsec_config/ipsec.secrets new file mode 100644 index 00000000..ff2e3907 --- /dev/null +++ b/cicd/ipsec-e2e/llb1_ipsec_config/ipsec.secrets @@ -0,0 +1,5 @@ +#@ /etc/strongswan/ipsec.secrets (Centos) or /etc/ipsec.secrets (Ubuntu) + +7.7.7.254 7.7.7.1 : PSK "loxilb@1234!" +8.7.7.254 8.7.7.1 : PSK "loxilb@1234!" +8.7.7.254 8.7.7.2 : PSK "loxilb@1234!" diff --git a/cicd/ipsec-e2e/rgw1_ipsec_config/charon.conf b/cicd/ipsec-e2e/rgw1_ipsec_config/charon.conf new file mode 100644 index 00000000..926ae24a --- /dev/null +++ b/cicd/ipsec-e2e/rgw1_ipsec_config/charon.conf @@ -0,0 +1,376 @@ +# Options for the charon IKE daemon. +charon { + + # Accept unencrypted ID and HASH payloads in IKEv1 Main Mode. + # accept_unencrypted_mainmode_messages = no + + # Maximum number of half-open IKE_SAs for a single peer IP. + # block_threshold = 5 + + # Whether Certificate Revocation Lists (CRLs) fetched via HTTP or LDAP + # should be saved under a unique file name derived from the public key of + # the Certification Authority (CA) to /etc/ipsec.d/crls (stroke) or + # /etc/swanctl/x509crl (vici), respectively. + # cache_crls = no + + # Whether relations in validated certificate chains should be cached in + # memory. + # cert_cache = yes + + # Send Cisco Unity vendor ID payload (IKEv1 only). + # cisco_unity = no + + # Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed. + # close_ike_on_child_failure = no + + # Number of half-open IKE_SAs that activate the cookie mechanism. + # cookie_threshold = 10 + + # Delete CHILD_SAs right after they got successfully rekeyed (IKEv1 only). + # delete_rekeyed = no + + # Delay in seconds until inbound IPsec SAs are deleted after rekeyings + # (IKEv2 only). + # delete_rekeyed_delay = 5 + + # Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic + # strength. + # dh_exponent_ansi_x9_42 = yes + + # Use RTLD_NOW with dlopen when loading plugins and IMV/IMCs to reveal + # missing symbols immediately. + # dlopen_use_rtld_now = no + + # DNS server assigned to peer via configuration payload (CP). + # dns1 = + + # DNS server assigned to peer via configuration payload (CP). + # dns2 = + + # Enable Denial of Service protection using cookies and aggressiveness + # checks. + # dos_protection = yes + + # Compliance with the errata for RFC 4753. + # ecp_x_coordinate_only = yes + + # Free objects during authentication (might conflict with plugins). + # flush_auth_cfg = no + + # Whether to follow IKEv2 redirects (RFC 5685). + # follow_redirects = yes + + # Maximum size (complete IP datagram size in bytes) of a sent IKE fragment + # when using proprietary IKEv1 or standardized IKEv2 fragmentation, defaults + # to 1280 (use 0 for address family specific default values, which uses a + # lower value for IPv4). If specified this limit is used for both IPv4 and + # IPv6. 
+ # fragment_size = 1280 + + # Name of the group the daemon changes to after startup. + # group = + + # Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING). + # half_open_timeout = 30 + + # Enable hash and URL support. + # hash_and_url = no + + # Allow IKEv1 Aggressive Mode with pre-shared keys as responder. + # i_dont_care_about_security_and_use_aggressive_mode_psk = no + + # Whether to ignore the traffic selectors from the kernel's acquire events + # for IKEv2 connections (they are not used for IKEv1). + # ignore_acquire_ts = no + + # A space-separated list of routing tables to be excluded from route + # lookups. + # ignore_routing_tables = + + # Maximum number of IKE_SAs that can be established at the same time before + # new connection attempts are blocked. + # ikesa_limit = 0 + + # Number of exclusively locked segments in the hash table. + # ikesa_table_segments = 1 + + # Size of the IKE_SA hash table. + # ikesa_table_size = 1 + + # Whether to close IKE_SA if the only CHILD_SA closed due to inactivity. + # inactivity_close_ike = no + + # Limit new connections based on the current number of half open IKE_SAs, + # see IKE_SA_INIT DROPPING in strongswan.conf(5). + # init_limit_half_open = 0 + + # Limit new connections based on the number of queued jobs. + # init_limit_job_load = 0 + + # Causes charon daemon to ignore IKE initiation requests. + # initiator_only = no + + # Install routes into a separate routing table for established IPsec + # tunnels. + install_routes = no + + # Install virtual IP addresses. + install_virtual_ip = no + + # The name of the interface on which virtual IP addresses should be + # installed. + # install_virtual_ip_on = + + # Check daemon, libstrongswan and plugin integrity at startup. + # integrity_test = no + + # A comma-separated list of network interfaces that should be ignored, if + # interfaces_use is specified this option has no effect. + # interfaces_ignore = + + # A comma-separated list of network interfaces that should be used by + # charon. All other interfaces are ignored. + # interfaces_use = + + # NAT keep alive interval. + # keep_alive = 20s + + # Plugins to load in the IKE daemon charon. + # load = + + # Determine plugins to load via each plugin's load option. + # load_modular = no + + # Initiate IKEv2 reauthentication with a make-before-break scheme. + # make_before_break = no + + # Maximum number of IKEv1 phase 2 exchanges per IKE_SA to keep state about + # and track concurrently. + # max_ikev1_exchanges = 3 + + # Maximum packet size accepted by charon. + # max_packet = 10000 + + # Enable multiple authentication exchanges (RFC 4739). + # multiple_authentication = yes + + # WINS servers assigned to peer via configuration payload (CP). + # nbns1 = + + # WINS servers assigned to peer via configuration payload (CP). + # nbns2 = + + # UDP port used locally. If set to 0 a random port will be allocated. + # port = 500 + + # UDP port used locally in case of NAT-T. If set to 0 a random port will be + # allocated. Has to be different from charon.port, otherwise a random port + # will be allocated. + # port_nat_t = 4500 + + # Whether to prefer updating SAs to the path with the best route. + # prefer_best_path = no + + # Prefer locally configured proposals for IKE/IPsec over supplied ones as + # responder (disabling this can avoid keying retries due to + # INVALID_KE_PAYLOAD notifies). 
+ # prefer_configured_proposals = yes + + # Controls whether permanent or temporary IPv6 addresses are used as source, + # or announced as additional addresses if MOBIKE is used. + # prefer_temporary_addrs = no + + # Process RTM_NEWROUTE and RTM_DELROUTE events. + # process_route = yes + + # How RDNs in subject DNs of certificates are matched against configured + # identities (strict, reordered, or relaxed). + # rdn_matching = strict + + # Delay in ms for receiving packets, to simulate larger RTT. + # receive_delay = 0 + + # Delay request messages. + # receive_delay_request = yes + + # Delay response messages. + # receive_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # receive_delay_type = 0 + + # Size of the AH/ESP replay window, in packets. + # replay_window = 32 + + # Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION + # in strongswan.conf(5). + # retransmit_base = 1.8 + + # Maximum jitter in percent to apply randomly to calculated retransmission + # timeout (0 to disable). + # retransmit_jitter = 0 + + # Upper limit in seconds for calculated retransmission timeout (0 to + # disable). + # retransmit_limit = 0 + + # Timeout in seconds before sending first retransmit. + # retransmit_timeout = 4.0 + + # Number of times to retransmit a packet before giving up. + # retransmit_tries = 5 + + # Interval in seconds to use when retrying to initiate an IKE_SA (e.g. if + # DNS resolution failed), 0 to disable retries. + # retry_initiate_interval = 0 + + # Initiate CHILD_SA within existing IKE_SAs (always enabled for IKEv1). + # reuse_ikesa = yes + + # Numerical routing table to install routes to. + # routing_table = + + # Priority of the routing table. + # routing_table_prio = + + # Whether to use RSA with PSS padding instead of PKCS#1 padding by default. + # rsa_pss = no + + # Delay in ms for sending packets, to simulate larger RTT. + # send_delay = 0 + + # Delay request messages. + # send_delay_request = yes + + # Delay response messages. + # send_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # send_delay_type = 0 + + # Send strongSwan vendor ID payload + # send_vendor_id = no + + # Whether to enable Signature Authentication as per RFC 7427. + # signature_authentication = yes + + # Whether to enable constraints against IKEv2 signature schemes. + # signature_authentication_constraints = yes + + # Value mixed into the local IKE SPIs after applying spi_mask. + # spi_label = 0x0000000000000000 + + # Mask applied to local IKE SPIs before mixing in spi_label (bits set will + # be replaced with spi_label). + # spi_mask = 0x0000000000000000 + + # The upper limit for SPIs requested from the kernel for IPsec SAs. + # spi_max = 0xcfffffff + + # The lower limit for SPIs requested from the kernel for IPsec SAs. + # spi_min = 0xc0000000 + + # Number of worker threads in charon. + # threads = 16 + + # Name of the user the daemon changes to after startup. + # user = + + crypto_test { + + # Benchmark crypto algorithms and order them by efficiency. + # bench = no + + # Buffer size used for crypto benchmark. + # bench_size = 1024 + + # Time in ms during which crypto algorithm performance is measured. + # bench_time = 50 + + # Test crypto algorithms during registration (requires test vectors + # provided by the test-vectors plugin). + # on_add = no + + # Test crypto algorithms on each crypto primitive instantiation. + # on_create = no + + # Strictly require at least one test vector to enable an algorithm. 
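The ipsec.conf that follows (conn rgw1-to-llb1) mirrors llb1's llb1-to-rgw1 with left and right swapped and the same mark=200, and the PSK selectors in the two ipsec.secrets files mirror each other the same way. A hypothetical sanity sketch:

# llb1 holds '8.7.7.254 8.7.7.1 : PSK ...' while rgw1 holds
# '8.7.7.1 8.7.7.254 : PSK ...'; both must carry the identical secret:
$dexec llb1 grep '8.7.7.1' /etc/ipsec.secrets
$dexec rgw1 grep '8.7.7.254' /etc/ipsec.secrets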
+ # required = no + + # Whether to test RNG with TRUE quality; requires a lot of entropy. + # rng_true = no + + } + + host_resolver { + + # Maximum number of concurrent resolver threads (they are terminated if + # unused). + # max_threads = 3 + + # Minimum number of resolver threads to keep around. + # min_threads = 0 + + } + + leak_detective { + + # Includes source file names and line numbers in leak detective output. + # detailed = yes + + # Threshold in bytes for leaks to be reported (0 to report all). + # usage_threshold = 10240 + + # Threshold in number of allocations for leaks to be reported (0 to + # report all). + # usage_threshold_count = 0 + + } + + processor { + + # Section to configure the number of reserved threads per priority class + # see JOB PRIORITY MANAGEMENT in strongswan.conf(5). + priority_threads { + + } + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is started. + start-scripts { + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is terminated. + stop-scripts { + + } + + tls { + + # List of TLS encryption ciphers. + # cipher = + + # List of TLS key exchange methods. + # key_exchange = + + # List of TLS MAC algorithms. + # mac = + + # List of TLS cipher suites. + # suites = + + } + + x509 { + + # Discard certificates with unsupported or unknown critical extensions. + # enforce_critical = yes + + } + +} + diff --git a/cicd/ipsec-e2e/rgw1_ipsec_config/ipsec.conf b/cicd/ipsec-e2e/rgw1_ipsec_config/ipsec.conf new file mode 100644 index 00000000..43da92c5 --- /dev/null +++ b/cicd/ipsec-e2e/rgw1_ipsec_config/ipsec.conf @@ -0,0 +1,76 @@ +#@ /etc/strongswan/ipsec.conf (Centos) or /etc/ipsec.conf (Ubuntu) + +# ipsec.conf - strongSwan IPsec configuration file + +# basic configuration + +config setup + charondebug="cfg 2, ike 3" +# strictcrlpolicy=yes +# uniqueids = no + +# Add connections here. + +# Sample VPN connections + +#conn sample-self-signed +# leftsubnet=10.1.0.0/16 +# leftcert=selfCert.der +# leftsendcert=never +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightcert=peerCert.der +# auto=start + +#conn sample-with-ca-cert +# leftsubnet=10.1.0.0/16 +# leftcert=myCert.pem +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightid="C=CH, O=Linux strongSwan CN=peer name" +# auto=start + + +conn rgw1-to-llb1 + leftauth=psk + rightauth=psk + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : sha1 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 28800 seconds + ikelifetime=28800s + # Phase 1 Negotiation Mode : main + aggressive=no + # Protocol : esp + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : hmac-sha1-96 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024! 
+ # Lifetime : 3600 seconds + lifetime=3600s + # Mode : tunnel + type=tunnel + # DPD Interval : 10 + dpddelay=10s + # DPD Retries : 3 + dpdtimeout=30s + # Tuning Parameters for AWS Virtual Private Gateway: + keyexchange=ikev2 + #keyingtries=%forever + rekey=yes + reauth=no + dpdaction=restart + closeaction=restart + #left=%defaultroute + leftsubnet=192.168.10.10 + rightsubnet=192.168.10.200 + #leftupdown=/etc/strongswan/ipsec-vti.sh + left=8.7.7.1 + right=8.7.7.254 + installpolicy=yes + compress=no + mobike=no + #VTI Key + mark=200 + auto=start diff --git a/cicd/ipsec-e2e/rgw1_ipsec_config/ipsec.secrets b/cicd/ipsec-e2e/rgw1_ipsec_config/ipsec.secrets new file mode 100644 index 00000000..4e71446d --- /dev/null +++ b/cicd/ipsec-e2e/rgw1_ipsec_config/ipsec.secrets @@ -0,0 +1,3 @@ +#@ /etc/strongswan/ipsec.secrets (Centos) or /etc/ipsec.secrets (Ubuntu) + +8.7.7.1 8.7.7.254 : PSK "loxilb@1234!" diff --git a/cicd/ipsec-e2e/rgw2_ipsec_config/charon.conf b/cicd/ipsec-e2e/rgw2_ipsec_config/charon.conf new file mode 100644 index 00000000..926ae24a --- /dev/null +++ b/cicd/ipsec-e2e/rgw2_ipsec_config/charon.conf @@ -0,0 +1,376 @@ +# Options for the charon IKE daemon. +charon { + + # Accept unencrypted ID and HASH payloads in IKEv1 Main Mode. + # accept_unencrypted_mainmode_messages = no + + # Maximum number of half-open IKE_SAs for a single peer IP. + # block_threshold = 5 + + # Whether Certificate Revocation Lists (CRLs) fetched via HTTP or LDAP + # should be saved under a unique file name derived from the public key of + # the Certification Authority (CA) to /etc/ipsec.d/crls (stroke) or + # /etc/swanctl/x509crl (vici), respectively. + # cache_crls = no + + # Whether relations in validated certificate chains should be cached in + # memory. + # cert_cache = yes + + # Send Cisco Unity vendor ID payload (IKEv1 only). + # cisco_unity = no + + # Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed. + # close_ike_on_child_failure = no + + # Number of half-open IKE_SAs that activate the cookie mechanism. + # cookie_threshold = 10 + + # Delete CHILD_SAs right after they got successfully rekeyed (IKEv1 only). + # delete_rekeyed = no + + # Delay in seconds until inbound IPsec SAs are deleted after rekeyings + # (IKEv2 only). + # delete_rekeyed_delay = 5 + + # Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic + # strength. + # dh_exponent_ansi_x9_42 = yes + + # Use RTLD_NOW with dlopen when loading plugins and IMV/IMCs to reveal + # missing symbols immediately. + # dlopen_use_rtld_now = no + + # DNS server assigned to peer via configuration payload (CP). + # dns1 = + + # DNS server assigned to peer via configuration payload (CP). + # dns2 = + + # Enable Denial of Service protection using cookies and aggressiveness + # checks. + # dos_protection = yes + + # Compliance with the errata for RFC 4753. + # ecp_x_coordinate_only = yes + + # Free objects during authentication (might conflict with plugins). + # flush_auth_cfg = no + + # Whether to follow IKEv2 redirects (RFC 5685). + # follow_redirects = yes + + # Maximum size (complete IP datagram size in bytes) of a sent IKE fragment + # when using proprietary IKEv1 or standardized IKEv2 fragmentation, defaults + # to 1280 (use 0 for address family specific default values, which uses a + # lower value for IPv4). If specified this limit is used for both IPv4 and + # IPv6. + # fragment_size = 1280 + + # Name of the group the daemon changes to after startup. 
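+    # (left unset, charon keeps the group it was started under)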
+ # group = + + # Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING). + # half_open_timeout = 30 + + # Enable hash and URL support. + # hash_and_url = no + + # Allow IKEv1 Aggressive Mode with pre-shared keys as responder. + # i_dont_care_about_security_and_use_aggressive_mode_psk = no + + # Whether to ignore the traffic selectors from the kernel's acquire events + # for IKEv2 connections (they are not used for IKEv1). + # ignore_acquire_ts = no + + # A space-separated list of routing tables to be excluded from route + # lookups. + # ignore_routing_tables = + + # Maximum number of IKE_SAs that can be established at the same time before + # new connection attempts are blocked. + # ikesa_limit = 0 + + # Number of exclusively locked segments in the hash table. + # ikesa_table_segments = 1 + + # Size of the IKE_SA hash table. + # ikesa_table_size = 1 + + # Whether to close IKE_SA if the only CHILD_SA closed due to inactivity. + # inactivity_close_ike = no + + # Limit new connections based on the current number of half open IKE_SAs, + # see IKE_SA_INIT DROPPING in strongswan.conf(5). + # init_limit_half_open = 0 + + # Limit new connections based on the number of queued jobs. + # init_limit_job_load = 0 + + # Causes charon daemon to ignore IKE initiation requests. + # initiator_only = no + + # Install routes into a separate routing table for established IPsec + # tunnels. + install_routes = no + + # Install virtual IP addresses. + install_virtual_ip = no + + # The name of the interface on which virtual IP addresses should be + # installed. + # install_virtual_ip_on = + + # Check daemon, libstrongswan and plugin integrity at startup. + # integrity_test = no + + # A comma-separated list of network interfaces that should be ignored, if + # interfaces_use is specified this option has no effect. + # interfaces_ignore = + + # A comma-separated list of network interfaces that should be used by + # charon. All other interfaces are ignored. + # interfaces_use = + + # NAT keep alive interval. + # keep_alive = 20s + + # Plugins to load in the IKE daemon charon. + # load = + + # Determine plugins to load via each plugin's load option. + # load_modular = no + + # Initiate IKEv2 reauthentication with a make-before-break scheme. + # make_before_break = no + + # Maximum number of IKEv1 phase 2 exchanges per IKE_SA to keep state about + # and track concurrently. + # max_ikev1_exchanges = 3 + + # Maximum packet size accepted by charon. + # max_packet = 10000 + + # Enable multiple authentication exchanges (RFC 4739). + # multiple_authentication = yes + + # WINS servers assigned to peer via configuration payload (CP). + # nbns1 = + + # WINS servers assigned to peer via configuration payload (CP). + # nbns2 = + + # UDP port used locally. If set to 0 a random port will be allocated. + # port = 500 + + # UDP port used locally in case of NAT-T. If set to 0 a random port will be + # allocated. Has to be different from charon.port, otherwise a random port + # will be allocated. + # port_nat_t = 4500 + + # Whether to prefer updating SAs to the path with the best route. + # prefer_best_path = no + + # Prefer locally configured proposals for IKE/IPsec over supplied ones as + # responder (disabling this can avoid keying retries due to + # INVALID_KE_PAYLOAD notifies). + # prefer_configured_proposals = yes + + # Controls whether permanent or temporary IPv6 addresses are used as source, + # or announced as additional addresses if MOBIKE is used. 
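+    # ("temporary" here means IPv6 privacy-extension addresses, RFC 4941)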
+ # prefer_temporary_addrs = no + + # Process RTM_NEWROUTE and RTM_DELROUTE events. + # process_route = yes + + # How RDNs in subject DNs of certificates are matched against configured + # identities (strict, reordered, or relaxed). + # rdn_matching = strict + + # Delay in ms for receiving packets, to simulate larger RTT. + # receive_delay = 0 + + # Delay request messages. + # receive_delay_request = yes + + # Delay response messages. + # receive_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # receive_delay_type = 0 + + # Size of the AH/ESP replay window, in packets. + # replay_window = 32 + + # Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION + # in strongswan.conf(5). + # retransmit_base = 1.8 + + # Maximum jitter in percent to apply randomly to calculated retransmission + # timeout (0 to disable). + # retransmit_jitter = 0 + + # Upper limit in seconds for calculated retransmission timeout (0 to + # disable). + # retransmit_limit = 0 + + # Timeout in seconds before sending first retransmit. + # retransmit_timeout = 4.0 + + # Number of times to retransmit a packet before giving up. + # retransmit_tries = 5 + + # Interval in seconds to use when retrying to initiate an IKE_SA (e.g. if + # DNS resolution failed), 0 to disable retries. + # retry_initiate_interval = 0 + + # Initiate CHILD_SA within existing IKE_SAs (always enabled for IKEv1). + # reuse_ikesa = yes + + # Numerical routing table to install routes to. + # routing_table = + + # Priority of the routing table. + # routing_table_prio = + + # Whether to use RSA with PSS padding instead of PKCS#1 padding by default. + # rsa_pss = no + + # Delay in ms for sending packets, to simulate larger RTT. + # send_delay = 0 + + # Delay request messages. + # send_delay_request = yes + + # Delay response messages. + # send_delay_response = yes + + # Specific IKEv2 message type to delay, 0 for any. + # send_delay_type = 0 + + # Send strongSwan vendor ID payload + # send_vendor_id = no + + # Whether to enable Signature Authentication as per RFC 7427. + # signature_authentication = yes + + # Whether to enable constraints against IKEv2 signature schemes. + # signature_authentication_constraints = yes + + # Value mixed into the local IKE SPIs after applying spi_mask. + # spi_label = 0x0000000000000000 + + # Mask applied to local IKE SPIs before mixing in spi_label (bits set will + # be replaced with spi_label). + # spi_mask = 0x0000000000000000 + + # The upper limit for SPIs requested from the kernel for IPsec SAs. + # spi_max = 0xcfffffff + + # The lower limit for SPIs requested from the kernel for IPsec SAs. + # spi_min = 0xc0000000 + + # Number of worker threads in charon. + # threads = 16 + + # Name of the user the daemon changes to after startup. + # user = + + crypto_test { + + # Benchmark crypto algorithms and order them by efficiency. + # bench = no + + # Buffer size used for crypto benchmark. + # bench_size = 1024 + + # Time in ms during which crypto algorithm performance is measured. + # bench_time = 50 + + # Test crypto algorithms during registration (requires test vectors + # provided by the test-vectors plugin). + # on_add = no + + # Test crypto algorithms on each crypto primitive instantiation. + # on_create = no + + # Strictly require at least one test vector to enable an algorithm. + # required = no + + # Whether to test RNG with TRUE quality; requires a lot of entropy. 
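+        # (TRUE quality typically means a blocking source such as /dev/random,
+        # so these tests can stall on entropy-starved systems)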
+ # rng_true = no + + } + + host_resolver { + + # Maximum number of concurrent resolver threads (they are terminated if + # unused). + # max_threads = 3 + + # Minimum number of resolver threads to keep around. + # min_threads = 0 + + } + + leak_detective { + + # Includes source file names and line numbers in leak detective output. + # detailed = yes + + # Threshold in bytes for leaks to be reported (0 to report all). + # usage_threshold = 10240 + + # Threshold in number of allocations for leaks to be reported (0 to + # report all). + # usage_threshold_count = 0 + + } + + processor { + + # Section to configure the number of reserved threads per priority class + # see JOB PRIORITY MANAGEMENT in strongswan.conf(5). + priority_threads { + + } + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is started. + start-scripts { + + } + + # Section containing a list of scripts (name = path) that are executed when + # the daemon is terminated. + stop-scripts { + + } + + tls { + + # List of TLS encryption ciphers. + # cipher = + + # List of TLS key exchange methods. + # key_exchange = + + # List of TLS MAC algorithms. + # mac = + + # List of TLS cipher suites. + # suites = + + } + + x509 { + + # Discard certificates with unsupported or unknown critical extensions. + # enforce_critical = yes + + } + +} + diff --git a/cicd/ipsec-e2e/rgw2_ipsec_config/ipsec.conf b/cicd/ipsec-e2e/rgw2_ipsec_config/ipsec.conf new file mode 100644 index 00000000..f173a905 --- /dev/null +++ b/cicd/ipsec-e2e/rgw2_ipsec_config/ipsec.conf @@ -0,0 +1,76 @@ +#@ /etc/strongswan/ipsec.conf (Centos) or /etc/ipsec.conf (Ubuntu) + +# ipsec.conf - strongSwan IPsec configuration file + +# basic configuration + +config setup + charondebug="cfg 2, ike 3" +# strictcrlpolicy=yes +# uniqueids = no + +# Add connections here. + +# Sample VPN connections + +#conn sample-self-signed +# leftsubnet=10.1.0.0/16 +# leftcert=selfCert.der +# leftsendcert=never +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightcert=peerCert.der +# auto=start + +#conn sample-with-ca-cert +# leftsubnet=10.1.0.0/16 +# leftcert=myCert.pem +# right=192.168.0.2 +# rightsubnet=10.2.0.0/16 +# rightid="C=CH, O=Linux strongSwan CN=peer name" +# auto=start + + +conn rgw2-to-llb1 + leftauth=psk + rightauth=psk + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : sha1 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024! + # Lifetime : 28800 seconds + ikelifetime=28800s + # Phase 1 Negotiation Mode : main + aggressive=no + # Protocol : esp + # Encryption Algorithm : aes-128-cbc + # Authentication Algorithm : hmac-sha1-96 + # Perfect Forward Secrecy : Diffie-Hellman Group 2 + esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024! 
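+    # NB: as in the rgw1 config, the algorithm comments above are template
+    # text; the effective proposals are the ike=/esp= lines.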
+ # Lifetime : 3600 seconds + lifetime=3600s + # Mode : tunnel + type=tunnel + # DPD Interval : 10 + dpddelay=10s + # DPD Retries : 3 + dpdtimeout=30s + # Tuning Parameters for AWS Virtual Private Gateway: + keyexchange=ikev2 + #keyingtries=%forever + rekey=yes + reauth=no + dpdaction=restart + closeaction=restart + #left=%defaultroute + leftsubnet=192.168.10.11 + rightsubnet=192.168.10.200 + #leftupdown=/etc/strongswan/ipsec-vti.sh + left=8.7.7.2 + right=8.7.7.254 + installpolicy=yes + compress=no + mobike=no + #VTI Key + mark=201 + auto=start diff --git a/cicd/ipsec-e2e/rgw2_ipsec_config/ipsec.secrets b/cicd/ipsec-e2e/rgw2_ipsec_config/ipsec.secrets new file mode 100644 index 00000000..b24a6b9b --- /dev/null +++ b/cicd/ipsec-e2e/rgw2_ipsec_config/ipsec.secrets @@ -0,0 +1,3 @@ +#@ /etc/strongswan/ipsec.secrets (Centos) or /etc/ipsec.secrets (Ubuntu) + +8.7.7.2 8.7.7.254 : PSK "loxilb@1234!" diff --git a/cicd/ipsec-e2e/rmconfig.sh b/cicd/ipsec-e2e/rmconfig.sh new file mode 100755 index 00000000..a7406048 --- /dev/null +++ b/cicd/ipsec-e2e/rmconfig.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +source ../common.sh + +disconnect_docker_hosts lh1 lgw1 +disconnect_docker_hosts lgw1 llb1 +disconnect_docker_hosts llb1 rgw1 +disconnect_docker_hosts llb1 rgw2 +disconnect_docker_hosts rgw1 rh1 +disconnect_docker_hosts rgw2 rh2 + +delete_docker_host llb1 +delete_docker_host lgw1 +delete_docker_host rgw1 +delete_docker_host rgw2 +delete_docker_host lh1 +delete_docker_host rh1 +delete_docker_host rh2 + +echo "#########################################" +echo "Deleted testbed" +echo "#########################################" diff --git a/cicd/ipsec-e2e/validation.sh b/cicd/ipsec-e2e/validation.sh new file mode 100755 index 00000000..e28b28d1 --- /dev/null +++ b/cicd/ipsec-e2e/validation.sh @@ -0,0 +1,61 @@ +#!/bin/bash +source ../common.sh +echo IPSEC-e2e +$hexec rh1 node ../common/tcp_server.js server1 & +$hexec rh2 node ../common/tcp_server.js server2 & + +sleep 2 +lgw1_rx1=`$hexec lgw1 ifconfig vti100 | grep "RX packets" | cut -d " " -f 11` +lgw1_tx1=`$hexec lgw1 ifconfig vti100 | grep "TX packets" | cut -d " " -f 11` +llb1_rx1=`$hexec llb1 ifconfig vti100 | grep "RX packets" | cut -d " " -f 11` +llb1_tx1=`$hexec llb1 ifconfig vti100 | grep "TX packets" | cut -d " " -f 11` +llb1_rx2=`$hexec llb1 ifconfig vti200 | grep "RX packets" | cut -d " " -f 11` +llb1_tx2=`$hexec llb1 ifconfig vti200 | grep "TX packets" | cut -d " " -f 11` +llb1_rx3=`$hexec llb1 ifconfig vti201 | grep "RX packets" | cut -d " " -f 11` +llb1_tx3=`$hexec llb1 ifconfig vti201 | grep "TX packets" | cut -d " " -f 11` + +rgw1_rx1=`$hexec rgw1 ifconfig vti200 | grep "RX packets" | cut -d " " -f 11` +rgw1_tx1=`$hexec rgw1 ifconfig vti200 | grep "TX packets" | cut -d " " -f 11` +rgw2_rx1=`$hexec rgw2 ifconfig vti201 | grep "RX packets" | cut -d " " -f 11` +rgw2_tx1=`$hexec rgw2 ifconfig vti201 | grep "TX packets" | cut -d " " -f 11` + +code=0 +servArr=( "server1" "server2" ) +vip=( "192.168.10.200" ) + +for j in {0..3} +do +for i in {0..1} +do + res=`$hexec lh1 curl --max-time 10 -s http://${vip[0]}:2020` + echo -e $res + if [[ "x$res" != "x${servArr[$i]}" ]] + then + echo -e "Expected ${servArr[$i]}, Received : $res" + if [[ "$res" != *"server"* ]]; + then + echo "lgw1 ct" + $dexec lgw1 loxicmd get ct + echo "llb1 ct" + $dexec llb1 loxicmd get ct + echo "rgw1 ct" + $dexec rgw1 loxicmd get ct + echo "rgw2 ct" + $dexec rgw2 loxicmd get ct + echo "llb1 ip neigh" + $dexec llb1 ip neigh + fi + code=1 + fi + sleep 1 +done +done +if [[ $code == 0 ]] 
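+# (code is still 0 only if every curl through the VIP returned one of the
+# expected endpoints, server1 or server2)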
+then + echo IPSEC-3 [OK] +else + echo IPSEC-3 [FAILED] +fi +sudo pkill node +exit $code + diff --git a/cicd/k0s-incluster/common.sh b/cicd/k0s-incluster/common.sh index b82f6e0e..ea119602 100755 --- a/cicd/k0s-incluster/common.sh +++ b/cicd/k0s-incluster/common.sh @@ -536,8 +536,8 @@ function create_lb_rule() { echo "$1: loxicmd create lb ${args[*]}" $dexec $1 loxicmd create lb ${args[*]} - hook=$($dexec llb1 ntc filter show dev eth0 ingress | grep tc_packet_hook) - if [[ $hook != *"tc_packet_hook"* ]]; then + hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func) + if [[ $hook != *"tc_packet_func"* ]]; then echo "ERROR : No hook point found"; exit 1 fi diff --git a/cicd/k3s-flannel-loxilb-ingress/Vagrantfile b/cicd/k3s-flannel-loxilb-ingress/Vagrantfile new file mode 100644 index 00000000..69cb4c0b --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/Vagrantfile @@ -0,0 +1,39 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +workers = (ENV['WORKERS'] || "1").to_i +#box_name = (ENV['VAGRANT_BOX'] || "ubuntu/focal64") +box_name = (ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s") +box_version = "0.7.1" +Vagrant.configure("2") do |config| + config.vm.box = "#{box_name}" + config.vm.box_version = "#{box_version}" + + if Vagrant.has_plugin?("vagrant-vbguest") + config.vbguest.auto_update = false + end + + config.vm.define "loxilb" do |loxilb| + loxilb.vm.hostname = 'llb1' + #loxilb.vm.network "forwarded_port", guest: 55002, host: 5502, protocol: "tcp" + loxilb.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0" + loxilb.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0" + loxilb.vm.provision :shell, :path => "loxilb.sh" + loxilb.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 6000] + vbox.customize ["modifyvm", :id, "--cpus", 4] + end + end + + + config.vm.define "master" do |master| + master.vm.hostname = 'master' + master.vm.network :private_network, ip: "192.168.80.10", :netmask => "255.255.255.0" + master.vm.provision :shell, :path => "master.sh" + master.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 8192] + vbox.customize ["modifyvm", :id, "--cpus", 4] + end + end + +end diff --git a/cicd/k3s-flannel-loxilb-ingress/config.sh b/cicd/k3s-flannel-loxilb-ingress/config.sh new file mode 100755 index 00000000..6b8ee48e --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/config.sh @@ -0,0 +1,3 @@ +#!/bin/bash +vagrant global-status | grep -i virtualbox | cut -f 1 -d ' ' | xargs -L 1 vagrant destroy -f +vagrant up diff --git a/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-deploy.yml b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-deploy.yml new file mode 100644 index 00000000..457cf9cd --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-deploy.yml @@ -0,0 +1,266 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/instance: loxilb-ingress + app.kubernetes.io/name: loxilb-ingress + name: loxilb-ingress +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: loxilb-ingress + app.kubernetes.io/name: loxilb-ingress + name: loxilb-ingress + namespace: kube-system +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: loxilb-ingress + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + 
- apiGroups: + - "" + resources: + - endpoints + - services + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: loxilb-ingress + app.kubernetes.io/name: loxilb-ingress + name: loxilb-ingress +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update +- apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: loxilb-ingress + app.kubernetes.io/name: loxilb-ingress + name: loxilb-ingress + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: loxilb-ingress +subjects: +- kind: ServiceAccount + name: loxilb-ingress + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: loxilb-ingress + app.kubernetes.io/name: loxilb-ingress + name: loxilb-ingress +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: loxilb-ingress +subjects: +- kind: ServiceAccount + name: loxilb-ingress + namespace: kube-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-ingress + namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-ingress + app.kubernetes.io/instance: loxilb-ingress + app.kubernetes.io/name: loxilb-ingress + template: + metadata: + name: loxilb-ingress + labels: + app: loxilb-ingress + app.kubernetes.io/instance: loxilb-ingress + app.kubernetes.io/name: loxilb-ingress + spec: + #hostNetwork: true + #dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: loxilb-ingress + containers: + - name: loxilb-ingress + volumeMounts: + - mountPath: "/opt/loxilb/cert/" + name: loxilb-ssl + image: "ghcr.io/loxilb-io/loxilb-ingress:latest" + imagePullPolicy: Always + command: [ "/bin/loxilb-ingress" ] + 
ports: + - containerPort: 11111 + volumes: + - name: loxilb-ssl + secret: + secretName: loxilb-ssl diff --git a/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-svc.yml b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-svc.yml new file mode 100644 index 00000000..698bd2aa --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-svc.yml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: loxilb-ingress-manager + namespace: kube-system + annotations: + loxilb.io/lbmode: "onearm" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + app.kubernetes.io/instance: loxilb-ingress + app.kubernetes.io/name: loxilb-ingress + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + type: LoadBalancer diff --git a/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress.yml b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress.yml new file mode 100644 index 00000000..1808b48d --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress.yml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: site +spec: + replicas: 1 + selector: + matchLabels: + name: site-handler + template: + metadata: + labels: + name: site-handler + spec: + containers: + - name: blog + image: ghcr.io/loxilb-io/nginx:stable + imagePullPolicy: Always + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: site-handler-service +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: site-handler +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: site-loxilb-ingress +spec: + ingressClassName: loxilb + tls: + - hosts: + - loxilb.io + secretName: loxilb-ssl + rules: + - host: loxilb.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: site-handler-service + port: + number: 80 diff --git a/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-secret.yml b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-secret.yml new file mode 100644 index 00000000..73d69182 --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-secret.yml @@ -0,0 +1,10 @@ +apiVersion: v1 +data: + server.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZDVENDQXZHZ0F3SUJBZ0lVSENPekxWNlRFeVg2cjIxNllycFlOWWZOY2Zzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZERVNNQkFHQTFVRUF3d0piRzk0YVd4aUxtbHZNQjRYRFRJME1EZ3dPREEyTXpNeE5Gb1hEVEkxTURndwpPREEyTXpNeE5Gb3dGREVTTUJBR0ExVUVBd3dKYkc5NGFXeGlMbWx2TUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBZzhBTUlJQ0NnS0NBZ0VBb2hMNWgxSlFFVVlpRExvR0hzdDNmM3ZrWkcyMWU1LzM2Rml1WDhsa1pOTkwKZUlUZmUzR1E0ci96K253N0oxSXdlc2VHdkkyZW5FNWtLYVdsZHhpekNEd2JxS21GRk1EMk1zQklEUlRJb2d4NgpOak1YUFlqQ1VxUlhVODJwNzhUa1Bvd1FqdllhcExiZ3REcWdiWWxMZC95VUg5aWdmcHo5VFY2d2grQlMvUDJwCnc1MUMrckRIUHdSM0JNL2hGNUtpeVZway9GNmJNQjZRSFE2bGk5SmR4ZEVNSGtDRXhPWUo1R01kVkEvRmUzODMKbTNwK2JucVd2OXdLTXF0d29LVVVEOFJ0TmdkUXJxSmp0elV3YXRmT0VkY3ptTG1uVXg2VjgyMk9weFhQeG4vSApiSmxUcy8vblRrV0FCWmFEVGFqQ2FnZUpCQnZ4Rk1Eci9mVUdQWlRPRUdEZkxxaE9HM0g4UDJmMGp1OHFpMUJ0Cnp0ODBmT2N2eElLME8veWJnemRINjB6YXJFcEZFRjFEcGF3a0hGWmZHYmVTdnpUeTZSVm0zWWxRRjc2NFZHTDQKSCtMMFFEcVI2Zm0veHoxaEhLbER6dFA2VUV1MjExUUc4RDUvQ1ZUVzdQQUIrMkRWbk1vN0JqRzYrVG55Z0ZqNApOUXZEaW9VQ1NwZzdRT2g2RWw0UjgwVHR1Vmo5bEsvbnVIR08yQ0hwclhnTzUyeWgrZzNzOUJVeXQ5eXVQQVFxCkhIeWsyT3hIc1dURnFTQ3VaNHo0NVI0NWFTcmNDd2t6N09xbWlVWTUrenduT1F5WlN3Y2JzUVhzbVNEUm9KUVcKR2lwSUp0VVZjcnZSbWIzWkFnRDVNdlVQRXpEYjVTME5La1lqczNvWnVBZXVjSGJSS080RkFMRlRDYi9JR2E4QwpBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGTDRJZFpKNE9Obk53cVVmdFhCR25STndwN0V0TUI4R0ExVWRJd1FZCk1CYUFGTDRJZFpKNE9Obk53cVVmdFhCR25STndwN0V0TUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dJQkFFWHg0Q3pUMnNTOCtMVjRHc0I2bDFtRWM5UFJiT205Q1pQK3M2MzQ5QVd5MXh3cwppUmlaYVJVcEJ2Skk1MHZYMjhaeWJRVXpPdVg3ZGE5bFIzWEhLM01QdHlVUk5NYkcySzlmbzVZQ1RqV3lTZEg1CnpJQm1RZVV0cXE1OVMwdytSQytUcC9xd3lveUlUajk2dGU0YmdheGhUV2RtazZUNFZjWkgwY1NQU0hEMG9DeFkKcHJDOVdnQ21kQ3JOWWQ0T2pxaUhwOERhWHFybGhmbWZXdThHaFJlcVNmL1pEOTBrSUw3aEx0OHBXYXRpQnZ3UAowRmtGMjNWcFBwZ0s0MElKM1NBcllSWXlIUllKaDNLK1QzZ2RQY0pOdUloaENrRE1YNUtKdlI1QXdUdWpEL1lKCjNTTVRzL1F0SnZScDd0Q0kxM1lwZXFiaHFoQnBtdzdVWFpSUnh4WURiNHU2L25oZUZkMS9NNjdsYTJtUmpvZlIKUDQxc2pRa1lQSkhsY2hVMHRkQnRjN203bVkrdFo1U2h4bklKZnFBS1pqTEpEZUJyYlhrS2hjNms4NFpBM09vQwpCajl1U3V1RERlUUJ0VDlYUHppOVZaV2pVWis2Zk42QlB0RHVRa0x4V2xacHN0TXJIUEhia1gvVXhmU2NuZEpiCkw0ZXBhTVVqUDJDWnd2NGFraUxjZmQzVXEwaENQZzVZNTNOL1cyWlJ2Y204aGlpcXptaDIyeUxMYUZEQXBOaGEKZitXdUNxNU1HQ2Rib3U1Wnk4TXRoaXZwRnhEUXAzWkh4RktXTGw3VGZpR0hRYXZVK0ZnUVdQUFUrOVVmYksyZQpQYmRSSmxoaVE1Z09VbHBWT1V6bGVxR3lhVldDRHpuQ3JlVWFVcnNzbkNZejdzQmNORTViYUl4dlptUmkKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRZ0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1N3d2dna29BZ0VBQW9JQ0FRQ2lFdm1IVWxBUlJpSU0KdWdZZXkzZC9lK1JrYmJWN24vZm9XSzVmeVdSazAwdDRoTjk3Y1pEaXYvUDZmRHNuVWpCNng0YThqWjZjVG1RcApwYVYzR0xNSVBCdW9xWVVVd1BZeXdFZ05GTWlpREhvMk14YzlpTUpTcEZkVHphbnZ4T1ErakJDTzlocWt0dUMwCk9xQnRpVXQzL0pRZjJLQituUDFOWHJDSDRGTDgvYW5EblVMNnNNYy9CSGNFeitFWGtxTEpXbVQ4WHBzd0hwQWQKRHFXTDBsM0YwUXdlUUlURTVnbmtZeDFVRDhWN2Z6ZWJlbjV1ZXBhLzNBb3lxM0NncFJRUHhHMDJCMUN1b21PMwpOVEJxMTg0UjF6T1l1YWRUSHBYemJZNm5GYy9HZjhkc21WT3ovK2RPUllBRmxvTk5xTUpxQjRrRUcvRVV3T3Y5CjlRWTlsTTRRWU44dXFFNGJjZncvWi9TTzd5cUxVRzNPM3pSODV5L0VnclE3L0p1RE4wZnJUTnFzU2tVUVhVT2wKckNRY1ZsOFp0NUsvTlBMcEZXYmRpVkFYdnJoVVl2Z2Y0dlJBT3BIcCtiL0hQV0VjcVVQTzAvcFFTN2JYVkFidwpQbjhKVk5iczhBSDdZTldjeWpzR01icjVPZktBV1BnMUM4T0toUUpLbUR0QTZIb1NYaEh6Uk8yNVdQMlVyK2U0CmNZN1lJZW10ZUE3bmJLSDZEZXowRlRLMzNLNDhCQ29jZktUWTdFZXhaTVdwSUs1bmpQamxIamxwS3R3TENUUHMKNnFhSlJqbjdQQ2M1REpsTEJ4dXhCZXlaSU5HZ2xCWWFLa2dtMVJWeXU5R1p2ZGtDQVBreTlROFRNTnZsTFEwcQpSaU96ZWhtNEI2NXdkdEVvN2dVQXNWTUp2OGdacndJREFRQUJBb0lDQUFKVCt3SE5iUnA3c28zenpnYm1zWlNOCldpSFkyYWxwSHFmWHBOSmdWTkR4aTZUQ3Q0ZFRsaHNPbC9jZXR6RE8wc09tS3cvcDhDT1RQWklCR05KRE9tSXAKS0hqelp6Zjl3aVBsNHBKdERRK3dtRkFYQ0l0ZUhQM25RNzRaN0xnZUNSWFc2c2FJWHc2dkFFbFYwcytETHZvZApiUHZUdVNYUlp4MHRRWEVpaC93VUlVU2pSeE16OE5GaFg3MENmeTF5VTI2NU1rTFYyVXY2Z3M4N2o4UmJEZjlBCnBhWnFKNWp6NUJTYTVsaHl5cFpZQ3pVam9NMm5meTF5OE9BOVZIaDl5SGMxYjFMMmtzYlJBQTJQalBoRjF4bHUKeVE5OUs3Qk9nUEg4VGROVDZVSms1UXNQcE5mR1V0U2hEZkR2a1RNNjZkZlcwTFc2cVJtWlBJdlpUVERkM0J2SwpCN1NnOUs3bXZCbVlsNEpMM05pZXBJVjkvNEVPRzZsNi9QaGxUR0JVRUdrTmdNQ2dTaWxyc05qU2E4ZW9SZHdzCm40VmN5enNWeWZYaGFSTVBFTklLVWZJTmFuenpYRkY2ZFRyd3Azelo0RDNhVzNLdWltYUxJR0hCNXZZaGRFTGoKUE9PQVVXRkVXUjMxMVFMZ3hUUm53aStnRUNlMmhPVmNyUkZVcjhDdlVrWWRUT0FGQzdjUnBOUW5rSVdQNU1QbQpXZkRvM2dZRnkyZU45NGR1MHBzTzVabzlHMm5UMlpiTjl1d0FCMk1ibk91R0xiWVRFbWpHdVRIUVc5Uzd6TFBzCnJMUmtVdndvYWE4cVBmaGc4SWw2ak1mcG5heUdETnU1S2tVV05KcGFZaGp5Y2l6SkVGUjVicTF6UU1QZjE2Qk0KdVloWVMySEhuS1VhdFRmZ3hyNmxBb0lCQVFEa0E3UlB2MURtNmxKTXYzaUU0K1FRYzUyTDdueUJHc3FueEVzdwo1Ly9PN2gvd2ZMMkd4VzFzRXcrSUNDd1VmVjNwZjBqWEl2dktjM3V0YXByWjc2Tk5uWnJJU0syYnFCamZpc2VrCk9vMWRlRlQvNlVaOGxhOWVDMFF3UlVWMWdmZ1I2K0VCayttUDJjSWpJRDVKbkx1b1lLSFNwenRmZG15UEUrODUKVUtXRU5rR1BsN200aStqTzRwWUhYQXdhZVFkU2NSbjVPdVUyY3FBTkZVbmpRbmtQNnp6U2tJVkNUaFNGVkpUYgplZEpIOFNwbW9YL0IzL3VVSm9vVGFvWXQwb1V4NnJsR0xqTDhadUx0TUlVU1J0WUJNd2JuMDVQQkZ0cStuaitlClVtWEJqUEgxbURRN0tlWG1yZHlldHYzQWRUQjdjWUhMZW1GL0lNL1g5N3Brb2twVEFvSUJBUUMxOTJheDhReTkKWGkrNmlVNDIzTEx1R3p3a3EwNlVHeVdSMlBHM3pCeDlHRU5KNS9rV0FrSUdPWjlSMG1xOGRuTnJOeitROTFlawp4QlFJS0xGZzEvMVFWZ2VOMEw3TVJySWxSelBsa2NZYUkrNDFhbktKYUpsd3BMaDEza0RmdzRMUXpOLzhDcElrCk9KajBZWFpNNkpSbHlmTE5jQ1pURFBKVjY0R24vaFNDZ3RISndaRk9MdWZsalIzSVpQbEphcHBTNlNRUDBkVDYKeExmUEsyUWZGR251UTBETitsQjdPVTJMT3d4enF0UldsYWFjTnpOU3B0cUJ2NzMvVFFoQ1l5eVc0RnBReER4Vgo3MzJWZ0tvWVQ3dElpZFF5Z2NrWnp0NnFhRUNVQ0o3UmtuUVNyZDEvUHBITDBrdXkxNDIxc0VLdkUyWVpON21WCkNGYVlzRGdqb0orMUFvSUJBUURFckpGT0xEYUxSMWkvekhRWEpEN25GZUt4L1g2MUdiZnQ1N24zc0ZXTlZ3YUQKdUFUK2g3S3ZndTFscDY3bDZaRWt3OTYvZXZSMmx3LzU4RGNyZnJGOFZGSmJLWjFkNHl1NVBncWdGTVUwOTROUgp6aFEzaUNGZzNCVHdFZ0Fzc0hPYWVieDJVUEFvWFd0QVF5S1crak0vdEVKQTRuQ3JFZ25uakFsUGhjbU85Z0dzCjZ2R09SbGdFZzV0bk03Vlk3RVl0alZNYkQvci84UFV1ODhyczFMeDV4NjJKN3BDVE5hZ3JyVjVNeFpKazdaZG0KT1MxcXZGbFRXNzdEcXFHY1NyY0s3R3p0SlJKamRoZU5BY24yanRVdTZhV3VOMmgrSjhsOG5DRkIzYzdabVVwbgpUZWJYbFhjeGQ0d1I5c04veTFXTFZNZmhER21tYjFYMzhqMTdhaVR6QW9JQkFGN0VLTXlWSURCakkvSSszZWYrCmlvVXMwK2E0L0tSdmV1SjVISEcvTURzSjdzbEhzVlovK0lpcmE4aStEKzUwdGVscGpZWmUrbHNKN3ZETTJJdjYKRUtmTkZnUUthY09UTWVYdUxoM3FERVRDMzZVYitlaUwvQlZKQS9
RR3VyeU9Zc3VCVjBrNDdDRkhCSW1KVklYNwpQb1hBWmQ0T0FUZVJiNGZGcmZHaWhtWHQ0WG4wZ0VzNmJIVUZTRFI4T2NPOWEvK3dBYUxuZ2NiVHVuSi9RNVpZCkdFOEk0WEFrWTlPNDVTU1VyUWgwT0QrYmtuaWEydlM1aHVTNXlpWnlwTkdHT3N1Y3JneVFGbWdlNE1XQ2k1TTcKdXVxdE5VRFVqTG9QSGJHYnQ3NGd1eTJqMnlWN1BQYXV6RmxjL1NWMzB3cURjRWNqa0RHajd0ZXB6d2VZQnJTdgpTMTBDZ2dFQUNqSHBrZE5VZkdzOGZnN0x4WDlFcURRZjY4SkhIblBscTQ1azFwN0U4UExmRytFVWNIMnUzekRzCjhmZDByTHAwb2Z3VEhONUNPMmhJVWhCVC9UU0poQ1VaOTZ2ZlRyeXVVWVJjdjZ2NkEvaW1OdFQ3MEQ0ZkZ0cXoKWnB3Si9GNzFwdVlJTmtwSEpteWIvSHdIZmxEdURyVkRjMFF3VmY2WkFPcE9QbnhzM0VHUElqNGdVcDFYMEdVcAp1TERCdVJtR0RFNEVKYUhYUjFHemM0YkduZVlUV0FnY1IxQ2MrbThuWnR3a2dxQWVia0ZtaXFmZDBPOGNkUDlUCkZKWjNSbkRJZHBCYmw0b0hJS0NiTWY0M2pkdlZnM2RTb0hwUTlXUkZ1MEhVQURqenJUS2Z5c0JLMFM3WnlZRmEKc1RoOGJ6QU01YXpBWlZSeHNTbXJiSm95VFRZM3pBPT0KLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo= +kind: Secret +metadata: + creationTimestamp: null + name: loxilb-ssl + namespace: kube-system +type: Opaque diff --git a/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml b/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml new file mode 100644 index 00000000..93a52b37 --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-loxilb + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - endpoints + - services + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-loxilb +subjects: + - kind: ServiceAccount + name: kube-loxilb + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-loxilb + namespace: kube-system + labels: + app: kube-loxilb-app +spec: + replicas: 1 + selector: + matchLabels: + app: kube-loxilb-app + template: + metadata: + labels: + app: kube-loxilb-app + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
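+      # (together with priorityClassName: system-node-critical below, these
+      # tolerations let kube-loxilb schedule onto tainted control-plane nodes
+      # and avoid eviction)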
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + terminationGracePeriodSeconds: 0 + containers: + - name: kube-loxilb + image: ghcr.io/loxilb-io/kube-loxilb:latest + imagePullPolicy: Always + command: + - /bin/kube-loxilb + args: + - --loxiURL=http://192.168.80.9:11111 + - --externalCIDR=192.168.80.9/32 + #- --zone=aws + #- --setBGP=64512 + #- --setRoles=0.0.0.0 + #- --extBGPPeers=192.168.90.9:64511 + #- --monitor + #- --setLBMode=1 + #- --config=/opt/loxilb/agent/kube-loxilb.conf + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + capabilities: + add: ["NET_ADMIN", "NET_RAW"] diff --git a/cicd/k3s-flannel-loxilb-ingress/loxilb.sh b/cicd/k3s-flannel-loxilb-ingress/loxilb.sh new file mode 100644 index 00000000..74e66ae9 --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/loxilb.sh @@ -0,0 +1,13 @@ +export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/') + +apt-get update +apt-get install -y software-properties-common +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get update +apt-get install -y docker-ce +docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest +echo alias loxicmd=\"sudo docker exec -it loxilb loxicmd\" >> ~/.bashrc +echo alias loxilb=\"sudo docker exec -it loxilb \" >> ~/.bashrc + +echo $LOXILB_IP > /vagrant/loxilb-ip diff --git a/cicd/k3s-flannel-loxilb-ingress/master.sh b/cicd/k3s-flannel-loxilb-ingress/master.sh new file mode 100755 index 00000000..e78fce62 --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/master.sh @@ -0,0 +1,15 @@ +export MASTER_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/') + +curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable traefik --disable servicelb --node-ip=${MASTER_IP}" sh - + +echo $MASTER_IP > /vagrant/master-ip +sudo cp /var/lib/rancher/k3s/server/node-token /vagrant/node-token +sudo sed -i -e "s/127.0.0.1/${MASTER_IP}/g" /etc/rancher/k3s/k3s.yaml +sudo cp /etc/rancher/k3s/k3s.yaml /vagrant/k3s.yaml +sudo kubectl apply -f /vagrant/kube-loxilb.yml +sudo kubectl apply -f /vagrant/ingress/loxilb-secret.yml +sudo kubectl apply -f /vagrant/ingress/loxilb-ingress-deploy.yml +sudo kubectl apply -f /vagrant/ingress/loxilb-ingress-svc.yml +sudo kubectl apply -f /vagrant/ingress/loxilb-ingress.yml +sleep 30 +/vagrant/wait_ready.sh diff --git a/cicd/k3s-flannel-loxilb-ingress/rmconfig.sh b/cicd/k3s-flannel-loxilb-ingress/rmconfig.sh new file mode 100755 index 00000000..f157b24b --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/rmconfig.sh @@ -0,0 +1,3 @@ +#!/bin/bash +vagrant destroy -f master +vagrant destroy -f loxilb diff --git a/cicd/k3s-flannel-loxilb-ingress/validation.sh b/cicd/k3s-flannel-loxilb-ingress/validation.sh new file mode 100755 index 00000000..296b29f1 --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/validation.sh @@ -0,0 +1,40 @@ +#!/bin/bash +source ../common.sh +echo k3s-loxi-ingress + +if [ "$1" ]; then + KUBECONFIG="$1" +fi + +# Set space as the delimiter +IFS=' ' + +sleep 45 + +echo "Service Info" +vagrant ssh master -c 'sudo kubectl get svc -A' +echo 
"Ingress Info" +vagrant ssh master -c 'sudo kubectl get ingress -A' +echo "LB Info" +vagrant ssh loxilb -c 'sudo docker exec -i loxilb loxicmd get lb -o wide' +echo "EP Info" +vagrant ssh loxilb -c 'sudo docker exec -i loxilb loxicmd get ep -o wide' + +print_debug_info() { + echo "llb1 route-info" + vagrant ssh loxilb -c 'ip route' + vagrant ssh master -c 'sudo kubectl get pods -A' + vagrant ssh master -c 'sudo kubectl get svc' + vagrant ssh master -c 'sudo kubectl get nodes' +} + +out=$(curl -s --connect-timeout 30 -H "Application/json" -H "Content-type: application/json" -H "HOST: loxilb.io" --insecure https://192.168.80.9:443) +if [[ ${out} == *"Welcome to nginx"* ]]; then + echo "k3s-loxi-ingress tcp [OK]" +else + echo "k3s-loxi-ingress tcp [FAILED]" + print_debug_info + exit 1 +fi + +exit diff --git a/cicd/k3s-flannel-loxilb-ingress/wait_ready.sh b/cicd/k3s-flannel-loxilb-ingress/wait_ready.sh new file mode 100755 index 00000000..3736a1ba --- /dev/null +++ b/cicd/k3s-flannel-loxilb-ingress/wait_ready.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +function wait_cluster_ready_full { + sudo kubectl wait pod --all --for=condition=Ready --namespace=kube-system --timeout=240s + sudo kubectl wait pod --all --for=condition=Ready --namespace=default --timeout=60s +} + +wait_cluster_ready_full diff --git a/cicd/k3s-incluster/common.sh b/cicd/k3s-incluster/common.sh index b82f6e0e..ea119602 100644 --- a/cicd/k3s-incluster/common.sh +++ b/cicd/k3s-incluster/common.sh @@ -536,8 +536,8 @@ function create_lb_rule() { echo "$1: loxicmd create lb ${args[*]}" $dexec $1 loxicmd create lb ${args[*]} - hook=$($dexec llb1 ntc filter show dev eth0 ingress | grep tc_packet_hook) - if [[ $hook != *"tc_packet_hook"* ]]; then + hook=$($dexec llb1 tc filter show dev eth0 ingress | grep tc_packet_func) + if [[ $hook != *"tc_packet_func"* ]]; then echo "ERROR : No hook point found"; exit 1 fi diff --git a/cicd/k3s-rabbitmq-incluster/Vagrantfile b/cicd/k3s-rabbitmq-incluster/Vagrantfile new file mode 100644 index 00000000..fc1347f4 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/Vagrantfile @@ -0,0 +1,51 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +workers = (ENV['WORKERS'] || "3").to_i +box_name = (ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s") +box_version = "0.7.1" +Vagrant.configure("2") do |config| + config.vm.box = "#{box_name}" + config.vm.box_version = "#{box_version}" + + if Vagrant.has_plugin?("vagrant-vbguest") + config.vbguest.auto_update = false + end + + config.vm.define "host" do |host| + host.vm.hostname = 'host' + host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0" + host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0" + host.vm.provision :shell, :path => "host.sh" + host.vm.provider :virtualbox do |vbox| + vbox.memory = "4096" + vbox.cpus = "8" + vbox.default_nic_type = "virtio" + end + end + + config.vm.define "master" do |master| + master.vm.hostname = 'master' + master.vm.network :private_network, ip: "192.168.80.10", :netmask => "255.255.255.0" + master.vm.provision :shell, :path => "master.sh" + master.vm.provider :virtualbox do |vbox| + vbox.memory = "4096" + vbox.cpus = "4" + vbox.default_nic_type = "virtio" + end + end + + (1..workers).each do |node_number| + config.vm.define "worker#{node_number}" do |worker| + worker.vm.hostname = "worker#{node_number}" + ip = node_number + 100 + worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0" + worker.vm.provision :shell, :path => "worker.sh" + 
worker.vm.provider :virtualbox do |vbox| + vbox.memory = "4096" + vbox.cpus = "4" + vbox.default_nic_type = "virtio" + end + end + end +end diff --git a/cicd/k3s-rabbitmq-incluster/config.sh b/cicd/k3s-rabbitmq-incluster/config.sh new file mode 100755 index 00000000..6b8ee48e --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/config.sh @@ -0,0 +1,3 @@ +#!/bin/bash +vagrant global-status | grep -i virtualbox | cut -f 1 -d ' ' | xargs -L 1 vagrant destroy -f +vagrant up diff --git a/cicd/k3s-rabbitmq-incluster/grafana.yaml b/cicd/k3s-rabbitmq-incluster/grafana.yaml new file mode 100644 index 00000000..b41ec540 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/grafana.yaml @@ -0,0 +1,84 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: grafana-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: grafana + name: grafana +spec: + selector: + matchLabels: + app: grafana + template: + metadata: + labels: + app: grafana + spec: + securityContext: + fsGroup: 472 + supplementalGroups: + - 0 + containers: + - name: grafana + image: grafana/grafana:9.1.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 3000 + name: http-grafana + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /robots.txt + port: 3000 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + resources: + requests: + cpu: 250m + memory: 750Mi + volumeMounts: + - mountPath: /var/lib/grafana + name: grafana-pv + volumes: + - name: grafana-pv + persistentVolumeClaim: + claimName: grafana-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: grafana +spec: + ports: + - port: 3000 + protocol: TCP + targetPort: http-grafana + selector: + app: grafana + sessionAffinity: None + type: LoadBalancer + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb diff --git a/cicd/k3s-rabbitmq-incluster/host.sh b/cicd/k3s-rabbitmq-incluster/host.sh new file mode 100644 index 00000000..f29d40e7 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/host.sh @@ -0,0 +1,10 @@ +apt-get update +apt-get install -y software-properties-common +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get update +apt-get install -y docker-ce + +#curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +#add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get install -y iperf iperf3 diff --git a/cicd/k3s-rabbitmq-incluster/install_cilium.sh b/cicd/k3s-rabbitmq-incluster/install_cilium.sh new file mode 100755 index 00000000..7206c022 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/install_cilium.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +#Install Cilium +CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt) +CLI_ARCH=amd64 +if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi +curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum} +sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum +sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin +rm 
cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum} +mkdir -p ~/.kube/ +sudo cat /etc/rancher/k3s/k3s.yaml > ~/.kube/config +cilium install diff --git a/cicd/k3s-rabbitmq-incluster/iperf-service.yml b/cicd/k3s-rabbitmq-incluster/iperf-service.yml new file mode 100644 index 00000000..1d23f273 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/iperf-service.yml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: iperf-service + annotations: + loxilb.io/lbmode: "onearm" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: perf-test + ports: + - port: 55001 + targetPort: 5001 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: iperf1 + labels: + what: perf-test +spec: + containers: + - name: iperf + image: eyes852/ubuntu-iperf-test:0.5 + command: + - iperf + - "-s" + ports: + - containerPort: 5001 + diff --git a/cicd/k3s-rabbitmq-incluster/k3s.yaml b/cicd/k3s-rabbitmq-incluster/k3s.yaml new file mode 100644 index 00000000..546f1c3f --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/k3s.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTVRBNE1qWXhOVEl3SGhjTk1qUXdNekU1TURVeU9URXlXaGNOTXpRd016RTNNRFV5T1RFeQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTVRBNE1qWXhOVEl3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRcjhwZm83akZnSUN1WkZlVkdCZVRrc01PdElZWjZidWVVYVFBc1BLbU0KTGVYbm9Uc1JzRFJ5Wi92Vmw1NzNZZHNHeTYxSHh1WFN3bTNPUFhvS25DKzFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVltWTVuMnhPdUpmNEgvUmt5aDJwCnpPaDE4K3d3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnSnUvbVRlUk9qeC8rdGdNckxQdC9NMmF0a1RqRUw3NkkKU2xQV0N3eEticlVDSUhRZFZEQVJGVWtPd1ZNNEppdVhTaG5JYkt1OXJBNzdocXZBdlZ0ZFEzWEMKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://192.168.80.10:6443 + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrRENDQVRlZ0F3SUJBZ0lJRUk5Tm02SXBsdzB3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOekV3T0RJMk1UVXlNQjRYRFRJME1ETXhPVEExTWpreE1sb1hEVEkxTURNeApPVEExTWpreE1sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJONlJVVGVwcnZBRkNuN2EKcDZndmRUMmxPN1MrWXd3bTZ3em45T2xXcWw4ZnJqNTgwcktEWVNxVmFCdkxUL2IrZytBL0pQRUV6TXFscWdYTwpGYWo0TElTalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVGRNaERoRUZQOEdlMVBSblh4d2hWcGltUHZYakFLQmdncWhrak9QUVFEQWdOSEFEQkUKQWlBaGNVd1d3WE1iRGZaVkE2NHVSemhweDR5dmg0UUNEM0ZZa2YwQkwwQ2FlZ0lnTmZ4enArenUxWk5PZWpoTAo2d3ZXVXhuekZpQ2xZYUpzNDVrcCt6ZFJuME09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTVRBNE1qWXhOVEl3SGhjTk1qUXdNekU1TURVeU9URXlXaGNOTXpRd016RTNNRFV5T1RFeQpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTVRBNE1qWXhOVEl3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUTUoxd1ZOcEIwS0N6MWx5bWhRRGc3UDhRSGxGcHBUOHc5blFCWGYyeGQKMWtTb2RyS3RvSzlQYTJtelNiWFNtei9acTBpQk94SkY3aTdyT3BhQzZXUHdvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTNUSVE0UkJUL0JudFQwWjE4Y0lWCmFZcGo3MTR3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUlRN3ZNVldZNkMxaVdLakIzNEYzdVZFQS9GSVpKVVAKRWM1bEFLS0JSWW8vQWlBbUFVVnQzRkRrSEYreFhJWUlzenBscWVDNWZ0Y0g1azJDaFFrbXFZaThXQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU9KamZxUm90eWRLd0poQkY5SHJlTG1RNExSYVp2NFFiRXp0K0I0WnBTeWNvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFM3BGUk42bXU4QVVLZnRxbnFDOTFQYVU3dEw1akRDYnJET2YwNlZhcVh4K3VQbnpTc29OaApLcFZvRzh0UDl2NkQ0RDhrOFFUTXlxV3FCYzRWcVBnc2hBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= diff --git a/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml b/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml new file mode 100644 index 00000000..f6d54ad8 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml @@ -0,0 +1,130 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-loxilb + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - endpoints + - services + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-loxilb +subjects: + - kind: ServiceAccount + name: kube-loxilb + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-loxilb + namespace: kube-system + labels: + app: loxilb +spec: + replicas: 1 + selector: + matchLabels: + app: loxilb + template: + metadata: + labels: + app: loxilb + spec: + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + # Mark the pod as a 
critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + terminationGracePeriodSeconds: 0 + containers: + - name: kube-loxilb + image: ghcr.io/loxilb-io/kube-loxilb:latest + imagePullPolicy: Always + command: + - /bin/kube-loxilb + args: + #- --loxiURL=http://192.168.80.9:11111 + - --externalCIDR=192.168.80.20/32 + - --setRoles=0.0.0.0 + #- --monitor + #- --setBGP + - --setLBMode=1 + #- --config=/opt/loxilb/agent/kube-loxilb.conf + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + capabilities: + add: ["NET_ADMIN", "NET_RAW"] diff --git a/cicd/k3s-rabbitmq-incluster/loxilb.sh b/cicd/k3s-rabbitmq-incluster/loxilb.sh new file mode 100644 index 00000000..74e66ae9 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/loxilb.sh @@ -0,0 +1,13 @@ +export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/') + +apt-get update +apt-get install -y software-properties-common +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get update +apt-get install -y docker-ce +docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest +echo alias loxicmd=\"sudo docker exec -it loxilb loxicmd\" >> ~/.bashrc +echo alias loxilb=\"sudo docker exec -it loxilb \" >> ~/.bashrc + +echo $LOXILB_IP > /vagrant/loxilb-ip diff --git a/cicd/k3s-rabbitmq-incluster/loxilb.yml b/cicd/k3s-rabbitmq-incluster/loxilb.yml new file mode 100644 index 00000000..30cf81d9 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/loxilb.yml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-lb + namespace: kube-system + labels: + app: loxilb-app +spec: + selector: + matchLabels: + app: loxilb-app + template: + metadata: + labels: + app: loxilb-app + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - key: "node-role.kubernetes.io/master" + operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + #affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: "node-role.kubernetes.io/master" + # operator: Exists + # - key: "node-role.kubernetes.io/control-plane" + # operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + containers: + - name: loxilb-lb + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: Always + #command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni.|veth.|flannel.|cali.|tunl.|vxlan[.]calico", "--ipvs-compat", "--k8s-api=cluster" ] + command: [ "/root/loxilb-io/loxilb/loxilb", "--ipvs-compat"] + ports: + - containerPort: 11111 + - containerPort: 179 + - containerPort: 50051 + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN + env: + - name: MY_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-lb-service + namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-app + ports: + - name: loxilb-app + port: 11111 + targetPort: 11111 + protocol: TCP + - name: loxilb-app-bgp + port: 
179 + targetPort: 179 + protocol: TCP + - name: loxilb-app-gobgp + port: 50051 + targetPort: 50051 + protocol: TCP diff --git a/cicd/k3s-rabbitmq-incluster/manifests/cilium-ippool.yaml b/cicd/k3s-rabbitmq-incluster/manifests/cilium-ippool.yaml new file mode 100644 index 00000000..5dd56500 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/manifests/cilium-ippool.yaml @@ -0,0 +1,7 @@ +apiVersion: "cilium.io/v2alpha1" +kind: CiliumLoadBalancerIPPool +metadata: + name: "lb-pool" +spec: + cidrs: + - cidr: "192.168.80.20/24" diff --git a/cicd/k3s-rabbitmq-incluster/manifests/cilium-policy.yaml b/cicd/k3s-rabbitmq-incluster/manifests/cilium-policy.yaml new file mode 100644 index 00000000..b155ad31 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/manifests/cilium-policy.yaml @@ -0,0 +1,9 @@ +apiVersion: "cilium.io/v2alpha1" +kind: CiliumL2AnnouncementPolicy +metadata: + name: basic-policy +spec: + interfaces: + - eth1 + externalIPs: true + loadBalancerIPs: true diff --git a/cicd/k3s-rabbitmq-incluster/manifests/iperf-service-loxilb.yml b/cicd/k3s-rabbitmq-incluster/manifests/iperf-service-loxilb.yml new file mode 100644 index 00000000..1d23f273 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/manifests/iperf-service-loxilb.yml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: iperf-service + annotations: + loxilb.io/lbmode: "onearm" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: perf-test + ports: + - port: 55001 + targetPort: 5001 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: iperf1 + labels: + what: perf-test +spec: + containers: + - name: iperf + image: eyes852/ubuntu-iperf-test:0.5 + command: + - iperf + - "-s" + ports: + - containerPort: 5001 + diff --git a/cicd/k3s-rabbitmq-incluster/manifests/iperf-service-metallb.yml b/cicd/k3s-rabbitmq-incluster/manifests/iperf-service-metallb.yml new file mode 100644 index 00000000..066c796e --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/manifests/iperf-service-metallb.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: iperf-service +spec: + externalTrafficPolicy: Local + selector: + what: perf-test + ports: + - port: 55001 + targetPort: 5001 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: iperf1 + labels: + what: perf-test +spec: + containers: + - name: iperf + image: eyes852/ubuntu-iperf-test:0.5 + command: + - iperf + - "-s" + ports: + - containerPort: 5001 + diff --git a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yml b/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml similarity index 93% rename from cicd/k8s-calico-incluster/yaml/kube-loxilb.yml rename to cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml index 3d8478c4..9357b17b 100644 --- a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yml +++ b/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml @@ -92,7 +92,6 @@ spec: app: loxilb spec: hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet tolerations: - effect: NoSchedule operator: Exists @@ -111,10 +110,8 @@ spec: command: - /bin/kube-loxilb args: - #- --loxiURL=http://192.168.80.10:11111 - - --externalCIDR=123.123.123.1/24 - - --setBGP=64512 - - --listenBGPPort=1791 + #- --loxiURL=http://192.168.80.9:11111 + - --externalCIDR=192.168.80.20/32 - --setRoles=0.0.0.0 #- --monitor #- --setBGP diff --git a/cicd/k3s-rabbitmq-incluster/manifests/loxilb.yml b/cicd/k3s-rabbitmq-incluster/manifests/loxilb.yml new file mode 100644 index 00000000..cd783e48 --- /dev/null +++ 
diff --git a/cicd/k3s-rabbitmq-incluster/manifests/loxilb.yml b/cicd/k3s-rabbitmq-incluster/manifests/loxilb.yml new file mode 100644 index 00000000..cd783e48 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/manifests/loxilb.yml @@ -0,0 +1,67 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-lb + namespace: kube-system + labels: + app: loxilb-app +spec: + selector: + matchLabels: + app: loxilb-app + template: + metadata: + labels: + app: loxilb-app + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - key: "node-role.kubernetes.io/master" + operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + containers: + - name: loxilb-lb + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: Always + command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni.|veth.|flannel.|cali.|tunl.|vxlan[.]calico", "--ipvs-compat", "--k8s-api=cluster" ] + ports: + - containerPort: 11111 + - containerPort: 179 + - containerPort: 50051 + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN + env: + - name: MY_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-lb-service + namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-app + ports: + - name: loxilb-app + port: 11111 + targetPort: 11111 + protocol: TCP + - name: loxilb-app-bgp + port: 179 + targetPort: 179 + protocol: TCP + - name: loxilb-app-gobgp + port: 50051 + targetPort: 50051 + protocol: TCP
diff --git a/cicd/k3s-rabbitmq-incluster/manifests/metallb-addr-pool.yml b/cicd/k3s-rabbitmq-incluster/manifests/metallb-addr-pool.yml new file mode 100644 index 00000000..ea7d2ba4 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/manifests/metallb-addr-pool.yml @@ -0,0 +1,8 @@ +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: cheap + namespace: metallb-system +spec: + addresses: + - 192.168.80.20/32
diff --git a/cicd/k3s-rabbitmq-incluster/manifests/metallb-native.yaml b/cicd/k3s-rabbitmq-incluster/manifests/metallb-native.yaml new file mode 100644 index 00000000..f8a80308 --- /dev/null +++ b/cicd/k3s-rabbitmq-incluster/manifests/metallb-native.yaml @@ -0,0 +1,2042 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/warn: privileged + name: metallb-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: addresspools.metallb.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + caBundle: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== + service: + name: webhook-service + namespace: metallb-system + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1beta1 + group: metallb.io + names: + kind: AddressPool + listKind: AddressPoolList + plural: addresspools + singular: addresspool + scope: Namespaced + versions: + - deprecated: true + deprecationWarning: metallb.io v1alpha1 AddressPool is deprecated + name: v1alpha1 + schema: + openAPIV3Schema: + description: AddressPool is the Schema for the addresspools API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AddressPoolSpec defines the desired state of AddressPool. + properties: + addresses: + description: A list of IP address ranges over which MetalLB has authority. + You can list multiple ranges in a single pool, they will all share + the same settings. Each range can be either a CIDR prefix, or an + explicit start-end range of IPs. + items: + type: string + type: array + autoAssign: + default: true + description: AutoAssign flag used to prevent MetallB from automatic + allocation for a pool. + type: boolean + bgpAdvertisements: + description: When an IP is allocated from this pool, how should it + be translated into BGP announcements? + items: + properties: + aggregationLength: + default: 32 + description: The aggregation-length advertisement option lets + you “roll up” the /32s into a larger prefix. + format: int32 + minimum: 1 + type: integer + aggregationLengthV6: + default: 128 + description: Optional, defaults to 128 (i.e. no aggregation) + if not specified. + format: int32 + type: integer + communities: + description: BGP communities + items: + type: string + type: array + localPref: + description: BGP LOCAL_PREF attribute which is used by BGP best + path algorithm, Path with higher localpref is preferred over + one with lower localpref. + format: int32 + type: integer + type: object + type: array + protocol: + description: Protocol can be used to select how the announcement is + done. + enum: + - layer2 + - bgp + type: string + required: + - addresses + - protocol + type: object + status: + description: AddressPoolStatus defines the observed state of AddressPool. + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} + - deprecated: true + deprecationWarning: metallb.io v1beta1 AddressPool is deprecated, consider using + IPAddressPool + name: v1beta1 + schema: + openAPIV3Schema: + description: AddressPool represents a pool of IP addresses that can be allocated + to LoadBalancer services. AddressPool is deprecated and being replaced by + IPAddressPool. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AddressPoolSpec defines the desired state of AddressPool. + properties: + addresses: + description: A list of IP address ranges over which MetalLB has authority. + You can list multiple ranges in a single pool, they will all share + the same settings. 
Each range can be either a CIDR prefix, or an + explicit start-end range of IPs. + items: + type: string + type: array + autoAssign: + default: true + description: AutoAssign flag used to prevent MetallB from automatic + allocation for a pool. + type: boolean + bgpAdvertisements: + description: Drives how an IP allocated from this pool should translated + into BGP announcements. + items: + properties: + aggregationLength: + default: 32 + description: The aggregation-length advertisement option lets + you “roll up” the /32s into a larger prefix. + format: int32 + minimum: 1 + type: integer + aggregationLengthV6: + default: 128 + description: Optional, defaults to 128 (i.e. no aggregation) + if not specified. + format: int32 + type: integer + communities: + description: BGP communities to be associated with the given + advertisement. + items: + type: string + type: array + localPref: + description: BGP LOCAL_PREF attribute which is used by BGP best + path algorithm, Path with higher localpref is preferred over + one with lower localpref. + format: int32 + type: integer + type: object + type: array + protocol: + description: Protocol can be used to select how the announcement is + done. + enum: + - layer2 + - bgp + type: string + required: + - addresses + - protocol + type: object + status: + description: AddressPoolStatus defines the observed state of AddressPool. + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: bfdprofiles.metallb.io +spec: + group: metallb.io + names: + kind: BFDProfile + listKind: BFDProfileList + plural: bfdprofiles + singular: bfdprofile + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.passiveMode + name: Passive Mode + type: boolean + - jsonPath: .spec.transmitInterval + name: Transmit Interval + type: integer + - jsonPath: .spec.receiveInterval + name: Receive Interval + type: integer + - jsonPath: .spec.detectMultiplier + name: Multiplier + type: integer + name: v1beta1 + schema: + openAPIV3Schema: + description: BFDProfile represents the settings of the bfd session that can + be optionally associated with a BGP session. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BFDProfileSpec defines the desired state of BFDProfile. + properties: + detectMultiplier: + description: Configures the detection multiplier to determine packet + loss. The remote transmission interval will be multiplied by this + value to determine the connection loss detection timer. 
+ format: int32 + maximum: 255 + minimum: 2 + type: integer + echoInterval: + description: Configures the minimal echo receive transmission interval + that this system is capable of handling in milliseconds. Defaults + to 50ms + format: int32 + maximum: 60000 + minimum: 10 + type: integer + echoMode: + description: Enables or disables the echo transmission mode. This + mode is disabled by default, and not supported on multi hops setups. + type: boolean + minimumTtl: + description: 'For multi hop sessions only: configure the minimum expected + TTL for an incoming BFD control packet.' + format: int32 + maximum: 254 + minimum: 1 + type: integer + passiveMode: + description: 'Mark session as passive: a passive session will not + attempt to start the connection and will wait for control packets + from peer before it begins replying.' + type: boolean + receiveInterval: + description: The minimum interval that this system is capable of receiving + control packets in milliseconds. Defaults to 300ms. + format: int32 + maximum: 60000 + minimum: 10 + type: integer + transmitInterval: + description: The minimum transmission interval (less jitter) that + this system wants to use to send BFD control packets in milliseconds. + Defaults to 300ms + format: int32 + maximum: 60000 + minimum: 10 + type: integer + type: object + status: + description: BFDProfileStatus defines the observed state of BFDProfile. + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: bgpadvertisements.metallb.io +spec: + group: metallb.io + names: + kind: BGPAdvertisement + listKind: BGPAdvertisementList + plural: bgpadvertisements + singular: bgpadvertisement + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.ipAddressPools + name: IPAddressPools + type: string + - jsonPath: .spec.ipAddressPoolSelectors + name: IPAddressPool Selectors + type: string + - jsonPath: .spec.peers + name: Peers + type: string + - jsonPath: .spec.nodeSelectors + name: Node Selectors + priority: 10 + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: BGPAdvertisement allows to advertise the IPs coming from the + selected IPAddressPools via BGP, setting the parameters of the BGP Advertisement. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPAdvertisementSpec defines the desired state of BGPAdvertisement. + properties: + aggregationLength: + default: 32 + description: The aggregation-length advertisement option lets you + “roll up” the /32s into a larger prefix. Defaults to 32. Works for + IPv4 addresses. 
+ format: int32 + minimum: 1 + type: integer + aggregationLengthV6: + default: 128 + description: The aggregation-length advertisement option lets you + “roll up” the /128s into a larger prefix. Defaults to 128. Works + for IPv6 addresses. + format: int32 + type: integer + communities: + description: The BGP communities to be associated with the announcement. + Each item can be a standard community of the form 1234:1234, a large + community of the form large:1234:1234:1234 or the name of an alias + defined in the Community CRD. + items: + type: string + type: array + ipAddressPoolSelectors: + description: A selector for the IPAddressPools which would get advertised + via this advertisement. If no IPAddressPool is selected by this + or by the list, the advertisement is applied to all the IPAddressPools. + items: + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An empty + label selector matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + ipAddressPools: + description: The list of IPAddressPools to advertise via this advertisement, + selected by name. + items: + type: string + type: array + localPref: + description: The BGP LOCAL_PREF attribute which is used by BGP best + path algorithm, Path with higher localpref is preferred over one + with lower localpref. + format: int32 + type: integer + nodeSelectors: + description: NodeSelectors allows to limit the nodes to announce as + next hops for the LoadBalancer IP. When empty, all the nodes having are + announced as next hops. + items: + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An empty + label selector matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. 
+ type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + peers: + description: Peers limits the bgppeer to advertise the ips of the + selected pools to. When empty, the loadbalancer IP is announced + to all the BGPPeers configured. + items: + type: string + type: array + type: object + status: + description: BGPAdvertisementStatus defines the observed state of BGPAdvertisement. + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: bgppeers.metallb.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25ad
nNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== + service: + name: webhook-service + namespace: metallb-system + path: /convert + conversionReviewVersions: + - v1beta1 + - v1beta2 + group: metallb.io + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.peerAddress + name: Address + type: string + - jsonPath: .spec.peerASN + name: ASN + type: string + - jsonPath: .spec.bfdProfile + name: BFD Profile + type: string + - jsonPath: .spec.ebgpMultiHop + name: Multi Hops + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: BGPPeer is the Schema for the peers API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec defines the desired state of Peer. + properties: + bfdProfile: + type: string + ebgpMultiHop: + description: EBGP peer is multi-hops away + type: boolean + holdTime: + description: Requested BGP hold time, per RFC4271. + type: string + keepaliveTime: + description: Requested BGP keepalive time, per RFC4271. + type: string + myASN: + description: AS number to use for the local end of the session. + format: int32 + maximum: 4294967295 + minimum: 0 + type: integer + nodeSelectors: + description: Only connect to this peer on nodes that match one of + these selectors. + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + minItems: 1 + type: array + required: + - key + - operator + - values + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + type: array + password: + description: Authentication password for routers enforcing TCP MD5 + authenticated sessions + type: string + peerASN: + description: AS number to expect from the remote end of the session. + format: int32 + maximum: 4294967295 + minimum: 0 + type: integer + peerAddress: + description: Address to dial when establishing the session. + type: string + peerPort: + description: Port to dial when establishing the session. 
+ maximum: 16384 + minimum: 0 + type: integer + routerID: + description: BGP router ID to advertise to the peer + type: string + sourceAddress: + description: Source address to use when establishing the session. + type: string + required: + - myASN + - peerASN + - peerAddress + type: object + status: + description: BGPPeerStatus defines the observed state of Peer. + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.peerAddress + name: Address + type: string + - jsonPath: .spec.peerASN + name: ASN + type: string + - jsonPath: .spec.bfdProfile + name: BFD Profile + type: string + - jsonPath: .spec.ebgpMultiHop + name: Multi Hops + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: BGPPeer is the Schema for the peers API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec defines the desired state of Peer. + properties: + bfdProfile: + description: The name of the BFD Profile to be used for the BFD session + associated to the BGP session. If not set, the BFD session won't + be set up. + type: string + ebgpMultiHop: + description: To set if the BGPPeer is multi-hops away. Needed for + FRR mode only. + type: boolean + holdTime: + description: Requested BGP hold time, per RFC4271. + type: string + keepaliveTime: + description: Requested BGP keepalive time, per RFC4271. + type: string + myASN: + description: AS number to use for the local end of the session. + format: int32 + maximum: 4294967295 + minimum: 0 + type: integer + nodeSelectors: + description: Only connect to this peer on nodes that match one of + these selectors. + items: + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An empty + label selector matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + password: + description: Authentication password for routers enforcing TCP MD5 + authenticated sessions + type: string + passwordSecret: + description: passwordSecret is name of the authentication secret for + BGP Peer. the secret must be of type "kubernetes.io/basic-auth", + and created in the same namespace as the MetalLB deployment. The + password is stored in the secret as the key "password". + properties: + name: + description: name is unique within a namespace to reference a + secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + peerASN: + description: AS number to expect from the remote end of the session. + format: int32 + maximum: 4294967295 + minimum: 0 + type: integer + peerAddress: + description: Address to dial when establishing the session. + type: string + peerPort: + default: 179 + description: Port to dial when establishing the session. + maximum: 16384 + minimum: 0 + type: integer + routerID: + description: BGP router ID to advertise to the peer + type: string + sourceAddress: + description: Source address to use when establishing the session. + type: string + vrf: + description: To set if we want to peer with the BGPPeer using an interface + belonging to a host vrf + type: string + required: + - myASN + - peerASN + - peerAddress + type: object + status: + description: BGPPeerStatus defines the observed state of Peer. + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: communities.metallb.io +spec: + group: metallb.io + names: + kind: Community + listKind: CommunityList + plural: communities + singular: community + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: Community is a collection of aliases for communities. Users can + define named aliases to be used in the BGPPeer CRD. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CommunitySpec defines the desired state of Community. 
+ properties: + communities: + items: + properties: + name: + description: The name of the alias for the community. + type: string + value: + description: The BGP community value corresponding to the given + name. Can be a standard community of the form 1234:1234 or + a large community of the form large:1234:1234:1234. + type: string + type: object + type: array + type: object + status: + description: CommunityStatus defines the observed state of Community. + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: ipaddresspools.metallb.io +spec: + group: metallb.io + names: + kind: IPAddressPool + listKind: IPAddressPoolList + plural: ipaddresspools + singular: ipaddresspool + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.autoAssign + name: Auto Assign + type: boolean + - jsonPath: .spec.avoidBuggyIPs + name: Avoid Buggy IPs + type: boolean + - jsonPath: .spec.addresses + name: Addresses + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: IPAddressPool represents a pool of IP addresses that can be allocated + to LoadBalancer services. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAddressPoolSpec defines the desired state of IPAddressPool. + properties: + addresses: + description: A list of IP address ranges over which MetalLB has authority. + You can list multiple ranges in a single pool, they will all share + the same settings. Each range can be either a CIDR prefix, or an + explicit start-end range of IPs. + items: + type: string + type: array + autoAssign: + default: true + description: AutoAssign flag used to prevent MetallB from automatic + allocation for a pool. + type: boolean + avoidBuggyIPs: + default: false + description: AvoidBuggyIPs prevents addresses ending with .0 and .255 + to be used by a pool. + type: boolean + serviceAllocation: + description: AllocateTo makes ip pool allocation to specific namespace + and/or service. The controller will use the pool with lowest value + of priority in case of multiple matches. A pool with no priority + set will be used only if the pools with priority can't be used. + If multiple matching IPAddressPools are available it will check + for the availability of IPs sorting the matching IPAddressPools + by priority, starting from the highest to the lowest. If multiple + IPAddressPools have the same priority, choice will be random. + properties: + namespaceSelectors: + description: NamespaceSelectors list of label selectors to select + namespace(s) for ip pool, an alternative to using namespace + list. + items: + description: A label selector is a label query over a set of + resources. 
The result of matchLabels and matchExpressions + are ANDed. An empty label selector matches all objects. A + null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + namespaces: + description: Namespaces list of namespace(s) on which ip pool + can be attached. + items: + type: string + type: array + priority: + description: Priority priority given for ip pool while ip allocation + on a service. + type: integer + serviceSelectors: + description: ServiceSelectors list of label selector to select + service(s) for which ip pool can be used for ip allocation. + items: + description: A label selector is a label query over a set of + resources. The result of matchLabels and matchExpressions + are ANDed. An empty label selector matches all objects. A + null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + type: array + type: object + required: + - addresses + type: object + status: + description: IPAddressPoolStatus defines the observed state of IPAddressPool. + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: l2advertisements.metallb.io +spec: + group: metallb.io + names: + kind: L2Advertisement + listKind: L2AdvertisementList + plural: l2advertisements + singular: l2advertisement + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.ipAddressPools + name: IPAddressPools + type: string + - jsonPath: .spec.ipAddressPoolSelectors + name: IPAddressPool Selectors + type: string + - jsonPath: .spec.interfaces + name: Interfaces + type: string + - jsonPath: .spec.nodeSelectors + name: Node Selectors + priority: 10 + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: L2Advertisement allows to advertise the LoadBalancer IPs provided + by the selected pools via L2. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: L2AdvertisementSpec defines the desired state of L2Advertisement. + properties: + interfaces: + description: A list of interfaces to announce from. The LB IP will + be announced only from these interfaces. If the field is not set, + we advertise from all the interfaces on the host. + items: + type: string + type: array + ipAddressPoolSelectors: + description: A selector for the IPAddressPools which would get advertised + via this advertisement. If no IPAddressPool is selected by this + or by the list, the advertisement is applied to all the IPAddressPools. + items: + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An empty + label selector matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. 
This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + ipAddressPools: + description: The list of IPAddressPools to advertise via this advertisement, + selected by name. + items: + type: string + type: array + nodeSelectors: + description: NodeSelectors allows to limit the nodes to announce as + next hops for the LoadBalancer IP. When empty, all the nodes having are + announced as next hops. + items: + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An empty + label selector matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + type: object + status: + description: L2AdvertisementStatus defines the observed state of L2Advertisement. 
+ type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: speaker + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resourceNames: + - memberlist + resources: + - secrets + verbs: + - list +- apiGroups: + - apps + resourceNames: + - controller + resources: + - deployments + verbs: + - get +- apiGroups: + - metallb.io + resources: + - bgppeers + verbs: + - get + - list +- apiGroups: + - metallb.io + resources: + - addresspools + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - bfdprofiles + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - ipaddresspools + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - bgpadvertisements + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - l2advertisements + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - communities + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - list +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - addresspools + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - bfdprofiles + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - bgppeers + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - l2advertisements + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - bgpadvertisements + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - ipaddresspools + verbs: + - get + - list + - watch +- apiGroups: + - metallb.io + resources: + - communities + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:controller +rules: +- apiGroups: + - "" + resources: + - services + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - list +- apiGroups: + - "" + resources: + - services/status + verbs: + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - controller + resources: + - podsecuritypolicies + verbs: + - use +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - metallb-webhook-configuration + resources: + - validatingwebhookconfigurations + - mutatingwebhookconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + - mutatingwebhookconfigurations + verbs: + - list + - watch +- apiGroups: + - apiextensions.k8s.io + 
resourceNames: + - addresspools.metallb.io + - bfdprofiles.metallb.io + - bgpadvertisements.metallb.io + - bgppeers.metallb.io + - ipaddresspools.metallb.io + - l2advertisements.metallb.io + - communities.metallb.io + resources: + - customresourcedefinitions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:speaker +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - nodes + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - speaker + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: controller +subjects: +- kind: ServiceAccount + name: controller + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-lister +subjects: +- kind: ServiceAccount + name: speaker + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:controller +subjects: +- kind: ServiceAccount + name: controller + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:speaker +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:speaker +subjects: +- kind: ServiceAccount + name: speaker + namespace: metallb-system +--- +apiVersion: v1 +data: + excludel2.yaml: | + announcedInterfacesToExclude: ["^docker.*", "^cbr.*", "^dummy.*", "^virbr.*", "^lxcbr.*", "^veth.*", "^lo$", "^cali.*", "^tunl.*", "^flannel.*", "^kube-ipvs.*", "^cni.*", "^nodelocaldns.*"] +kind: ConfigMap +metadata: + name: metallb-excludel2 + namespace: metallb-system +--- +apiVersion: v1 +kind: Secret +metadata: + name: webhook-server-cert + namespace: metallb-system +--- +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: metallb-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + component: controller +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metallb + component: controller + name: controller + namespace: metallb-system +spec: + revisionHistoryLimit: 3 + selector: + matchLabels: + app: metallb + component: controller + template: + metadata: + annotations: + prometheus.io/port: "7472" + prometheus.io/scrape: "true" + labels: + app: metallb + component: controller + spec: + containers: + - args: + - --port=7472 + - --log-level=info + env: + - name: METALLB_ML_SECRET_NAME + value: memberlist + - name: METALLB_DEPLOYMENT + value: controller + image: quay.io/metallb/controller:v0.13.12 + livenessProbe: + failureThreshold: 3 + httpGet: + path: 
/metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: controller + ports: + - containerPort: 7472 + name: monitoring + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + nodeSelector: + kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + serviceAccountName: controller + terminationGracePeriodSeconds: 0 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: metallb + component: speaker + name: speaker + namespace: metallb-system +spec: + selector: + matchLabels: + app: metallb + component: speaker + template: + metadata: + annotations: + prometheus.io/port: "7472" + prometheus.io/scrape: "true" + labels: + app: metallb + component: speaker + spec: + containers: + - args: + - --port=7472 + - --log-level=info + env: + - name: METALLB_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: METALLB_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: METALLB_ML_BIND_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: METALLB_ML_LABELS + value: app=metallb,component=speaker + - name: METALLB_ML_SECRET_KEY_PATH + value: /etc/ml_secret_key + image: quay.io/metallb/speaker:v0.13.12 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: speaker + ports: + - containerPort: 7472 + name: monitoring + - containerPort: 7946 + name: memberlist-tcp + - containerPort: 7946 + name: memberlist-udp + protocol: UDP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_RAW + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/ml_secret_key + name: memberlist + readOnly: true + - mountPath: /etc/metallb + name: metallb-excludel2 + readOnly: true + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: speaker + terminationGracePeriodSeconds: 2 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: memberlist + secret: + defaultMode: 420 + secretName: memberlist + - configMap: + defaultMode: 256 + name: metallb-excludel2 + name: metallb-excludel2 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: metallb-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: metallb-system + path: /validate-metallb-io-v1beta2-bgppeer + failurePolicy: Fail + name: bgppeersvalidationwebhook.metallb.io + rules: + - apiGroups: + - metallb.io + apiVersions: + - v1beta2 + operations: + - CREATE + 
diff --git a/cicd/k3s-rabbitmq-incluster/master.sh b/cicd/k3s-rabbitmq-incluster/master.sh
new file mode 100644
index 00000000..7f6f9171
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/master.sh
@@ -0,0 +1,14 @@
+export MASTER_IP=$(ip a | grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+
+curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable traefik --disable servicelb --disable-cloud-controller --kube-proxy-arg proxy-mode=ipvs --flannel-iface=eth1 \
+--disable-network-policy \
+--node-ip=${MASTER_IP} --node-external-ip=${MASTER_IP} \
+--bind-address=${MASTER_IP}" sh -
+
+echo $MASTER_IP > /vagrant/master-ip
+sudo cp /var/lib/rancher/k3s/server/node-token /vagrant/node-token
+sudo cp /etc/rancher/k3s/k3s.yaml /vagrant/k3s.yaml
+sudo sed -i -e "s/127.0.0.1/${MASTER_IP}/g" /vagrant/k3s.yaml
+#sudo kubectl apply -f "https://github.com/rabbitmq/cluster-operator/releases/latest/download/cluster-operator.yml"
+#sudo kubectl apply -f /vagrant/rmq/rabbitmq.yaml
+/vagrant/wait_ready.sh
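# The kubeconfig exported above is meant to be consumed from outside the VM;
# the sed rewrite points it at the master's 192.168.80.x address instead of
# 127.0.0.1, so from the host side a check is simply:
KUBECONFIG=/vagrant/k3s.yaml kubectl get nodes -o wide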
diff --git a/cicd/k3s-rabbitmq-incluster/master_with_cilium.sh b/cicd/k3s-rabbitmq-incluster/master_with_cilium.sh
new file mode 100644
index 00000000..01ad60b7
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/master_with_cilium.sh
@@ -0,0 +1,29 @@
+export MASTER_IP=$(ip a | grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+
+#curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable traefik --disable servicelb --disable-cloud-controller --kube-proxy-arg proxy-mode=ipvs --flannel-iface=eth1 \
+
+curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable traefik --disable servicelb --disable-cloud-controller \
+--flannel-backend=none \
+--disable-network-policy \
+--node-ip=${MASTER_IP} --node-external-ip=${MASTER_IP} \
+--bind-address=${MASTER_IP}" sh -
+
+# Install Cilium
+CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
+CLI_ARCH=amd64
+if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
+curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
+sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
+sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
+rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
+mkdir -p ~/.kube/
+sudo cat /etc/rancher/k3s/k3s.yaml > ~/.kube/config
+cilium install
+
+echo $MASTER_IP > /vagrant/master-ip
+sudo cp /var/lib/rancher/k3s/server/node-token /vagrant/node-token
+sudo cp /etc/rancher/k3s/k3s.yaml /vagrant/k3s.yaml
+sudo sed -i -e "s/127.0.0.1/${MASTER_IP}/g" /vagrant/k3s.yaml
+#sudo kubectl apply -f "https://github.com/rabbitmq/cluster-operator/releases/latest/download/cluster-operator.yml"
+#sudo kubectl apply -f /vagrant/rmq/rabbitmq.yaml
+/vagrant/wait_ready.sh
diff --git a/cicd/k3s-rabbitmq-incluster/prom_rbac.yaml b/cicd/k3s-rabbitmq-incluster/prom_rbac.yaml
new file mode 100644
index 00000000..967e1436
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/prom_rbac.yaml
@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: prometheus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: prometheus
+rules:
+- apiGroups: [""]
+  resources:
+  - nodes
+  - nodes/metrics
+  - services
+  - endpoints
+  - pods
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources:
+  - configmaps
+  verbs: ["get"]
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses
+  verbs: ["get", "list", "watch"]
+- nonResourceURLs: ["/metrics"]
+  verbs: ["get"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: prometheus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: prometheus
+subjects:
+- kind: ServiceAccount
+  name: prometheus
+  namespace: default
diff --git a/cicd/k3s-rabbitmq-incluster/prom_svc.yaml b/cicd/k3s-rabbitmq-incluster/prom_svc.yaml
new file mode 100644
index 00000000..c6eebe80
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/prom_svc.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: prometheus
+  labels:
+    app: prometheus
+spec:
+  #externalTrafficPolicy: Local
+  #loadBalancerClass: loxilb.io/loxilb
+  #type: LoadBalancer
+  ports:
+  - name: web
+    port: 9090
+    targetPort: web
+  selector:
+    app.kubernetes.io/name: prometheus
+  sessionAffinity: ClientIP
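# On the Cilium variant, the CNI can be sanity-checked with the cilium CLI
# installed above before the RMQ/Prometheus pieces go on (standard cilium-cli
# subcommands, not part of this patch):
cilium status --wait
kubectl -n kube-system get pods -l k8s-app=cilium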
diff --git a/cicd/k3s-rabbitmq-incluster/prometheus.yaml b/cicd/k3s-rabbitmq-incluster/prometheus.yaml
new file mode 100644
index 00000000..c8a2c55b
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/prometheus.yaml
@@ -0,0 +1,21 @@
+apiVersion: monitoring.coreos.com/v1
+kind: Prometheus
+metadata:
+  name: prometheus
+  labels:
+    app: prometheus
+spec:
+  image: quay.io/prometheus/prometheus:v2.22.1
+  nodeSelector:
+    kubernetes.io/os: linux
+  replicas: 3
+  resources:
+    requests:
+      memory: 400Mi
+  securityContext:
+    fsGroup: 2000
+    runAsNonRoot: true
+    runAsUser: 1000
+  serviceAccountName: prometheus
+  version: v2.22.1
+  serviceMonitorSelector: {}
diff --git a/cicd/k3s-rabbitmq-incluster/prometheus_servicemonitor.yaml b/cicd/k3s-rabbitmq-incluster/prometheus_servicemonitor.yaml
new file mode 100644
index 00000000..ea252e6d
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/prometheus_servicemonitor.yaml
@@ -0,0 +1,13 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: prometheus-self
+  labels:
+    app: prometheus
+spec:
+  endpoints:
+  - interval: 5s
+    port: web
+  selector:
+    matchLabels:
+      app: prometheus
diff --git a/cicd/k3s-rabbitmq-incluster/rmconfig.sh b/cicd/k3s-rabbitmq-incluster/rmconfig.sh
new file mode 100755
index 00000000..096e37b7
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/rmconfig.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+vagrant destroy -f worker1
+vagrant destroy -f master
+vagrant destroy -f host
diff --git a/cicd/k3s-rabbitmq-incluster/rmq/client-service.yaml b/cicd/k3s-rabbitmq-incluster/rmq/client-service.yaml
new file mode 100644
index 00000000..c3dfc6bc
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/rmq/client-service.yaml
@@ -0,0 +1,24 @@
+kind: Service
+apiVersion: v1
+metadata:
+  namespace: test-rabbitmq
+  name: rabbitmq-client
+  labels:
+    app: rabbitmq
+    type: LoadBalancer
+spec:
+  type: LoadBalancer
+  loadBalancerClass: loxilb.io/loxilb
+  externalTrafficPolicy: Local
+  ports:
+  - name: http
+    protocol: TCP
+    port: 15672
+  - name: prometheus
+    protocol: TCP
+    port: 15692
+  - name: amqp
+    protocol: TCP
+    port: 5672
+  selector:
+    app: rabbitmq
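# Once kube-loxilb reconciles rabbitmq-client (note loadBalancerClass
# loxilb.io/loxilb above), the allocated external IP shows up like any other
# LoadBalancer address:
kubectl -n test-rabbitmq get svc rabbitmq-client -o wide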
diff --git a/cicd/k3s-rabbitmq-incluster/rmq/configmap.yaml b/cicd/k3s-rabbitmq-incluster/rmq/configmap.yaml
new file mode 100644
index 00000000..5360b145
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/rmq/configmap.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: rabbitmq-config
+  namespace: test-rabbitmq
+data:
+  enabled_plugins: |
+    [rabbitmq_peer_discovery_k8s, rabbitmq_management, rabbitmq_prometheus].
+  rabbitmq.conf: |
+    cluster_formation.peer_discovery_backend = k8s
+    cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+    cluster_formation.k8s.address_type = hostname
+    cluster_formation.k8s.service_name = rabbitmq-headless
+
+    queue_master_locator=min-masters
diff --git a/cicd/k3s-rabbitmq-incluster/rmq/headless-service.yaml b/cicd/k3s-rabbitmq-incluster/rmq/headless-service.yaml
new file mode 100644
index 00000000..37b1222d
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/rmq/headless-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: rabbitmq-headless
+  namespace: test-rabbitmq
+spec:
+  clusterIP: None
+  ports:
+  - name: epmd
+    port: 4369
+    protocol: TCP
+    targetPort: 4369
+  - name: cluster-rpc
+    port: 25672
+    protocol: TCP
+    targetPort: 25672
+  selector:
+    app: rabbitmq
+  type: ClusterIP
+  sessionAffinity: None
diff --git a/cicd/k3s-rabbitmq-incluster/rmq/namespace.yaml b/cicd/k3s-rabbitmq-incluster/rmq/namespace.yaml
new file mode 100644
index 00000000..e2ef8f8b
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/rmq/namespace.yaml
@@ -0,0 +1,6 @@
+## All resources will be created in this namespace
+## To delete all resources created by this example, simply delete this namespace:
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: test-rabbitmq
diff --git a/cicd/k3s-rabbitmq-incluster/rmq/rabbitmq.yaml b/cicd/k3s-rabbitmq-incluster/rmq/rabbitmq.yaml
new file mode 100644
index 00000000..4e84bb50
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/rmq/rabbitmq.yaml
@@ -0,0 +1,24 @@
+apiVersion: rabbitmq.com/v1beta1
+kind: RabbitmqCluster
+metadata:
+  name: hello-world
+spec:
+  replicas: 3
+  service:
+    type: LoadBalancer
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+        - matchExpressions:
+          - key: "node-role.kubernetes.io/master"
+            operator: DoesNotExist
+          - key: "node-role.kubernetes.io/control-plane"
+            operator: DoesNotExist
+  override:
+    service:
+      spec:
+        loadBalancerClass: loxilb.io/loxilb
+        externalTrafficPolicy: Local
+        ports:
+        - port: 5672
diff --git a/cicd/k3s-rabbitmq-incluster/rmq/rbac.yaml b/cicd/k3s-rabbitmq-incluster/rmq/rbac.yaml
new file mode 100644
index 00000000..6403f39c
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/rmq/rbac.yaml
@@ -0,0 +1,33 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rabbitmq
+  namespace: test-rabbitmq
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rabbitmq
+  namespace: test-rabbitmq
+rules:
+- apiGroups: [""]
+  resources: ["endpoints"]
+  verbs: ["get"]
+- apiGroups: [""]
+  resources: ["events"]
+  verbs: ["create"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rabbitmq
+  namespace: test-rabbitmq
+subjects:
+- kind: ServiceAccount
+  name: rabbitmq
+  namespace: test-rabbitmq
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rabbitmq
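# With k8s peer discovery configured above, cluster formation can be verified
# from any member once the pods are Running (standard rabbitmqctl invocation,
# not part of this scenario's scripts):
kubectl -n test-rabbitmq exec rabbitmq-0 -- rabbitmqctl cluster_status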
diff --git a/cicd/k3s-rabbitmq-incluster/rmq/statefulset.yaml b/cicd/k3s-rabbitmq-incluster/rmq/statefulset.yaml
new file mode 100644
index 00000000..6422406e
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/rmq/statefulset.yaml
@@ -0,0 +1,142 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: rabbitmq
+  namespace: test-rabbitmq
+spec:
+  selector:
+    matchLabels:
+      app: "rabbitmq"
+  # headless service that gives network identity to the RMQ nodes, and enables them to cluster
+  serviceName: rabbitmq-headless # serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where "pod-specific-string" is managed by the StatefulSet controller.
+  volumeClaimTemplates:
+  - metadata:
+      name: rabbitmq-data
+      namespace: test-rabbitmq
+    spec:
+      storageClassName: standard
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: "3Gi"
+  template:
+    metadata:
+      name: rabbitmq
+      namespace: test-rabbitmq
+      labels:
+        app: rabbitmq
+    spec:
+      initContainers:
+      # Since k8s 1.9.4, config maps mount read-only volumes. Since the Docker image also writes to the config file,
+      # the file must be mounted as read-write. We use init containers to copy from the config map's read-only
+      # path to a read-write path.
+      - name: "rabbitmq-config"
+        image: busybox:1.32.0
+        volumeMounts:
+        - name: rabbitmq-config
+          mountPath: /tmp/rabbitmq
+        - name: rabbitmq-config-rw
+          mountPath: /etc/rabbitmq
+        command:
+        - sh
+        - -c
+        # the newline is needed since the Docker image entrypoint script appends to the config file
+        - cp /tmp/rabbitmq/rabbitmq.conf /etc/rabbitmq/rabbitmq.conf && echo '' >> /etc/rabbitmq/rabbitmq.conf;
+          cp /tmp/rabbitmq/enabled_plugins /etc/rabbitmq/enabled_plugins
+      volumes:
+      - name: rabbitmq-config
+        configMap:
+          name: rabbitmq-config
+          optional: false
+          items:
+          - key: enabled_plugins
+            path: "enabled_plugins"
+          - key: rabbitmq.conf
+            path: "rabbitmq.conf"
+      # read-write volume into which to copy the rabbitmq.conf and enabled_plugins files
+      # this is needed since the docker image writes to the rabbitmq.conf file
+      # and Kubernetes Config Maps are mounted as read-only since Kubernetes 1.9.4
+      - name: rabbitmq-config-rw
+        emptyDir: {}
+      - name: rabbitmq-data
+        persistentVolumeClaim:
+          claimName: rabbitmq-data
+      serviceAccount: rabbitmq
+      # The Docker image runs as the `rabbitmq` user with uid 999
+      # and writes to the `rabbitmq.conf` file.
+      # The security context is needed since the image needs
+      # permission to write to this file. Without the security
+      # context, `rabbitmq.conf` is owned by root and inaccessible
+      # by the `rabbitmq` user.
+      securityContext:
+        fsGroup: 999
+        runAsUser: 999
+        runAsGroup: 999
+      containers:
+      - name: rabbitmq
+        # Community Docker Image
+        image: rabbitmq:latest
+        volumeMounts:
+        # mounting rabbitmq.conf and enabled_plugins
+        # this should have writable access, this might be a problem
+        - name: rabbitmq-config-rw
+          mountPath: "/etc/rabbitmq"
+          # mountPath: "/etc/rabbitmq/conf.d/"
+        # rabbitmq data directory
+        - name: rabbitmq-data
+          mountPath: "/var/lib/rabbitmq/mnesia"
+        env:
+        - name: RABBITMQ_DEFAULT_PASS
+          valueFrom:
+            secretKeyRef:
+              name: rabbitmq-admin
+              key: pass
+        - name: RABBITMQ_DEFAULT_USER
+          valueFrom:
+            secretKeyRef:
+              name: rabbitmq-admin
+              key: user
+        - name: RABBITMQ_ERLANG_COOKIE
+          valueFrom:
+            secretKeyRef:
+              name: erlang-cookie
+              key: cookie
+        ports:
+        - name: amqp
+          containerPort: 5672
+          protocol: TCP
+        - name: management
+          containerPort: 15672
+          protocol: TCP
+        - name: prometheus
+          containerPort: 15692
+          protocol: TCP
+        - name: epmd
+          containerPort: 4369
+          protocol: TCP
+        livenessProbe:
+          exec:
+            # This is just an example. There is no "one true health check" but rather
+            # several rabbitmq-diagnostics commands that can be combined to form increasingly comprehensive
+            # and intrusive health checks.
+            # Learn more at https://www.rabbitmq.com/monitoring.html#health-checks.
+            #
+            # Stage 2 check:
+            command: ["rabbitmq-diagnostics", "status"]
+          initialDelaySeconds: 60
+          # See https://www.rabbitmq.com/monitoring.html for monitoring frequency recommendations.
+          periodSeconds: 60
+          timeoutSeconds: 15
+        readinessProbe: # probe to know when RMQ is ready to accept traffic
+          exec:
+            # This is just an example. There is no "one true health check" but rather
+            # several rabbitmq-diagnostics commands that can be combined to form increasingly comprehensive
+            # and intrusive health checks.
+            # Learn more at https://www.rabbitmq.com/monitoring.html#health-checks.
+            #
+            # Stage 1 check:
+            command: ["rabbitmq-diagnostics", "ping"]
+          initialDelaySeconds: 20
+          periodSeconds: 60
+          timeoutSeconds: 10
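# The StatefulSet above references rabbitmq-admin and erlang-cookie Secrets but
# does not create them; a minimal sketch (the literal values here are
# placeholders, not from this patch):
kubectl -n test-rabbitmq create secret generic rabbitmq-admin \
  --from-literal=user=admin --from-literal=pass=changeme
kubectl -n test-rabbitmq create secret generic erlang-cookie \
  --from-literal=cookie=$(openssl rand -hex 16)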
diff --git a/cicd/k3s-rabbitmq-incluster/validation.sh b/cicd/k3s-rabbitmq-incluster/validation.sh
new file mode 100755
index 00000000..c96f4ff1
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/validation.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+source ../common.sh
+echo k3s-rabbitmq-incluster
+
+if [ "$1" ]; then
+    KUBECONFIG="$1"
+fi
+
+# Set space as the delimiter
+IFS=' '
+
+sleep 45
+extIP="123.123.123.1"
+echo $extIP
+
+echo "Service Info"
+vagrant ssh master -c 'sudo kubectl get svc'
+echo "LB Info"
+vagrant ssh loxilb -c 'sudo docker exec -i loxilb loxicmd get lb -o wide'
+echo "EP Info"
+vagrant ssh loxilb -c 'sudo docker exec -i loxilb loxicmd get ep -o wide'
+
+print_debug_info() {
+    echo "llb1 route-info"
+    vagrant ssh loxilb -c 'ip route'
+    vagrant ssh master -c 'sudo kubectl get pods -A'
+    vagrant ssh master -c 'sudo kubectl get svc'
+    vagrant ssh master -c 'sudo kubectl get nodes'
+}
+
+out=$(curl -s --connect-timeout 10 http://$extIP:55002)
+if [[ ${out} == *"Welcome to nginx"* ]]; then
+    echo "k3s-rabbitmq-incluster (kube-loxilb) tcp [OK]"
+else
+    echo "k3s-rabbitmq-incluster (kube-loxilb) tcp [FAILED]"
+    print_debug_info
+    exit 1
+fi
+
+out=$(timeout 10 ../common/udp_client $extIP 55003)
+if [[ ${out} == *"Client"* ]]; then
+    echo "k3s-rabbitmq-incluster (kube-loxilb) udp [OK]"
+else
+    echo "k3s-rabbitmq-incluster (kube-loxilb) udp [FAILED]"
+    print_debug_info
+    exit 1
+fi
+
+out=$(timeout 10 ../common/sctp_client 192.168.90.1 41291 $extIP 55004)
+if [[ ${out} == *"server1"* ]]; then
+    echo "k3s-rabbitmq-incluster (kube-loxilb) sctp [OK]"
+else
+    echo "k3s-rabbitmq-incluster (kube-loxilb) sctp [FAILED]"
+    print_debug_info
+    exit 1
+fi
+
+exit
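# validation.sh accepts an optional kubeconfig path as $1, so it can be pointed
# at the copy exported by master.sh:
./validation.sh /vagrant/k3s.yaml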
diff --git a/cicd/k3s-rabbitmq-incluster/wait_ready.sh b/cicd/k3s-rabbitmq-incluster/wait_ready.sh
new file mode 100755
index 00000000..b0e69d01
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/wait_ready.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+function wait_cluster_ready {
+  Res=$(sudo kubectl get pods -A |
+  while IFS= read -r line; do
+    if [[ "$line" != *"Running"* && "$line" != *"READY"* ]]; then
+      echo "$line: not ready"
+      return
+    fi
+  done)
+  if [[ $Res == *"not ready"* ]]; then
+    echo $Res
+    return 1
+  fi
+  return 0
+}
+
+function wait_cluster_ready_full {
+  i=1
+  nr=0
+  for ((;;)) do
+    wait_cluster_ready
+    nr=$?
+    if [[ $nr == 0 ]]; then
+      echo "Cluster is ready"
+      break
+    fi
+    i=$(( $i + 1 ))
+    if [[ $i -ge 40 ]]; then
+      echo "Cluster is not ready. Giving up"
+      exit 1
+    fi
+    echo "Cluster is not ready...."
+    sleep 10
+  done
+}
+
+wait_cluster_ready_full
diff --git a/cicd/k3s-rabbitmq-incluster/worker.sh b/cicd/k3s-rabbitmq-incluster/worker.sh
new file mode 100644
index 00000000..5da2836b
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/worker.sh
@@ -0,0 +1,13 @@
+export WORKER_ADDR=$(ip a | grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+export MASTER_ADDR=$(cat /vagrant/master-ip)
+export NODE_TOKEN=$(cat /vagrant/node-token)
+
+sudo mkdir -p /etc/rancher/k3s
+sudo cp -f /vagrant/k3s.yaml /etc/rancher/k3s/k3s.yaml
+
+curl -sfL https://get.k3s.io | K3S_URL="https://${MASTER_ADDR}:6443" K3S_TOKEN="${NODE_TOKEN}" INSTALL_K3S_EXEC="--node-ip=${WORKER_ADDR} --node-external-ip=${WORKER_ADDR} --kube-proxy-arg proxy-mode=ipvs --flannel-iface=eth1" sh -
+#sudo kubectl apply -f /vagrant/nginx.yml
+#sudo kubectl apply -f /vagrant/udp.yml
+#sudo kubectl apply -f /vagrant/iperf-service.yml
+#sudo kubectl apply -f /vagrant/loxilb.yml
+/vagrant/wait_ready.sh
diff --git a/cicd/k3s-rabbitmq-incluster/worker_with_cilium.sh b/cicd/k3s-rabbitmq-incluster/worker_with_cilium.sh
new file mode 100644
index 00000000..0e7be7c6
--- /dev/null
+++ b/cicd/k3s-rabbitmq-incluster/worker_with_cilium.sh
@@ -0,0 +1,14 @@
+export WORKER_ADDR=$(ip a | grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+export MASTER_ADDR=$(cat /vagrant/master-ip)
+export NODE_TOKEN=$(cat /vagrant/node-token)
+
+sudo mkdir -p /etc/rancher/k3s
+sudo cp -f /vagrant/k3s.yaml /etc/rancher/k3s/k3s.yaml
+curl -sfL https://get.k3s.io | K3S_URL="https://${MASTER_ADDR}:6443" K3S_TOKEN="${NODE_TOKEN}" INSTALL_K3S_EXEC="--node-ip=${WORKER_ADDR} --node-external-ip=${WORKER_ADDR}" sh -
+
+#curl -sfL https://get.k3s.io | K3S_URL="https://${MASTER_ADDR}:6443" K3S_TOKEN="${NODE_TOKEN}" INSTALL_K3S_EXEC="--node-ip=${WORKER_ADDR} --node-external-ip=${WORKER_ADDR} --kube-proxy-arg proxy-mode=ipvs --flannel-iface=eth1" sh -
+#sudo kubectl apply -f /vagrant/nginx.yml
+#sudo kubectl apply -f /vagrant/udp.yml
+#sudo kubectl apply -f /vagrant/iperf-service.yml
+#sudo kubectl apply -f /vagrant/loxilb.yml
+/vagrant/wait_ready.sh
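# wait_ready.sh retries up to 40 times at 10 s intervals, i.e. roughly 400 s
# before giving up; an equivalent one-shot check by hand would be:
KUBECONFIG=/vagrant/k3s.yaml kubectl get pods -A --no-headers | grep -v Running \
  || echo "all pods Running"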
diff --git a/cicd/k8s-calico-incluster/Vagrantfile b/cicd/k8s-calico-incluster/Vagrantfile
index e68faecb..5653a665 100644
--- a/cicd/k8s-calico-incluster/Vagrantfile
+++ b/cicd/k8s-calico-incluster/Vagrantfile
@@ -5,32 +5,29 @@ require "yaml"
 settings = YAML.load_file "yaml/settings.yaml"
 
 workers = settings["nodes"]["workers"]["count"]
+loxilbs = (ENV['LOXILBS'] || "2").to_i
 
 Vagrant.configure("2") do |config|
     if Vagrant.has_plugin?("vagrant-vbguest")
         config.vbguest.auto_update = false
     end
-
-    config.vm.box = settings["software"]["cluster"]["box"]["name"]
-    config.vm.box_version = settings["software"]["cluster"]["box"]["version"]
-
-    config.vm.define "host" do |host|
+    config.vm.define "host" do |host|
         host.vm.hostname = 'host1'
-        host.vm.network :private_network, ip: settings["network"]["client_ip"], :netmask => "255.255.255.0"
+        host.vm.box = settings["software"]["cluster"]["box"]
         host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
         host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
         host.vm.provision :shell, :path => "node_scripts/host.sh"
         host.vm.provider :virtualbox do |vbox|
            vbox.customize ["modifyvm", :id, "--memory", 2048]
-           vbox.customize ["modifyvm", :id, "--cpus", 1]
+           vbox.customize ["modifyvm", :id, "--cpus", 2]
        end
    end
 
    config.vm.define "master" do |master|
-        master.vm.hostname = 'master1'
+        master.vm.box = settings["software"]["cluster"]["box"]
+        master.vm.hostname = 'master'
        master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0"
-       master.vm.network :private_network, ip: settings["network"]["control_ip2"], :netmask => "255.255.255.0"
        master.vm.provision "shell",
            env: {
                "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
@@ -50,14 +47,16 @@ Vagrant.configure("2") do |config|
 
        master.vm.provider :virtualbox do |vbox|
            vbox.customize ["modifyvm", :id, "--memory", 4096]
-           vbox.customize ["modifyvm", :id, "--cpus", 3]
+           vbox.customize ["modifyvm", :id, "--cpus", 2]
+           vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
        end
    end
 
    (1..workers).each do |node_number|
        config.vm.define "worker#{node_number}" do |worker|
+           worker.vm.box = settings["software"]["cluster"]["box"]
            worker.vm.hostname = "worker#{node_number}"
-           ip = node_number + 100
+           ip = node_number + 200
            worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
            worker.vm.provision "shell",
                env: {
@@ -70,8 +69,9 @@ Vagrant.configure("2") do |config|
            worker.vm.provision "shell", path: "node_scripts/worker.sh"
 
            worker.vm.provider :virtualbox do |vbox|
-               vbox.customize ["modifyvm", :id, "--memory", 2048]
+               vbox.customize ["modifyvm", :id, "--memory", 4096]
                vbox.customize ["modifyvm", :id, "--cpus", 2]
+               vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
            end
        end
    end
diff --git a/cicd/k8s-calico-incluster/config.sh b/cicd/k8s-calico-incluster/config.sh
index 5c415494..6abfc021 100755
--- a/cicd/k8s-calico-incluster/config.sh
+++ b/cicd/k8s-calico-incluster/config.sh
@@ -30,8 +30,38 @@ do
   sleep 10
 done
 
-# Create fullnat Services
+sudo sysctl net.ipv4.conf.vboxnet1.arp_accept=1
+
+# Create onearm and fullnat Services
+vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_onearm.yml' 2> /dev/null
+vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_onearm.yml' 2> /dev/null
+vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_onearm.yml' 2> /dev/null
 vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null
 vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_fullnat.yml' 2> /dev/null
 vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null
-vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp.yml' 2> /dev/null
+
+for((i=1; i<=60; i++))
+do
+  fin=1
+  pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE")
+
+  while IFS= read -a pods; do
+    read -a pod <<< "$pods"
+    if [[ ${pod[3]} != *"Running"* ]]; then
+      echo "${pod[1]} is not UP yet"
+      fin=0
+    fi
+  done <<< "$pods"
+  if [ $fin == 1 ];
+  then
+    echo "Cluster is ready"
+    break;
+  fi
+  echo "Will try after 10s"
+  sleep 10
+done
+
+if [[ $fin == 0 ]]; then
+  echo "Cluster is not ready"
+  exit 1
+fi
diff --git a/cicd/k8s-calico-incluster/node_scripts/common.sh b/cicd/k8s-calico-incluster/node_scripts/common.sh
index cf1e66f1..c01ad688 100644
--- a/cicd/k8s-calico-incluster/node_scripts/common.sh
+++ b/cicd/k8s-calico-incluster/node_scripts/common.sh
@@ -26,7 +26,7 @@ sudo apt-get update -y
 
 # Install CRI-O Runtime
 
 VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')"
-
+CRIO_VERSION=1.27
 # Create the .conf file to load the modules at bootup
 cat <