diff --git a/docs/data/scripts/kilobase/version_bump.sh b/docs/data/scripts/kilobase/version_bump.sh new file mode 100755 index 0000000000..5eee616a5e --- /dev/null +++ b/docs/data/scripts/kilobase/version_bump.sh @@ -0,0 +1,128 @@
#!/bin/bash
#
# version_bump.sh — bump the kilobase patch version and keep it in sync
# across Cargo.toml, project.json, and the supabase chart values.yaml.
#
# Usage: version_bump.sh [new_version]
#   With no argument, the current version is read from Cargo.toml and its
#   patch number is incremented.

set -euo pipefail

# Resolve the directory of this script and the repository root.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SCRIPT_DIR
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../../../" && pwd)"  # Root of the project
readonly PROJECT_ROOT

# Print the current `version = "X.Y.Z"` value from a Cargo.toml.
# Arguments: $1 - path to Cargo.toml
# Outputs:   the bare version string (may be empty if no match)
read_current_version_from_cargo() {
  local cargo_toml_path=$1
  grep -E '^version = "[0-9]+\.[0-9]+\.[0-9]+"' "$cargo_toml_path" \
    | sed 's/version = "\(.*\)"/\1/'
}

# Increment the patch component of a semver string (X.Y.Z -> X.Y.(Z+1)).
# Arguments: $1 - version string
# Outputs:   the bumped version string
increment_version() {
  local version=$1 major minor patch
  IFS=. read -r major minor patch <<<"$version"
  echo "$major.$minor.$((patch + 1))"
}

# Rewrite the `version = "..."` line in Cargo.toml (keeps a .bak backup).
update_version_in_cargo_toml() {
  local new_version=$1 cargo_toml_path=$2

  if sed -i.bak "s/^version = \".*\"/version = \"$new_version\"/" "$cargo_toml_path"; then
    echo "Version updated to $new_version in $cargo_toml_path"
  else
    echo "Failed to update the version in Cargo.toml" >&2
    exit 1
  fi
}

# Rewrite the image tags array in project.json. The second tag is pinned
# to the "15.1" release channel on purpose.
update_version_in_project_json() {
  local new_version=$1 project_json_path=$2

  # Replace the version tags regardless of what the current values are.
  if sed -i.bak "s/\"tags\": \[\"[0-9.]*\", \"[0-9.]*\"\]/\"tags\": [\"$new_version\", \"15.1\"]/" "$project_json_path"; then
    echo "Version updated to $new_version in $project_json_path"
  else
    echo "Failed to update the version in project.json" >&2
    exit 1
  fi
}

# Rewrite the image tag in values.yaml, restricted to the db.image section.
update_version_in_values_yaml() {
  local new_version=$1 values_yaml_path=$2

  if sed -i.bak -e '/db:/,/tag:/s/tag: .*/tag: '"'$new_version'"'/' "$values_yaml_path"; then
    echo "Version updated to $new_version in $values_yaml_path"
  else
    echo "Failed to update the version in values.yaml" >&2
    exit 1
  fi
}

# Remove the .bak files sed left behind after a successful update.
# Arguments: one or more paths whose "<path>.bak" siblings should be removed.
cleanup_backups() {
  local path
  for path in "$@"; do
    if [ -f "${path}.bak" ]; then
      rm -f -- "${path}.bak"
      echo "Removed backup: ${path}.bak"
    fi
  done
}

# Apply the new version to all three tracked files, then clean up backups.
update_versions() {
  local new_version=$1

  # The three files that must always carry the same kilobase version.
  local cargo_toml_path="$PROJECT_ROOT/apps/kilobase/Cargo.toml"
  local project_json_path="$PROJECT_ROOT/apps/kilobase/project.json"
  local values_yaml_path="$PROJECT_ROOT/migrations/kube/charts/kilobase/supabase/values.yaml"

  update_version_in_cargo_toml "$new_version" "$cargo_toml_path"
  update_version_in_project_json "$new_version" "$project_json_path"
  update_version_in_values_yaml "$new_version" "$values_yaml_path"

  cleanup_backups "$cargo_toml_path" "$project_json_path" "$values_yaml_path"
}

# Entry point: use the provided version, or bump the patch of the current one.
if [ $# -eq 0 ]; then
  echo "No version argument provided. Reading current version from Cargo.toml."

  cargo_toml_path="$PROJECT_ROOT/apps/kilobase/Cargo.toml"

  current_version=$(read_current_version_from_cargo "$cargo_toml_path")
  # Guard against an empty match, which would otherwise "bump" to "..1".
  if [ -z "$current_version" ]; then
    echo "Could not read a version from $cargo_toml_path" >&2
    exit 1
  fi
  new_version=$(increment_version "$current_version")

  echo "Current version: $current_version. Bumping to: $new_version."
else
  new_version=$1
  echo "Using provided version: $new_version."
fi

# Call the main function to update all versions.
update_versions "$new_version"
diff --git a/docs/index.html b/docs/index.html index b8ac0e702d..bd4a34673c 100644 --- a/docs/index.html +++ b/docs/index.html @@ -205,7 +205,7 @@

Meet Your Dream Dev Team!

Advanced gaming and software solutions - Elevate your experience with cutting-edge technology! From k8s to AI, we got you covered!

Meet the Space Dwarven Squad -

Latest Commit by h0lybyte : -

"Merge pull request #2829 from KBVE/beta +

"Merge pull request #2868 from KBVE/beta Preparing Release Branch"

-On:

-View this commit on +On:

+View this commit on GitHub .

Stack of ScrewFast product boxes containing assorted hardware tools


KBVE Creations: Pioneering Digital Frontiers

Explore Our World of Groundbreaking Games, Dynamic Websites, and Innovative Applications

Rust icon docker + +September: 24th | KiloByte Virtual Engine + + +
Daily Post Image from Upsplash

September: 24th

2024

+

Kilobase

+

Already 2am, oh man, the time seems to fly when troubleshooting through kubernetes. +The goal will be to get the majority of the postgres operational but there is a room full of errors that we need to resolve on the side. +The kong settings need to be adjusted a bit for the wrapper.sh and the functions, which I will start while waiting for the build to move forward.

+
    +
  • 2:26am
  • +
+

The current error is that it wants to pull the 15.1 image of kilobase, but we released a specific image called 15.1.1. +Maybe the issue might be because of cache, regardless, going to go back and switch the pull to Always for now and then see where it goes from there. +Actually, we could also update the image tag and make it publish with a 15.1 and the 15.1.1. +In this case, we will do both and see where it goes from now, going to push out this branch and create a new branch.

+
    +
  • 3:19am
  • +
+

There seems to be a mismatch between the usermod for the postgres, so I am going back and switching it back to the way that Supabase had it, which was 101 for postgres. +We will bump it up to version 15.1.2 but still release it under 15.1, but I wonder if that will cause problems, hmm. +Lets bump up the chart version too, maybe that will help with the current situation.

+
    +
  • 4:42am
  • +
+

Progress has been made, we were able to get the initial databases up and running, but there are still some issues with the other functions unable to connect. +Going to have to go back around and update all the DB_HOST values to be our kubernetes value.

+
    +
  • 4:57am
  • +
+

The function container aka edge container seems to be throwing an error because of the read-only file system, so let us go ahead and update it so that it can write the cache. +Under its values.yaml, we will go ahead and update the function setting, readOnlyRootFilesystem: false, which should now let us write. +One error after another, lets get this going, going to do it for all of them. +The other idea would be to just fork each of the images that supabase has and make the file changes we want, then publish them as our own images. +This way we can then force them to be read-only, but that will be way later on in the system. +Getting closer to the goal, with 35/41 resources running.

+

Updated the configmap with everything else we might need! Lets hope that it will solve our issue and get this baby closer to operational.

+

Worker

+

Expanded the worker5 sda3 to 50GB and now I am thinking of looping back around and expanding all the remaining VMs in the master series. +Adding another 18Gb to each of the masters would help with the images that we are pulling and should give us a bit more room to be flexible. +In the future, with our next worker6 and worker7, we can give them a decent bulk size but that will require some additional planning to pull off. +We did a quick resize to all the 5 original nodes.

+

Markets

+

What a rebound in the markets, good to see the post fed rates finally starting to give that much needed boost back to infinite. +We just need nvidia to come back into that $125 zone and we are officially printing bags.

+

SQL

+
    +
  • 12:40pm
  • +
+

The biggest issue that we have is in the configmap for the post install, which I am thinking we could resolve with our own SQL statements afterwards. +The current idea would be to isolate these additional SQL statements into their own configmap and then apply them after the main init.

+
    +
  • 4:00pm
  • +
+

It seems that the $PGUSER and the $PGPASS are reserved for the operator, so we have to remove those from the env. +Thus we can switch back around and update the kilobase-additional-sql-postgres with references to those two variables, now I wonder if the $JWT_SECRET will still pass through. +Going to delete the fleet that we just deployed and start the whole process again.

+

+
+    -- migrate:up 99-jwt.sql
+    ALTER DATABASE postgres SET "app.settings.jwt_secret" TO '{{ .Values.secret.jwt.secretRefKey.secret }}';
+    ALTER DATABASE postgres SET "app.settings.jwt_exp" TO '{{ .Values.db.environment.JWT_EXP | default "3600" }}';
+
+    -- migrate:down
+
+    -- migrate:up 99-logs.sql
+    CREATE SCHEMA IF NOT EXISTS _analytics;
+    ALTER SCHEMA _analytics OWNER TO '{{ .Values.secret.db.secretRefKey.username }}';
+
+    -- migrate:down
+
+    -- migrate:up 99-realtime.sql
+    CREATE SCHEMA IF NOT EXISTS _realtime;
+    ALTER SCHEMA _realtime OWNER TO '{{ .Values.secret.db.secretRefKey.username }}';
+
+    -- migrate:down
+
+    -- migrate:up 99-roles.sql
+    -- NOTE: change to your own passwords for production environments
+    ALTER USER authenticator WITH PASSWORD '{{ .Values.secret.db.secretRefKey.password }}';
+    ALTER USER pgbouncer WITH PASSWORD '{{ .Values.secret.db.secretRefKey.password }}';
+    ALTER USER supabase_auth_admin WITH PASSWORD '{{ .Values.secret.db.secretRefKey.password }}';
+    ALTER USER supabase_functions_admin WITH PASSWORD '{{ .Values.secret.db.secretRefKey.password }}';
+    ALTER USER supabase_storage_admin WITH PASSWORD '{{ .Values.secret.db.secretRefKey.password }}';
+
+
+

These are the SQL statements that we will have to manually execute on the container to get the rest of the instance up and running. +Afterwards we can run barman and see if it moves the database into the s3 bucket as a backup. +If the WAL and the DB get saved to the s3, we can deploy it all again but with a recovery mode, then that should be enough to handle our generic needs for now.

+

Meta

+

The meta data issue that I am facing is:

+

Failed to create resource: admission webhook “vcluster.cnpg.io” denied the request: +Cluster.postgresql.cnpg.io “kilobase-migrations-kube-charts-kilobase-supabase-supabase-db” is invalid: +metadata.name: Invalid value: “kilobase-migrations-kube-charts-kilobase-supabase-supabase-db”: the maximum length of a cluster name is 50 characters

+

We could try to scale some of the names back to help with the deployment.

+

2023

+
    +
  • 9:35am - I woke up a bit late today and I think I am going to go back to sleep a bit more, I need to catch up on my ZZZs.
  • +
  • 5:03pm - Okay there are a couple things that I need to overlook when it comes to handling the YoRHa UI, but the issues are still a bit of a pain.
  • +
+

Quote

+
+

No party has a monopoly on wisdom. No democracy works without compromise. +— Barack Obama

+
+
+

Tasks

+
    +
  • [ ]
  • +
\ No newline at end of file diff --git a/docs/journal/09-25/index.html b/docs/journal/09-25/index.html new file mode 100644 index 0000000000..8add9d10e0 --- /dev/null +++ b/docs/journal/09-25/index.html @@ -0,0 +1,311 @@ + +September: 25th | KiloByte Virtual Engine + + +
Daily Post Image from Upsplash

September: 25th

2024

+

Fleet

+

The 2am grindset is real! +We got the ghost stories playing in the background, which describes exactly how I feel when operating in kubernetes! +After we execute the manual SQL queries, we need to wrap back around and update the shared extensions. +There is another point of error that I have to look into, which is the pgnet because I believe the cURL that it requires is only in ubuntu 24 and not ubuntu 20. +Hopefully the 15.6.x postgres image that Supabase plans to release or 16.x postgres image hopefully is built using nix and ubuntu 24.

+

Shared

+

2:28am - Preload Error

+

Looks like the shared_preload_libraries did not in fact load the pg_net, so we will have to go back around and fix that. +Furthermore, we can also use this part to load the kilobase extension too, okay made some changes to the chart to include the extensions.

+

Kilobase Extension

+

Our custom extension did not seem to load and thus we need to go back and change the build. +I believe this should be an easy issue to resolve because we need to remove the OpenSSL part of the reqwest client. +Okay there was a small step that we skipped, which would be to bump the version control up, making sure that we are using the latest version.

+

When doing the version bump, we need to edit a couple files and in the future we should have a shell script that would make these changes for us during a pull request. +Maybe after an official image is published, have it go through and make changes to all the files.

+
    +
  • ”Cargo.toml” under /apps/kilobase/
  • +
  • “project.json” under /apps/kilobase/
  • +
  • “values.yaml” under the /migrations/kube/charts/kilobase/supabase/
  • +
+

Keeping the kilobase version consistent across all three files will be important.

+

Okay the shell script version_bump.sh handles this.

+

Next error is in the missing libc6, which I am assuming the linking was broken? +Let us go ahead and add it into the Dockerfile and see if it helps solve that problem. +Damn it, the error was not because of a linking problem but rather a mis-match in the operating system for building the extension on ubuntu-24 and running that so file in ubuntu-20. +I have been moving around different ubuntu versions and forgot that ubuntu-24 is definitely the wave. Ugh, here are the rest of the notes related to this.

+

Okay there are two ways we can fix this issue, we can fix the action that runs the extension builder using ubuntu-20.04 instead of ubuntu-latest? +Or we can move the extension building back into the Dockerfile as we had it from the start, ugh, that means we need to make a couple adjustments to our overall build process. +The first step in setting up the docker build would be to directly reference the jedi package via the crates rather than a local path. +That would split the updating for the jedi package up a bit but for now it should be okay, I can sense the future problems that this might cause with our locks but all of this is part of the learning experience.

+

We will shift the Cargo.toml from :

+

+jedi = { path = "../../packages/jedi" }
+# TO
+jedi = "0.1.14"
+
+
+

Actually decoupling the direct reference to jedi and maintaining the jedi workflow will be a better way for our future development cycles.

+

Now this also means that we need to adjust the Dockerfile next, preparing the STAGE 1 again for building out the kilobase extension.

+

Docker

+

Dear lord building docker images on Windows WSL is terrible, I am honestly waiting forever for docker builds and some of the errors are just being skipped. +I guess I been spoiled with orbstack and some of the newer tech, that going back to Docker desktop feels like entering dev cycle hell all over again. +Going to version bump the whole container and see what the errors are within the dev branch, then go back around and compare the errors from my end? +That would mean we have to comment out some of the dev ci workflow as well because we moved all of pgrx into the Dockerfile, but it might make sense to test it before going into the Docker buildx. +Both of these options would make sense in this situation, testing the extension building before doing the full docker build, maybe within the dev cycle we will keep the extension tests. +Then inside of the alpha branch, we will do a pgrx build and then a docker build test, this way we can keep both of the tests but split them up a bit between the two branches.

+

GLIBC

+
    +
  • 7:00pm
  • +
+

Error log from the kubectl logs supabase-release-supabase-db-1-initdb-flnzv -n supabase

+

+{
+  "level":"info",
+  "ts":"2024-09-25T22:16:10Z",
+  "logger":"pg_ctl",
+  "msg":"waiting for server to start....2024-09-25 22:16:10.661 UTC [29] FATAL:  could not load library \"/usr/lib/postgresql/15/lib/kilobase.so\": /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.33' not found (required by /usr/lib/postgresql/15/lib/kilobase.so)",
+  "pipe":"stdout",
+  "logging_pod":"supabase-release-supabase-db-1-initdb"
+}
+
+
+

Keeping this error in the notes as a reference as I know we will have this issue in the future again when we have to rebuild the supabase image using the ubuntu 24 / postgres 16.

+

NVDA

+

Ending up selling all the Nvidia call options and preparing to move some of the cash into CDs before the rates start to drop! +This will be a good way to save some extra cash that I will have to pay the IRS anyhow, might as well have it sit and collect some interest before the start of 2025. +So profits, about 50%, will be moved into a CD that should end right before April 2025, then we pay the IRS with that large sum, overpaying them and having them deposit back the excess cash into a T bill account. +One of my goals would be to get around six figures in T Bill funds before the age of 40, this would be then used as an extra extra extra emergency fund for the future, granted the rate of return on the t bills are pretty trash but all of this is just part of that experience.

+ +

2023

+
    +
  • 10:05am - I completely forgot about the work backlog, so I need to focus on catching up on that end. I will dedicate a couple hours to just getting myself ahead of the backlog? I could mix it in with the other backlogs, so that I do not get too bored of handling issue tickets.
  • +
  • 11:17am - The storage of the meta engine should really be done on Amazon or some sort of external cloud provider, I found that the metadata being on AWS was the best option in this hybrid cluster setup. The future really seems to be a combination of a hybrid cloud, where a decent amount of the important meta-data would be stored in the cloud while the heavy functions and files are held within your own instance.
  • +
  • 4:22pm - Scaling the YoRHa UI down to just around 400px~ and keeping it there for now. This way its mobile focused at first before I migrate it be desktop / landscape friendly. I might shift out of the UX/UI area of the widget and focus more on the backend and Appwrite integration for the time being. I suppose these notes are just for getting a better understanding from my end.
  • +
  • 6:36pm - Okay I was able to swap out the general characters for the Game! I am thinking of keeping it simple for now and only three playable characters before migrating over to a bit more advance style features. There are a couple other aspects that I wanted to overview.
  • +
+

Quote

+
+

Love is the only force capable of transforming an enemy into a friend. +— Martin Luther King Jr.

+
+
+

Tasks

+
    +
  • [ ]
  • +
\ No newline at end of file diff --git a/docs/journal/09-26/index.html b/docs/journal/09-26/index.html new file mode 100644 index 0000000000..64a3e55cd1 --- /dev/null +++ b/docs/journal/09-26/index.html @@ -0,0 +1,271 @@ + +September: 26th | KiloByte Virtual Engine + + +
Daily Post Image from Upsplash

September: 26th

2024

+

RimWorld +Today was a rough day, ended up playing a bit too much of rim world again. +But it is a Friday Jr night, so we are good to go in that situation! +Oh, I also got the pokemon world mod added and it's been a super thrilling ride, hmm so far with the mechanoids too. +We got ourselves a poke-robo army and slowly adding more numbers to our stack, I bet we could easily hit around 100 units with this play through.

+

PGRX +The other issue is still open with the pgrx bindings, I am thinking that we can go two ways with this now? +Actually there are three ways we can go with this! +The first would be to just drop the custom extension, this is the easy route because then we can move forward without any major issues. +Taking a look at the way that supabase handles the pgrx, it seems that they are on version 0.11.3 with the shim, maybe we can downgrade and see if that might be able to help? +From my understanding the current ubuntu 20 LTS is where our build is failing but it should not be, hmm. +The exact issue we are facing is listed here and it is in relation to the pgrx bindgen for C.

+

I am not trying to give up, maybe we can try to build the extension using the pgx

+

Backup

+

While I am looking through trying to resolve this bindgen situation with the extension, I did make some progress within the backup and recovery. +My idea would be to set it up so that not only can our github actions deploy the latest stacks but we can even go a step further and setup a semi-automatic backup through the actions. +The idea would be to have a custom github action that would do something along these lines:

+
    +
  1. Create an issue ticket for the backup job
  2. +
  3. Prepare a couple sha256 reference shas for the job, backup postgres data with logical and physical and then use the barman to handle the backup.
  4. +
  5. Once the backup is done, it will go back to the issue ticket and state that the backup was done and it will then prepare the next stage.
  6. +
  7. After it confirms that the backup is done, it can keep a hot-swap of the backup in another cloud provider, giving us a backup in both AWS and another party.
  8. +
  9. Furthermore, we can have the action place the WAL and logical backup into our local cluster and then have it prepare a recovery test.
  10. +
  11. At this point the issue ticket would still be open but it would move down the priority list from a 6 to a 3 or 4 and then begin the recovery deployment at the local cluster.
  12. +
  13. The recovery cluster should be able to deploy in the local cluster and provide the information to verify that it was successful, ie update the issue ticket and place it as a matrix of 1.
  14. +
  15. The final step before closing the issue ticket would then be to give a full report of everything it did today.
  16. +
+

This would be the barebones focus for now, during this issue ticket creation, we can also include a NX Report and any other information that we would like to include.

+

Weekly Meeting

+

Finally the other goal that I had before the end of the month,

+ +

2023

+
    +
  • 6:23pm - Over the night I was able to finish up the Helmet integration with the YoRHa UI widget and now I am going to add the footer buttons for the template. I am thinking that the footer should have about just two buttons, one that would be for the version / game data and another for the external services? I do not want to spend too much time trying to figure that out per say and just move forward with the next issue.
  • +
  • 9:00pm - The current size of the widget is at around 280kb and I am thinking that I might be able to reduce the size by wrapping the modal into its own internal function and then calling it with children props. This way the size is standard across the board or I could have an action display the modal, which would be also interesting as an approach.
  • +
  • 10:51pm - The bundle size has increased to around 290kb, this might become a problem because there are some issues that I am seeing already.
  • +
+

Quote

+
+

Things do not happen. Things are made to happen. +— John F. Kennedy

+
+
+

Tasks

+
    +
  • [ ]
  • +
\ No newline at end of file diff --git a/docs/journal/index.html b/docs/journal/index.html index 8f79b98401..b82cdbe169 100644 --- a/docs/journal/index.html +++ b/docs/journal/index.html @@ -200,7 +200,16 @@ }); }); }); -

The Creator’s Diary: Adventures in Art, Tech, and Cinema

Explore daily dispatches from a modern-day Renaissance journey, spanning the realms of art, technology, and ancient wisdom. Dive into a world where algorithms meet aesthetics and every day is a new adventure in beta.

journalEntry.data.cardImageAlt

September 23rd.

journalEntry.data.authorImageAlt

KBVE Team

KBVE Member

The Creator’s Diary: Adventures in Art, Tech, and Cinema

Explore daily dispatches from a modern-day Renaissance journey, spanning the realms of art, technology, and ancient wisdom. Dive into a world where algorithms meet aesthetics and every day is a new adventure in beta.

journalEntry.data.cardImageAlt

September 26th.

journalEntry.data.authorImageAlt

KBVE Team

KBVE Member