diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..2f8f89b --- /dev/null +++ b/.dockerignore @@ -0,0 +1,26 @@ +# Ignore Git and GitHub files +.git +.github/ + +# Ignore Husky configuration files +.husky/ + +# Ignore documentation and metadata files +CONTRIBUTING.md +LICENSE +README.md + +# Ignore environment examples and sensitive info +.env +*.local +*.example + +# Ignore node modules, logs and cache files +**/*.log +**/node_modules +**/dist +**/build +**/.cache +logs +dist-ssr +.DS_Store diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..5274ff0 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +indent_style = space +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 120 +indent_size = 2 + +[*.md] +trim_trailing_whitespace = false diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..2d736a7 --- /dev/null +++ b/.env.example @@ -0,0 +1,106 @@ +# Rename this file to .env once you have filled in the below environment variables! + +# Get your GROQ API Key here - +# https://console.groq.com/keys +# You only need this environment variable set if you want to use Groq models +GROQ_API_KEY= + +# Get your HuggingFace API Key here - +# https://huggingface.co/settings/tokens +# You only need this environment variable set if you want to use HuggingFace models +HuggingFace_API_KEY= + + +# Get your Open AI API Key by following these instructions - +# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key +# You only need this environment variable set if you want to use GPT models +OPENAI_API_KEY= + +# Get your Anthropic API Key in your account settings - +# https://console.anthropic.com/settings/keys +# You only need this environment variable set if you want to use Claude models +ANTHROPIC_API_KEY= + +# Get your OpenRouter API Key in your account settings - +# https://openrouter.ai/settings/keys +# You only need this environment variable set if you want to use OpenRouter models +OPEN_ROUTER_API_KEY= + +# Get your Google Generative AI API Key by following these instructions - +# https://console.cloud.google.com/apis/credentials +# You only need this environment variable set if you want to use Google Generative AI models +GOOGLE_GENERATIVE_AI_API_KEY= + +# You only need this environment variable set if you want to use oLLAMA models +# DONT USE http://localhost:11434 due to IPV6 issues +# USE EXAMPLE http://127.0.0.1:11434 +OLLAMA_API_BASE_URL= + +# You only need this environment variable set if you want to use OpenAI Like models +OPENAI_LIKE_API_BASE_URL= + +# You only need this environment variable set if you want to use Together AI models +TOGETHER_API_BASE_URL= + +# You only need this environment variable set if you want to use DeepSeek models through their API +DEEPSEEK_API_KEY= + +# Get your OpenAI Like API Key +OPENAI_LIKE_API_KEY= + +# Get your Together API Key +TOGETHER_API_KEY= + +# You only need this environment variable set if you want to use Hyperbolic models +#Get your Hyperbolics API Key at https://app.hyperbolic.xyz/settings +#baseURL="https://api.hyperbolic.xyz/v1/chat/completions" +HYPERBOLIC_API_KEY= +HYPERBOLIC_API_BASE_URL= + +# Get your Mistral API Key by following these instructions - +# https://console.mistral.ai/api-keys/ +# You only need this environment variable set if you want to use Mistral models +MISTRAL_API_KEY= + +# Get the Cohere Api key by following these instructions - +# 
https://dashboard.cohere.com/api-keys +# You only need this environment variable set if you want to use Cohere models +COHERE_API_KEY= + +# Get LMStudio Base URL from LM Studio Developer Console +# Make sure to enable CORS +# DONT USE http://localhost:1234 due to IPV6 issues +# Example: http://127.0.0.1:1234 +LMSTUDIO_API_BASE_URL= + +# Get your xAI API key +# https://x.ai/api +# You only need this environment variable set if you want to use xAI models +XAI_API_KEY= + +# Get your Perplexity API Key here - +# https://www.perplexity.ai/settings/api +# You only need this environment variable set if you want to use Perplexity models +PERPLEXITY_API_KEY= + +# Get your AWS configuration +# https://console.aws.amazon.com/iam/home +# The JSON should include the following keys: +# - region: The AWS region where Bedrock is available. +# - accessKeyId: Your AWS access key ID. +# - secretAccessKey: Your AWS secret access key. +# - sessionToken (optional): Temporary session token if using an IAM role or temporary credentials. +# Example JSON: +# {"region": "us-east-1", "accessKeyId": "yourAccessKeyId", "secretAccessKey": "yourSecretAccessKey", "sessionToken": "yourSessionToken"} +AWS_BEDROCK_CONFIG= + +# Include this environment variable if you want more logging for debugging locally +VITE_LOG_LEVEL=debug + +# Example Context Values for qwen2.5-coder:32b +# +# DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM +# DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM +# DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM +# DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM +DEFAULT_NUM_CTX= diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000..8b66eb1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,73 @@ +name: "Bug report" +description: Create a report to help us improve +body: + - type: markdown + attributes: + value: | + Thank you for reporting an issue :pray:. + + This issue tracker is for bugs and issues found with [Bolt.diy](https://bolt.diy). + If you experience issues related to WebContainer, please file an issue in the official [StackBlitz WebContainer repo](https://github.com/stackblitz/webcontainer-core). + + The more information you fill in, the better we can help you. + - type: textarea + id: description + attributes: + label: Describe the bug + description: Provide a clear and concise description of what you're running into. + validations: + required: true + - type: input + id: link + attributes: + label: Link to the Bolt URL that caused the error + description: Please do not delete it after reporting! + validations: + required: true + - type: textarea + id: steps + attributes: + label: Steps to reproduce + description: Describe the steps we have to take to reproduce the behavior. + placeholder: | + 1. Go to '...' + 2. Click on '....' + 3. Scroll down to '....' + 4. See error + validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected behavior + description: Provide a clear and concise description of what you expected to happen. + validations: + required: true + - type: textarea + id: screenshots + attributes: + label: Screen Recording / Screenshot + description: If applicable, **please include a screen recording** (preferably) or screenshot showcasing the issue. This will assist us in resolving your issue quickly. + - type: textarea + id: platform + attributes: + label: Platform + value: | + - OS: [e.g. macOS, Windows, Linux] + - Browser: [e.g. 
Chrome, Safari, Firefox] + - Version: [e.g. 91.1] + - type: input + id: provider + attributes: + label: Provider Used + description: Tell us the provider you are using. + - type: input + id: model + attributes: + label: Model Used + description: Tell us the model you are using. + - type: textarea + id: additional + attributes: + label: Additional context + description: Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..1fbea24 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: false +contact_links: + - name: Bolt.new related issues + url: https://github.com/stackblitz/bolt.new/issues/new/choose + about: Report issues related to Bolt.new (not Bolt.diy) + - name: Chat + url: https://thinktank.ottomator.ai + about: Ask questions and discuss with other Bolt.diy users. diff --git a/.github/ISSUE_TEMPLATE/epic.md b/.github/ISSUE_TEMPLATE/epic.md new file mode 100644 index 0000000..2727594 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/epic.md @@ -0,0 +1,23 @@ +--- +name: Epic +about: Epics define long-term vision and capabilities of the software. They will never be finished but serve as umbrella for features. +title: '' +labels: + - epic +assignees: '' +--- + +# Strategic Impact + + + +# Target Audience + + + +# Capabilities + + diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md new file mode 100644 index 0000000..8df8c32 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.md @@ -0,0 +1,28 @@ +--- +name: Feature +about: A pretty vague description of how a capability of our software can be added or improved. +title: '' +labels: + - feature +assignees: '' +--- + +# Motivation + + + +# Scope + + + +# Options + + + +# Related + + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..6cd1e74 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,23 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' +--- + +**Is your feature request related to a problem? Please describe:** + + + +**Describe the solution you'd like:** + + + +**Describe alternatives you've considered:** + + + +**Additional context:** + + diff --git a/.github/actions/setup-and-build/action.yaml b/.github/actions/setup-and-build/action.yaml new file mode 100644 index 0000000..b27bc6f --- /dev/null +++ b/.github/actions/setup-and-build/action.yaml @@ -0,0 +1,32 @@ +name: Setup and Build +description: Generic setup action +inputs: + pnpm-version: + required: false + type: string + default: '9.4.0' + node-version: + required: false + type: string + default: '20.15.1' + +runs: + using: composite + + steps: + - uses: pnpm/action-setup@v4 + with: + version: ${{ inputs.pnpm-version }} + run_install: false + + - name: Set Node.js version to ${{ inputs.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ inputs.node-version }} + cache: pnpm + + - name: Install dependencies and build project + shell: bash + run: | + pnpm install + pnpm run build diff --git a/.github/scripts/generate-changelog.sh b/.github/scripts/generate-changelog.sh new file mode 100755 index 0000000..e630012 --- /dev/null +++ b/.github/scripts/generate-changelog.sh @@ -0,0 +1,261 @@ +#!/usr/bin/env bash + +# Ensure we're running in bash +if [ -z "$BASH_VERSION" ]; then + echo "This script requires bash. 
Please run with: bash $0" >&2 + exit 1 +fi + +# Ensure we're using bash 4.0 or later for associative arrays +if ((BASH_VERSINFO[0] < 4)); then + echo "This script requires bash version 4 or later" >&2 + echo "Current bash version: $BASH_VERSION" >&2 + exit 1 +fi + +# Set default values for required environment variables if not in GitHub Actions +if [ -z "$GITHUB_ACTIONS" ]; then + : "${GITHUB_SERVER_URL:=https://github.com}" + : "${GITHUB_REPOSITORY:=stackblitz-labs/bolt.diy}" + : "${GITHUB_OUTPUT:=/tmp/github_output}" + touch "$GITHUB_OUTPUT" + + # Running locally + echo "Running locally - checking for upstream remote..." + MAIN_REMOTE="origin" + if git remote -v | grep -q "upstream"; then + MAIN_REMOTE="upstream" + fi + MAIN_BRANCH="main" # or "master" depending on your repository + + # Ensure we have latest tags + git fetch ${MAIN_REMOTE} --tags + + # Use the remote reference for git log + GITLOG_REF="${MAIN_REMOTE}/${MAIN_BRANCH}" +else + # Running in GitHub Actions + GITLOG_REF="HEAD" +fi + +# Get the latest tag +LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "") + +# Start changelog file +echo "# πŸš€ Release v${NEW_VERSION}" > changelog.md +echo "" >> changelog.md +echo "## What's Changed 🌟" >> changelog.md +echo "" >> changelog.md + +if [ -z "$LATEST_TAG" ]; then + echo "### πŸŽ‰ First Release" >> changelog.md + echo "" >> changelog.md + echo "Exciting times! This marks our first release. Thanks to everyone who contributed! πŸ™Œ" >> changelog.md + echo "" >> changelog.md + COMPARE_BASE="$(git rev-list --max-parents=0 HEAD)" +else + echo "### πŸ”„ Changes since $LATEST_TAG" >> changelog.md + echo "" >> changelog.md + COMPARE_BASE="$LATEST_TAG" +fi + +# Function to extract conventional commit type and associated emoji +get_commit_type() { + local msg="$1" + if [[ $msg =~ ^feat(\(.+\))?:|^feature(\(.+\))?: ]]; then echo "✨ Features" + elif [[ $msg =~ ^fix(\(.+\))?: ]]; then echo "πŸ› Bug Fixes" + elif [[ $msg =~ ^docs(\(.+\))?: ]]; then echo "πŸ“š Documentation" + elif [[ $msg =~ ^style(\(.+\))?: ]]; then echo "πŸ’Ž Styles" + elif [[ $msg =~ ^refactor(\(.+\))?: ]]; then echo "♻️ Code Refactoring" + elif [[ $msg =~ ^perf(\(.+\))?: ]]; then echo "⚑ Performance Improvements" + elif [[ $msg =~ ^test(\(.+\))?: ]]; then echo "πŸ§ͺ Tests" + elif [[ $msg =~ ^build(\(.+\))?: ]]; then echo "πŸ› οΈ Build System" + elif [[ $msg =~ ^ci(\(.+\))?: ]]; then echo "βš™οΈ CI" + elif [[ $msg =~ ^chore(\(.+\))?: ]]; then echo "" # Skip chore commits + else echo "πŸ” Other Changes" # Default category with emoji + fi +} + +# Initialize associative arrays +declare -A CATEGORIES +declare -A COMMITS_BY_CATEGORY +declare -A ALL_AUTHORS +declare -A NEW_CONTRIBUTORS + +# Get all historical authors before the compare base +while IFS= read -r author; do + ALL_AUTHORS["$author"]=1 +done < <(git log "${COMPARE_BASE}" --pretty=format:"%ae" | sort -u) + +# Process all commits since last tag +while IFS= read -r commit_line; do + if [[ ! $commit_line =~ ^[a-f0-9]+\| ]]; then + echo "WARNING: Skipping invalid commit line format: $commit_line" >&2 + continue + fi + + HASH=$(echo "$commit_line" | cut -d'|' -f1) + COMMIT_MSG=$(echo "$commit_line" | cut -d'|' -f2) + BODY=$(echo "$commit_line" | cut -d'|' -f3) + # Skip if hash doesn't match the expected format + if [[ ! $HASH =~ ^[a-f0-9]{40}$ ]]; then + continue + fi + + HASH=$(echo "$commit_line" | cut -d'|' -f1) + COMMIT_MSG=$(echo "$commit_line" | cut -d'|' -f2) + BODY=$(echo "$commit_line" | cut -d'|' -f3) + + + # Validate hash format + if [[ ! 
$HASH =~ ^[a-f0-9]{40}$ ]]; then + echo "WARNING: Invalid commit hash format: $HASH" >&2 + continue + fi + + # Check if it's a merge commit + if [[ $COMMIT_MSG =~ Merge\ pull\ request\ #([0-9]+) ]]; then + # echo "Processing as merge commit" >&2 + PR_NUM="${BASH_REMATCH[1]}" + + # Extract the PR title from the merge commit body + PR_TITLE=$(echo "$BODY" | grep -v "^Merge pull request" | head -n 1) + + # Only process if it follows conventional commit format + CATEGORY=$(get_commit_type "$PR_TITLE") + + if [ -n "$CATEGORY" ]; then # Only process if it's a conventional commit + # Get PR author's GitHub username + GITHUB_USERNAME=$(gh pr view "$PR_NUM" --json author --jq '.author.login') + + if [ -n "$GITHUB_USERNAME" ]; then + # Check if this is a first-time contributor + AUTHOR_EMAIL=$(git show -s --format='%ae' "$HASH") + if [ -z "${ALL_AUTHORS[$AUTHOR_EMAIL]}" ]; then + NEW_CONTRIBUTORS["$GITHUB_USERNAME"]=1 + ALL_AUTHORS["$AUTHOR_EMAIL"]=1 + fi + + CATEGORIES["$CATEGORY"]=1 + COMMITS_BY_CATEGORY["$CATEGORY"]+="* ${PR_TITLE#*: } ([#$PR_NUM](${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/pull/$PR_NUM)) by @$GITHUB_USERNAME"$'\n' + else + COMMITS_BY_CATEGORY["$CATEGORY"]+="* ${PR_TITLE#*: } ([#$PR_NUM](${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/pull/$PR_NUM))"$'\n' + fi + fi + # Check if it's a squash merge by looking for (#NUMBER) pattern + elif [[ $COMMIT_MSG =~ \(#([0-9]+)\) ]]; then + # echo "Processing as squash commit" >&2 + PR_NUM="${BASH_REMATCH[1]}" + + # Only process if it follows conventional commit format + CATEGORY=$(get_commit_type "$COMMIT_MSG") + + if [ -n "$CATEGORY" ]; then # Only process if it's a conventional commit + # Get PR author's GitHub username + GITHUB_USERNAME=$(gh pr view "$PR_NUM" --json author --jq '.author.login') + + if [ -n "$GITHUB_USERNAME" ]; then + # Check if this is a first-time contributor + AUTHOR_EMAIL=$(git show -s --format='%ae' "$HASH") + if [ -z "${ALL_AUTHORS[$AUTHOR_EMAIL]}" ]; then + NEW_CONTRIBUTORS["$GITHUB_USERNAME"]=1 + ALL_AUTHORS["$AUTHOR_EMAIL"]=1 + fi + + CATEGORIES["$CATEGORY"]=1 + COMMIT_TITLE=${COMMIT_MSG%% (#*} # Remove the PR number suffix + COMMIT_TITLE=${COMMIT_TITLE#*: } # Remove the type prefix + COMMITS_BY_CATEGORY["$CATEGORY"]+="* $COMMIT_TITLE ([#$PR_NUM](${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/pull/$PR_NUM)) by @$GITHUB_USERNAME"$'\n' + else + COMMIT_TITLE=${COMMIT_MSG%% (#*} # Remove the PR number suffix + COMMIT_TITLE=${COMMIT_TITLE#*: } # Remove the type prefix + COMMITS_BY_CATEGORY["$CATEGORY"]+="* $COMMIT_TITLE ([#$PR_NUM](${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/pull/$PR_NUM))"$'\n' + fi + fi + + else + # echo "Processing as regular commit" >&2 + # Process conventional commits without PR numbers + CATEGORY=$(get_commit_type "$COMMIT_MSG") + + if [ -n "$CATEGORY" ]; then # Only process if it's a conventional commit + # Get commit author info + AUTHOR_EMAIL=$(git show -s --format='%ae' "$HASH") + + # Try to get GitHub username using gh api + if [ -n "$GITHUB_ACTIONS" ] || command -v gh >/dev/null 2>&1; then + GITHUB_USERNAME=$(gh api "/repos/${GITHUB_REPOSITORY}/commits/${HASH}" --jq '.author.login' 2>/dev/null) + fi + + if [ -n "$GITHUB_USERNAME" ]; then + # If we got GitHub username, use it + if [ -z "${ALL_AUTHORS[$AUTHOR_EMAIL]}" ]; then + NEW_CONTRIBUTORS["$GITHUB_USERNAME"]=1 + ALL_AUTHORS["$AUTHOR_EMAIL"]=1 + fi + + CATEGORIES["$CATEGORY"]=1 + COMMIT_TITLE=${COMMIT_MSG#*: } # Remove the type prefix + COMMITS_BY_CATEGORY["$CATEGORY"]+="* $COMMIT_TITLE (${HASH:0:7}) by @$GITHUB_USERNAME"$'\n' + else + # Fallback to 
git author name if no GitHub username found + AUTHOR_NAME=$(git show -s --format='%an' "$HASH") + + if [ -z "${ALL_AUTHORS[$AUTHOR_EMAIL]}" ]; then + NEW_CONTRIBUTORS["$AUTHOR_NAME"]=1 + ALL_AUTHORS["$AUTHOR_EMAIL"]=1 + fi + + CATEGORIES["$CATEGORY"]=1 + COMMIT_TITLE=${COMMIT_MSG#*: } # Remove the type prefix + COMMITS_BY_CATEGORY["$CATEGORY"]+="* $COMMIT_TITLE (${HASH:0:7}) by $AUTHOR_NAME"$'\n' + fi + fi + fi + +done < <(git log "${COMPARE_BASE}..${GITLOG_REF}" --pretty=format:"%H|%s|%b" --reverse --first-parent) + +# Write categorized commits to changelog with their emojis +for category in "✨ Features" "πŸ› Bug Fixes" "πŸ“š Documentation" "πŸ’Ž Styles" "♻️ Code Refactoring" "⚑ Performance Improvements" "πŸ§ͺ Tests" "πŸ› οΈ Build System" "βš™οΈ CI" "πŸ” Other Changes"; do + if [ -n "${COMMITS_BY_CATEGORY[$category]}" ]; then + echo "### $category" >> changelog.md + echo "" >> changelog.md + echo "${COMMITS_BY_CATEGORY[$category]}" >> changelog.md + echo "" >> changelog.md + fi +done + +# Add first-time contributors section if there are any +if [ ${#NEW_CONTRIBUTORS[@]} -gt 0 ]; then + echo "## ✨ First-time Contributors" >> changelog.md + echo "" >> changelog.md + echo "A huge thank you to our amazing new contributors! Your first contribution marks the start of an exciting journey! 🌟" >> changelog.md + echo "" >> changelog.md + # Use readarray to sort the keys + readarray -t sorted_contributors < <(printf '%s\n' "${!NEW_CONTRIBUTORS[@]}" | sort) + for github_username in "${sorted_contributors[@]}"; do + echo "* 🌟 [@$github_username](https://github.com/$github_username)" >> changelog.md + done + echo "" >> changelog.md +fi + +# Add compare link if not first release +if [ -n "$LATEST_TAG" ]; then + echo "## πŸ“ˆ Stats" >> changelog.md + echo "" >> changelog.md + echo "**Full Changelog**: [\`$LATEST_TAG..v${NEW_VERSION}\`](${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/compare/$LATEST_TAG...v${NEW_VERSION})" >> changelog.md +fi + +# Output the changelog content +CHANGELOG_CONTENT=$(cat changelog.md) +{ + echo "content<> "$GITHUB_OUTPUT" + +# Also print to stdout for local testing +echo "Generated changelog:" +echo "===================" +cat changelog.md +echo "===================" \ No newline at end of file diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 0000000..8ab236d --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,27 @@ +name: CI/CD + +on: + push: + branches: + - master + pull_request: + +jobs: + test: + name: Test + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup and Build + uses: ./.github/actions/setup-and-build + + - name: Run type check + run: pnpm run typecheck + + # - name: Run ESLint + # run: pnpm run lint + + - name: Run tests + run: pnpm run test diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml new file mode 100644 index 0000000..d3bd2f1 --- /dev/null +++ b/.github/workflows/docker.yaml @@ -0,0 +1,81 @@ +--- +name: Docker Publish + +on: + workflow_dispatch: + push: + branches: + - main + tags: + - v* + - "*" + +permissions: + packages: write + contents: read + +env: + REGISTRY: ghcr.io + DOCKER_IMAGE: ghcr.io/${{ github.repository }} + BUILD_TARGET: bolt-ai-production # bolt-ai-development + +jobs: + docker-build-publish: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - id: string + uses: ASzc/change-string-case-action@v6 + with: + string: ${{ env.DOCKER_IMAGE }} + + - name: Docker meta + id: meta + uses: 
crazy-max/ghaction-docker-meta@v5 + with: + images: ${{ steps.string.outputs.lowercase }} + flavor: | + latest=true + prefix= + suffix= + tags: | + type=semver,pattern={{version}} + type=pep440,pattern={{version}} + type=ref,event=tag + type=raw,value={{sha}} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} # ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.GITHUB_TOKEN }} # ${{ secrets.DOCKER_PASSWORD }} + + - name: Build and push + uses: docker/build-push-action@v6 + with: + context: . + file: ./Dockerfile + target: ${{ env.BUILD_TARGET }} + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=registry,ref=${{ steps.string.outputs.lowercase }}:latest + cache-to: type=inline + + - name: Check manifest + run: | + docker buildx imagetools inspect ${{ steps.string.outputs.lowercase }}:${{ steps.meta.outputs.version }} + + - name: Dump context + if: always() + uses: crazy-max/ghaction-dump-context@v2 diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml new file mode 100644 index 0000000..0691be2 --- /dev/null +++ b/.github/workflows/docs.yaml @@ -0,0 +1,35 @@ +name: Docs CI/CD + +on: + push: + branches: + - main + paths: + - 'docs/**' # This will only trigger the workflow when files in docs directory change +permissions: + contents: write +jobs: + build_docs: + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./docs + steps: + - uses: actions/checkout@v4 + - name: Configure Git Credentials + run: | + git config user.name github-actions[bot] + git config user.email 41898282+github-actions[bot]@users.noreply.github.com + - uses: actions/setup-python@v5 + with: + python-version: 3.x + - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV + - uses: actions/cache@v4 + with: + key: mkdocs-material-${{ env.cache_id }} + path: .cache + restore-keys: | + mkdocs-material- + + - run: pip install mkdocs-material + - run: mkdocs gh-deploy --force \ No newline at end of file diff --git a/.github/workflows/pr-release-validation.yaml b/.github/workflows/pr-release-validation.yaml new file mode 100644 index 0000000..99c5703 --- /dev/null +++ b/.github/workflows/pr-release-validation.yaml @@ -0,0 +1,31 @@ +name: PR Validation + +on: + pull_request: + types: [opened, synchronize, reopened, labeled, unlabeled] + branches: + - main + +jobs: + validate: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Validate PR Labels + run: | + if [[ "${{ contains(github.event.pull_request.labels.*.name, 'stable-release') }}" == "true" ]]; then + echo "βœ“ PR has stable-release label" + + # Check version bump labels + if [[ "${{ contains(github.event.pull_request.labels.*.name, 'major') }}" == "true" ]]; then + echo "βœ“ Major version bump requested" + elif [[ "${{ contains(github.event.pull_request.labels.*.name, 'minor') }}" == "true" ]]; then + echo "βœ“ Minor version bump requested" + else + echo "βœ“ Patch version bump will be applied" + fi + else + echo "This PR doesn't have the stable-release label. No release will be created." 
+ fi \ No newline at end of file diff --git a/.github/workflows/semantic-pr.yaml b/.github/workflows/semantic-pr.yaml new file mode 100644 index 0000000..b6d64c8 --- /dev/null +++ b/.github/workflows/semantic-pr.yaml @@ -0,0 +1,32 @@ +name: Semantic Pull Request +on: + pull_request_target: + types: [opened, reopened, edited, synchronize] +permissions: + pull-requests: read +jobs: + main: + name: Validate PR Title + runs-on: ubuntu-latest + steps: + # https://github.com/amannn/action-semantic-pull-request/releases/tag/v5.5.3 + - uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + subjectPattern: ^(?![A-Z]).+$ + subjectPatternError: | + The subject "{subject}" found in the pull request title "{title}" + didn't match the configured pattern. Please ensure that the subject + doesn't start with an uppercase character. + types: | + fix + feat + chore + build + ci + perf + docs + refactor + revert + test \ No newline at end of file diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000..c9eb890 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,25 @@ +name: Mark Stale Issues and Pull Requests + +on: + schedule: + - cron: '0 2 * * *' # Runs daily at 2:00 AM UTC + workflow_dispatch: # Allows manual triggering of the workflow + +jobs: + stale: + runs-on: ubuntu-latest + + steps: + - name: Mark stale issues and pull requests + uses: actions/stale@v8 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: "This issue has been marked as stale due to inactivity. If no further activity occurs, it will be closed in 7 days." + stale-pr-message: "This pull request has been marked as stale due to inactivity. If no further activity occurs, it will be closed in 7 days." 
+ days-before-stale: 10 # Number of days before marking an issue or PR as stale + days-before-close: 4 # Number of days after being marked stale before closing + stale-issue-label: "stale" # Label to apply to stale issues + stale-pr-label: "stale" # Label to apply to stale pull requests + exempt-issue-labels: "pinned,important" # Issues with these labels won't be marked stale + exempt-pr-labels: "pinned,important" # PRs with these labels won't be marked stale + operations-per-run: 75 # Limits the number of actions per run to avoid API rate limits diff --git a/.github/workflows/update-stable.yml b/.github/workflows/update-stable.yml new file mode 100644 index 0000000..f7341c4 --- /dev/null +++ b/.github/workflows/update-stable.yml @@ -0,0 +1,126 @@ +name: Update Stable Branch + +on: + push: + branches: + - main + +permissions: + contents: write + +jobs: + prepare-release: + if: contains(github.event.head_commit.message, '#release') + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Configure Git + run: | + git config --global user.name 'github-actions[bot]' + git config --global user.email 'github-actions[bot]@users.noreply.github.com' + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Install pnpm + uses: pnpm/action-setup@v2 + with: + version: latest + run_install: false + + - name: Get pnpm store directory + id: pnpm-cache + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT + + - name: Setup pnpm cache + uses: actions/cache@v4 + with: + path: ${{ steps.pnpm-cache.outputs.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Get Current Version + id: current_version + run: | + CURRENT_VERSION=$(node -p "require('./package.json').version") + echo "version=$CURRENT_VERSION" >> $GITHUB_OUTPUT + + - name: Install semver + run: pnpm add -g semver + + - name: Determine Version Bump + id: version_bump + run: | + COMMIT_MSG="${{ github.event.head_commit.message }}" + if [[ $COMMIT_MSG =~ "#release:major" ]]; then + echo "bump=major" >> $GITHUB_OUTPUT + elif [[ $COMMIT_MSG =~ "#release:minor" ]]; then + echo "bump=minor" >> $GITHUB_OUTPUT + else + echo "bump=patch" >> $GITHUB_OUTPUT + fi + + - name: Bump Version + id: bump_version + run: | + NEW_VERSION=$(semver -i ${{ steps.version_bump.outputs.bump }} ${{ steps.current_version.outputs.version }}) + echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT + + - name: Update Package.json + run: | + NEW_VERSION=${{ steps.bump_version.outputs.new_version }} + pnpm version $NEW_VERSION --no-git-tag-version --allow-same-version + + + - name: Prepare changelog script + run: chmod +x .github/scripts/generate-changelog.sh + + - name: Generate Changelog + id: changelog + env: + NEW_VERSION: ${{ steps.bump_version.outputs.new_version }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + run: .github/scripts/generate-changelog.sh + + - name: Get the latest commit hash and version tag + run: | + echo "COMMIT_HASH=$(git rev-parse HEAD)" >> $GITHUB_ENV + echo "NEW_VERSION=${{ steps.bump_version.outputs.new_version }}" >> $GITHUB_ENV + + - name: Commit and Tag Release + run: | + git pull + git add package.json pnpm-lock.yaml changelog.md + git commit -m "chore: release version ${{ steps.bump_version.outputs.new_version }}" + git tag "v${{ steps.bump_version.outputs.new_version }}" + git push + git push --tags + + - name: Update Stable Branch + run: | + if 
! git checkout stable 2>/dev/null; then + echo "Creating new stable branch..." + git checkout -b stable + fi + git merge main --no-ff -m "chore: release version ${{ steps.bump_version.outputs.new_version }}" + git push --set-upstream origin stable --force + + - name: Create GitHub Release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + VERSION="v${{ steps.bump_version.outputs.new_version }}" + gh release create "$VERSION" \ + --title "Release $VERSION" \ + --notes "${{ steps.changelog.outputs.content }}" \ + --target stable \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..53eb036 --- /dev/null +++ b/.gitignore @@ -0,0 +1,42 @@ +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +.vscode/* +.vscode/launch.json +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? + +/.history +/.cache +/build +.env.local +.env +.dev.vars +*.vars +.wrangler +_worker.bundle + +Modelfile +modelfiles + +# docs ignore +site + +# commit file ignore +app/commit.json \ No newline at end of file diff --git a/.husky/pre-commit b/.husky/pre-commit new file mode 100644 index 0000000..5f5c2b9 --- /dev/null +++ b/.husky/pre-commit @@ -0,0 +1,32 @@ +#!/bin/sh + +echo "πŸ” Running pre-commit hook to check the code looks good... πŸ”" + +# Load NVM if available (useful for managing Node.js versions) +export NVM_DIR="$HOME/.nvm" +[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" + +# Ensure `pnpm` is available +echo "Checking if pnpm is available..." +if ! command -v pnpm >/dev/null 2>&1; then + echo "❌ pnpm not found! Please ensure pnpm is installed and available in PATH." + exit 1 +fi + +# Run typecheck +echo "Running typecheck..." +if ! pnpm typecheck; then + echo "❌ Type checking failed! Please review TypeScript types." + echo "Once you're done, don't forget to add your changes to the commit! πŸš€" + exit 1 +fi + +# Run lint +echo "Running lint..." +if ! pnpm lint; then + echo "❌ Linting failed! Run 'pnpm lint:fix' to fix the easy issues." + echo "Once you're done, don't forget to add your beautification to the commit! 🀩" + exit 1 +fi + +echo "πŸ‘ All checks passed! Committing changes..." diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000..3a08d6e --- /dev/null +++ b/.prettierignore @@ -0,0 +1,2 @@ +pnpm-lock.yaml +.astro diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 0000000..8d3dfb0 --- /dev/null +++ b/.prettierrc @@ -0,0 +1,8 @@ +{ + "printWidth": 120, + "singleQuote": true, + "useTabs": false, + "tabWidth": 2, + "semi": true, + "bracketSpacing": true +} diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 0000000..427253d --- /dev/null +++ b/.tool-versions @@ -0,0 +1,2 @@ +nodejs 20.15.1 +pnpm 9.4.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..3a8d5be --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,219 @@ +# Contribution Guidelines + +Welcome! This guide provides all the details you need to contribute effectively to the project. Thank you for helping us make **bolt.diy** a better tool for developers worldwide. πŸ’‘ + +--- + +## πŸ“‹ Table of Contents + +1. [Code of Conduct](#code-of-conduct) +2. [How Can I Contribute?](#how-can-i-contribute) +3. [Pull Request Guidelines](#pull-request-guidelines) +4. [Coding Standards](#coding-standards) +5. [Development Setup](#development-setup) +6. [Testing](#testing) +7. 
[Deployment](#deployment) +8. [Docker Deployment](#docker-deployment) +9. [VS Code Dev Containers Integration](#vs-code-dev-containers-integration) + +--- + +## πŸ›‘οΈ Code of Conduct + +This project is governed by our **Code of Conduct**. By participating, you agree to uphold this code. Report unacceptable behavior to the project maintainers. + +--- + +## πŸ› οΈ How Can I Contribute? + +### 1️⃣ Reporting Bugs or Feature Requests +- Check the [issue tracker](#) to avoid duplicates. +- Use issue templates (if available). +- Provide detailed, relevant information and steps to reproduce bugs. + +### 2️⃣ Code Contributions +1. Fork the repository. +2. Create a feature or fix branch. +3. Write and test your code. +4. Submit a pull request (PR). + +### 3️⃣ Join as a Core Contributor +Interested in maintaining and growing the project? Fill out our [Contributor Application Form](https://forms.gle/TBSteXSDCtBDwr5m7). + +--- + +## βœ… Pull Request Guidelines + +### PR Checklist +- Branch from the **main** branch. +- Update documentation, if needed. +- Test all functionality manually. +- Focus on one feature/bug per PR. + +### Review Process +1. Manual testing by reviewers. +2. At least one maintainer review required. +3. Address review comments. +4. Maintain a clean commit history. + +--- + +## πŸ“ Coding Standards + +### General Guidelines +- Follow existing code style. +- Comment complex logic. +- Keep functions small and focused. +- Use meaningful variable names. + +--- + +## πŸ–₯️ Development Setup + +### 1️⃣ Initial Setup +- Clone the repository: + ```bash + git clone https://github.com/stackblitz-labs/bolt.diy.git + ``` +- Install dependencies: + ```bash + pnpm install + ``` +- Set up environment variables: + 1. Rename `.env.example` to `.env.local`. + 2. Add your API keys: + ```bash + GROQ_API_KEY=XXX + HuggingFace_API_KEY=XXX + OPENAI_API_KEY=XXX + ... + ``` + 3. Optionally set: + - Debug level: `VITE_LOG_LEVEL=debug` + - Context size: `DEFAULT_NUM_CTX=32768` + +**Note**: Never commit your `.env.local` file to version control. It’s already in `.gitignore`. + +### 2️⃣ Run Development Server +```bash +pnpm run dev +``` +**Tip**: Use **Google Chrome Canary** for local testing. + +--- + +## πŸ§ͺ Testing + +Run the test suite with: +```bash +pnpm test +``` + +--- + +## πŸš€ Deployment + +### Deploy to Cloudflare Pages +```bash +pnpm run deploy +``` +Ensure you have required permissions and that Wrangler is configured. + +--- + +## 🐳 Docker Deployment + +This section outlines the methods for deploying the application using Docker. The processes for **Development** and **Production** are provided separately for clarity. + +--- + +### πŸ§‘β€πŸ’» Development Environment + +#### Build Options + +**Option 1: Helper Scripts** +```bash +# Development build +npm run dockerbuild +``` + +**Option 2: Direct Docker Build Command** +```bash +docker build . --target bolt-ai-development +``` + +**Option 3: Docker Compose Profile** +```bash +docker-compose --profile development up +``` + +#### Running the Development Container +```bash +docker run -p 5173:5173 --env-file .env.local bolt-ai:development +``` + +--- + +### 🏭 Production Environment + +#### Build Options + +**Option 1: Helper Scripts** +```bash +# Production build +npm run dockerbuild:prod +``` + +**Option 2: Direct Docker Build Command** +```bash +docker build . 
--target bolt-ai-production +``` + +**Option 3: Docker Compose Profile** +```bash +docker-compose --profile production up +``` + +#### Running the Production Container +```bash +docker run -p 5173:5173 --env-file .env.local bolt-ai:production +``` + +--- + +### Coolify Deployment + +For an easy deployment process, use [Coolify](https://github.com/coollabsio/coolify): +1. Import your Git repository into Coolify. +2. Choose **Docker Compose** as the build pack. +3. Configure environment variables (e.g., API keys). +4. Set the start command: + ```bash + docker compose --profile production up + ``` + +--- + +## πŸ› οΈ VS Code Dev Containers Integration + +The `docker-compose.yaml` configuration is compatible with **VS Code Dev Containers**, making it easy to set up a development environment directly in Visual Studio Code. + +### Steps to Use Dev Containers + +1. Open the command palette in VS Code (`Ctrl+Shift+P` or `Cmd+Shift+P` on macOS). +2. Select **Dev Containers: Reopen in Container**. +3. Choose the **development** profile when prompted. +4. VS Code will rebuild the container and open it with the pre-configured environment. + +--- + +## πŸ”‘ Environment Variables + +Ensure `.env.local` is configured correctly with: +- API keys. +- Context-specific configurations. + +Example for the `DEFAULT_NUM_CTX` variable: +```bash +DEFAULT_NUM_CTX=24576 # Uses 32GB VRAM +``` \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..d287d40 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,92 @@ +ARG BASE=node:20.18.0 +FROM ${BASE} AS base + +WORKDIR /app + +# Install dependencies (this step is cached as long as the dependencies don't change) +COPY package.json pnpm-lock.yaml ./ + +RUN corepack enable pnpm && pnpm install + +# Copy the rest of your app's source code +COPY . . 
+ +# Expose the port the app runs on +EXPOSE 5173 + +# Production image +FROM base AS bolt-ai-production + +# Define environment variables with default values or let them be overridden +ARG GROQ_API_KEY +ARG HuggingFace_API_KEY +ARG OPENAI_API_KEY +ARG ANTHROPIC_API_KEY +ARG OPEN_ROUTER_API_KEY +ARG GOOGLE_GENERATIVE_AI_API_KEY +ARG OLLAMA_API_BASE_URL +ARG XAI_API_KEY +ARG TOGETHER_API_KEY +ARG TOGETHER_API_BASE_URL +ARG AWS_BEDROCK_CONFIG +ARG VITE_LOG_LEVEL=debug +ARG DEFAULT_NUM_CTX + +ENV WRANGLER_SEND_METRICS=false \ + GROQ_API_KEY=${GROQ_API_KEY} \ + HuggingFace_KEY=${HuggingFace_API_KEY} \ + OPENAI_API_KEY=${OPENAI_API_KEY} \ + ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \ + OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \ + GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \ + OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \ + XAI_API_KEY=${XAI_API_KEY} \ + TOGETHER_API_KEY=${TOGETHER_API_KEY} \ + TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \ + AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \ + VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \ + DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\ + RUNNING_IN_DOCKER=true + +# Pre-configure wrangler to disable metrics +RUN mkdir -p /root/.config/.wrangler && \ + echo '{"enabled":false}' > /root/.config/.wrangler/metrics.json + +RUN pnpm run build + +CMD [ "pnpm", "run", "dockerstart"] + +# Development image +FROM base AS bolt-ai-development + +# Define the same environment variables for development +ARG GROQ_API_KEY +ARG HuggingFace +ARG OPENAI_API_KEY +ARG ANTHROPIC_API_KEY +ARG OPEN_ROUTER_API_KEY +ARG GOOGLE_GENERATIVE_AI_API_KEY +ARG OLLAMA_API_BASE_URL +ARG XAI_API_KEY +ARG TOGETHER_API_KEY +ARG TOGETHER_API_BASE_URL +ARG VITE_LOG_LEVEL=debug +ARG DEFAULT_NUM_CTX + +ENV GROQ_API_KEY=${GROQ_API_KEY} \ + HuggingFace_API_KEY=${HuggingFace_API_KEY} \ + OPENAI_API_KEY=${OPENAI_API_KEY} \ + ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \ + OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \ + GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \ + OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \ + XAI_API_KEY=${XAI_API_KEY} \ + TOGETHER_API_KEY=${TOGETHER_API_KEY} \ + TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \ + AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \ + VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \ + DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\ + RUNNING_IN_DOCKER=true + +RUN mkdir -p ${WORKDIR}/run +CMD pnpm run dev --host diff --git a/FAQ.md b/FAQ.md new file mode 100644 index 0000000..a09fae8 --- /dev/null +++ b/FAQ.md @@ -0,0 +1,91 @@ +# Frequently Asked Questions (FAQ) + +
+What are the best models for bolt.diy? + +For the best experience with bolt.diy, we recommend using the following models: + +- **Claude 3.5 Sonnet (old)**: Best overall coder, providing excellent results across all use cases +- **Gemini 2.0 Flash**: Exceptional speed while maintaining good performance +- **GPT-4o**: Strong alternative to Claude 3.5 Sonnet with comparable capabilities +- **DeepSeekCoder V2 236b**: Best open source model (available through OpenRouter, DeepSeek API, or self-hosted) +- **Qwen 2.5 Coder 32b**: Best model for self-hosting with reasonable hardware requirements + +**Note**: Models with less than 7b parameters typically lack the capability to properly interact with bolt! +
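+
+If you want to try the self-hosted route, a minimal environment sketch for running Qwen 2.5 Coder through Ollama looks like the following. The variable names and the qwen2.5-coder:32b context-size figures come from `.env.example`; the concrete values are illustrative, not requirements.
+
+```bash
+# .env.local sketch for a local Ollama setup (values are examples)
+# Use 127.0.0.1 rather than localhost to avoid IPv6 issues
+OLLAMA_API_BASE_URL=http://127.0.0.1:11434
+
+# Context window for qwen2.5-coder:32b; 6144 is the ~24GB-VRAM example from .env.example
+DEFAULT_NUM_CTX=6144
+
+# Optional: more verbose logging while you evaluate a model
+VITE_LOG_LEVEL=debug
+```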
+ +
+How do I get the best results with bolt.diy? + +- **Be specific about your stack**: + Mention the frameworks or libraries you want to use (e.g., Astro, Tailwind, ShadCN) in your initial prompt. This ensures that bolt.diy scaffolds the project according to your preferences. + +- **Use the enhance prompt icon**: + Before sending your prompt, click the *enhance* icon to let the AI refine your prompt. You can edit the suggested improvements before submitting. + +- **Scaffold the basics first, then add features**: + Ensure the foundational structure of your application is in place before introducing advanced functionality. This helps bolt.diy establish a solid base to build on. + +- **Batch simple instructions**: + Combine simple tasks into a single prompt to save time and reduce API credit consumption. For example: + *"Change the color scheme, add mobile responsiveness, and restart the dev server."* +
+ +
+How do I contribute to bolt.diy? + +Check out our [Contribution Guide](CONTRIBUTING.md) for more details on how to get involved! +
+ +
+What are the future plans for bolt.diy? + +Visit our [Roadmap](https://roadmap.sh/r/ottodev-roadmap-2ovzo) for the latest updates. +New features and improvements are on the way! +
+ +
+Why are there so many open issues/pull requests? + +bolt.diy began as a small showcase project on @ColeMedin's YouTube channel to explore editing open-source projects with local LLMs. However, it quickly grew into a massive community effort! + +We're forming a team of maintainers to manage demand and streamline issue resolution. The maintainers are rockstars, and we're also exploring partnerships to help the project thrive. +
+ +
+How do local LLMs compare to larger models like Claude 3.5 Sonnet for bolt.diy? + +While local LLMs are improving rapidly, larger models like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b still offer the best results for complex applications. Our ongoing focus is to improve prompts, agents, and the platform to better support smaller local LLMs. +
+ +
+Common Errors and Troubleshooting
+
+### **"There was an error processing this request"**
+This generic error message means something went wrong. Check both:
+- The terminal (if you started the app with Docker or `pnpm`).
+- The developer console in your browser (press `F12` or right-click > *Inspect*, then go to the *Console* tab).
+
+### **"x-api-key header missing"**
+This error is sometimes resolved by restarting the Docker container.
+If that doesn't work, try switching from Docker to `pnpm` or vice versa. We're actively investigating this issue.
+
+### **Blank preview when running the app**
+A blank preview often occurs because the LLM hallucinated bad code or produced incorrect commands.
+To troubleshoot:
+- Check the developer console for errors.
+- Remember, previews are core functionality, so the app isn't broken! We're working on making these errors more transparent.
+
+### **"Everything works, but the results are bad"**
+Local LLMs like Qwen-2.5-Coder are powerful for small applications but still experimental for larger projects. For better results, consider using larger models like GPT-4o, Claude 3.5 Sonnet, or DeepSeek Coder V2 236b.
+
+### **"Received structured exception #0xc0000005: access violation"**
+If you are getting this, you are probably on Windows. The fix is generally to update the [Visual C++ Redistributable](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
+
+### **"Miniflare or Wrangler errors in Windows"**
+You will need to make sure you have the latest version of Visual Studio C++ installed (14.40.33816); more information is available at https://github.com/stackblitz-labs/bolt.diy/issues/19.
+
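+
+If none of the fixes above help, it is worth collecting some diagnostics before opening an issue. A rough sketch of useful commands follows; `VITE_LOG_LEVEL` and the `development` Docker profile are the ones documented in this repository, while the log file name is an arbitrary choice.
+
+```bash
+# Confirm the toolchain matches .tool-versions (Node 20.15.1, pnpm 9.4.0)
+node --version && pnpm --version
+
+# Re-run the dev server with verbose logging enabled
+VITE_LOG_LEVEL=debug pnpm run dev
+
+# For Docker-based runs, capture the container output instead
+docker-compose --profile development up 2>&1 | tee bolt-dev.log
+```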
+
+---
+
+Got more questions? Feel free to reach out or open an issue in our GitHub repo!
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..8fb312e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 StackBlitz, Inc. and bolt.diy contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/PROJECT.md b/PROJECT.md
new file mode 100644
index 0000000..33e697e
--- /dev/null
+++ b/PROJECT.md
@@ -0,0 +1,57 @@
+# Project management of bolt.diy
+
+First off: this sounds funny, we know. "Project management" comes from a world of enterprise stuff, and this project is
+far from being enterprisey; it's still anarchy all over the place πŸ˜‰
+
+But we need to organize ourselves somehow, right?
+
+> tl;dr: We've got a project board with epics and features. We use PRs as a change log and as materialized features. Find it [here](https://github.com/orgs/stackblitz-labs/projects/4).
+
+Here's how we structure the long-term vision, mid-term capabilities of the software, and short-term improvements.
+
+## Strategic epics (long-term)
+
+Strategic epics define areas in which the product evolves. Usually, these epics don’t overlap. They allow the core
+team to define what they believe is most important and should be worked on with the highest priority.
+
+You can find the [epics as issues](https://github.com/stackblitz-labs/bolt.diy/labels/epic), which are probably never
+going to be closed.
+
+What's the benefit / purpose of epics?
+
+1. Prioritization
+
+E.g., we could say β€œmanaging files is currently more important than quality”. Then, we could think about which features
+would bring β€œmanaging files” forward. These may be different features, such as β€œupload local files”, β€œimport from a repo”,
+or undo/redo/commit.
+
+In a more-or-less regular meeting dedicated to that, the core team discusses which epics matter most, sketches features,
+and then checks who can work on them. After the meeting, they update the roadmap (at least for the next development turn)
+and in this way communicate where the focus currently is.
+
+2. Grouping of features
+
+By linking features with epics, we can keep them together and document *why* we invest work into a particular thing.
+
+## Features (mid-term)
+
+We probably all know a dozen methodologies for describing features (user story, business
+function, you name it).
+
+However, we intentionally describe features in a more vague manner. Why? Everybody loves crisp, well-defined
+acceptance criteria, no? Well, every product owner loves them, because they know what they’ll get once it’s done.
+
+But: **there is no owner of this product**. Therefore, we grant *maximum flexibility to the developer contributing a feature* – so that they can bring in their own ideas and have the most fun implementing it.
+
+The feature therefore tries to describe *what* should be improved, but not in detail *how*.
+
+## PRs as materialized features (short-term)
+
+Once a developer starts working on a feature, a draft PR *can* be opened early to share, describe, and discuss how the feature should be implemented. But this is not a must: it just helps to get early feedback and to get other developers involved. Sometimes the developer just wants to get started and then open a PR later.
+
+In a loosely organized project, it may well happen that multiple PRs are opened for the same feature. This is no real issue: usually, people who are passionate about a solution are willing to join forces and get it done together. And if a second developer was simply faster at getting the same feature realized: be happy that it's been done, close the PR, and look out for the next feature to implement πŸ€“
+
+## PRs as change log
+
+Once a PR is merged, a squashed commit contains the whole PR description, which makes for a good change log.
+All authors of commits in the PR are mentioned in the squashed commit message and become contributors πŸ™Œ
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d216bf0
--- /dev/null
+++ b/README.md
@@ -0,0 +1,339 @@
+# bolt.diy (Previously oTToDev)
+[![bolt.diy: AI-Powered Full-Stack Web Development in the Browser](./public/social_preview_index.jpg)](https://bolt.diy)
+
+Welcome to bolt.diy, the official open source version of Bolt.new (previously known as oTToDev and bolt.new ANY LLM), which allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
+
+Check the [bolt.diy Docs](https://stackblitz-labs.github.io/bolt.diy/) for more information.
+
+Also, [this pinned post in our community](https://thinktank.ottomator.ai/t/videos-tutorial-helpful-content/3243) has a bunch of incredible resources for running and deploying bolt.diy yourself!
+
+We have also launched an experimental agent called the "bolt.diy Expert" that can answer common questions about bolt.diy. Find it here on the [oTTomator Live Agent Studio](https://studio.ottomator.ai/).
+
+bolt.diy was originally started by [Cole Medin](https://www.youtube.com/@ColeMedin) but has quickly grown into a massive community effort to build the BEST open source AI coding assistant!
+
+## Table of Contents
+
+- [Join the Community](#join-the-community)
+- [Requested Additions](#requested-additions)
+- [Features](#features)
+- [Setup](#setup)
+- [Running the Application](#running-the-application)
+- [Available Scripts](#available-scripts)
+- [Contributing](#contributing)
+- [Roadmap](#roadmap)
+- [FAQ](#faq)
+
+## Join the community
+
+[Join the bolt.diy community here, in the oTTomator Think Tank!](https://thinktank.ottomator.ai)
+
+## Project management
+
+Bolt.diy is a community effort! Still, the core team of contributors aims to organize the project in a way that allows
+you to understand where the current areas of focus are.
+ +If you want to know what we are working on, what we are planning to work on, or if you want to contribute to the +project, please check the [project management guide](./PROJECT.md) to get started easily. + +## Requested Additions + +- βœ… OpenRouter Integration (@coleam00) +- βœ… Gemini Integration (@jonathands) +- βœ… Autogenerate Ollama models from what is downloaded (@yunatamos) +- βœ… Filter models by provider (@jasonm23) +- βœ… Download project as ZIP (@fabwaseem) +- βœ… Improvements to the main bolt.new prompt in `app\lib\.server\llm\prompts.ts` (@kofi-bhr) +- βœ… DeepSeek API Integration (@zenith110) +- βœ… Mistral API Integration (@ArulGandhi) +- βœ… "Open AI Like" API Integration (@ZerxZ) +- βœ… Ability to sync files (one way sync) to local folder (@muzafferkadir) +- βœ… Containerize the application with Docker for easy installation (@aaronbolton) +- βœ… Publish projects directly to GitHub (@goncaloalves) +- βœ… Ability to enter API keys in the UI (@ali00209) +- βœ… xAI Grok Beta Integration (@milutinke) +- βœ… LM Studio Integration (@karrot0) +- βœ… HuggingFace Integration (@ahsan3219) +- βœ… Bolt terminal to see the output of LLM run commands (@thecodacus) +- βœ… Streaming of code output (@thecodacus) +- βœ… Ability to revert code to earlier version (@wonderwhy-er) +- βœ… Chat history backup and restore functionality (@sidbetatester) +- βœ… Cohere Integration (@hasanraiyan) +- βœ… Dynamic model max token length (@hasanraiyan) +- βœ… Better prompt enhancing (@SujalXplores) +- βœ… Prompt caching (@SujalXplores) +- βœ… Load local projects into the app (@wonderwhy-er) +- βœ… Together Integration (@mouimet-infinisoft) +- βœ… Mobile friendly (@qwikode) +- βœ… Better prompt enhancing (@SujalXplores) +- βœ… Attach images to prompts (@atrokhym) +- βœ… Added Git Clone button (@thecodacus) +- βœ… Git Import from url (@thecodacus) +- βœ… PromptLibrary to have different variations of prompts for different use cases (@thecodacus) +- βœ… Detect package.json and commands to auto install & run preview for folder and git import (@wonderwhy-er) +- βœ… Selection tool to target changes visually (@emcconnell) +- βœ… Detect terminal Errors and ask bolt to fix it (@thecodacus) +- βœ… Detect preview Errors and ask bolt to fix it (@wonderwhy-er) +- βœ… Add Starter Template Options (@thecodacus) +- βœ… Perplexity Integration (@meetpateltech) +- βœ… AWS Bedrock Integration (@kunjabijukchhe) +- ⬜ **HIGH PRIORITY** - Prevent bolt from rewriting files as often (file locking and diffs) +- ⬜ **HIGH PRIORITY** - Better prompting for smaller LLMs (code window sometimes doesn't start) +- ⬜ **HIGH PRIORITY** - Run agents in the backend as opposed to a single model call +- ⬜ Deploy directly to Vercel/Netlify/other similar platforms +- ⬜ Have LLM plan the project in a MD file for better results/transparency +- ⬜ VSCode Integration with git-like confirmations +- ⬜ Upload documents for knowledge - UI design templates, a code base to reference coding style, etc. +- ⬜ Voice prompting +- ⬜ Azure Open AI API Integration +- ⬜ Vertex AI Integration +- ⬜ Granite Integration +- ⬜ Popout Window for Web Container + +## Features + +- **AI-powered full-stack web development** directly in your browser. +- **Support for multiple LLMs** with an extensible architecture to integrate additional models. +- **Attach images to prompts** for better contextual understanding. +- **Integrated terminal** to view output of LLM-run commands. +- **Revert code to earlier versions** for easier debugging and quicker changes. 
+- **Download projects as ZIP** for easy portability. +- **Integration-ready Docker support** for a hassle-free setup. + +## Setup + +If you're new to installing software from GitHub, don't worry! If you encounter any issues, feel free to submit an "issue" using the provided links or improve this documentation by forking the repository, editing the instructions, and submitting a pull request. The following instruction will help you get the stable branch up and running on your local machine in no time. + +Let's get you up and running with the stable version of Bolt.DIY! + +## Quick Download + +[![Download Latest Release](https://img.shields.io/github/v/release/stackblitz-labs/bolt.diy?label=Download%20Bolt&sort=semver)](https://github.com/stackblitz-labs/bolt.diy/releases/latest) ← Click here to go the the latest release version! + +- Next **click source.zip** + + + + +## Prerequisites + +Before you begin, you'll need to install two important pieces of software: + +### Install Node.js + +Node.js is required to run the application. + +1. Visit the [Node.js Download Page](https://nodejs.org/en/download/) +2. Download the "LTS" (Long Term Support) version for your operating system +3. Run the installer, accepting the default settings +4. Verify Node.js is properly installed: + - **For Windows Users**: + 1. Press `Windows + R` + 2. Type "sysdm.cpl" and press Enter + 3. Go to "Advanced" tab β†’ "Environment Variables" + 4. Check if `Node.js` appears in the "Path" variable + - **For Mac/Linux Users**: + 1. Open Terminal + 2. Type this command: + ```bash + echo $PATH + ``` + 3. Look for `/usr/local/bin` in the output + +## Running the Application + +You have two options for running Bolt.DIY: directly on your machine or using Docker. + +### Option 1: Direct Installation (Recommended for Beginners) + +1. **Install Package Manager (pnpm)**: + ```bash + npm install -g pnpm + ``` + +2. **Install Project Dependencies**: + ```bash + pnpm install + ``` + +3. **Start the Application**: + ```bash + pnpm run dev + ``` + + **Important Note**: If you're using Google Chrome, you'll need Chrome Canary for local development. [Download it here](https://www.google.com/chrome/canary/) + +### Option 2: Using Docker + +This option requires some familiarity with Docker but provides a more isolated environment. + +#### Additional Prerequisite +- Install Docker: [Download Docker](https://www.docker.com/) + +#### Steps: + +1. **Build the Docker Image**: + ```bash + # Using npm script: + npm run dockerbuild + + # OR using direct Docker command: + docker build . --target bolt-ai-development + ``` + +2. **Run the Container**: + ```bash + docker-compose --profile development up + ``` + + + + +## Configuring API Keys and Providers + +### Adding Your API Keys + +Setting up your API keys in Bolt.DIY is straightforward: + +1. Open the home page (main interface) +2. Select your desired provider from the dropdown menu +3. Click the pencil (edit) icon +4. Enter your API key in the secure input field + +![API Key Configuration Interface](./docs/images/api-key-ui-section.png) + +### Configuring Custom Base URLs + +For providers that support custom base URLs (such as Ollama or LM Studio), follow these steps: + +1. Click the settings icon in the sidebar to open the settings menu + ![Settings Button Location](./docs/images/bolt-settings-button.png) + +2. Navigate to the "Providers" tab +3. Search for your provider using the search bar +4. 
+### Supported Providers
+
+- Ollama
+- LM Studio
+- OpenAILike
+
+## Setup Using Git (For Developers Only)
+
+This method is recommended for developers who want to:
+
+- Contribute to the project
+- Stay updated with the latest changes
+- Switch between different versions
+- Create custom modifications
+
+#### Prerequisites
+
+1. Install Git: [Download Git](https://git-scm.com/downloads)
+
+#### Initial Setup
+
+1. **Clone the Repository**:
+   ```bash
+   # Using HTTPS
+   git clone https://github.com/stackblitz-labs/bolt.diy.git
+   ```
+
+2. **Navigate to the Project Directory**:
+   ```bash
+   cd bolt.diy
+   ```
+
+3. **Switch to the Main Branch**:
+   ```bash
+   git checkout main
+   ```
+
+4. **Install Dependencies**:
+   ```bash
+   pnpm install
+   ```
+
+5. **Start the Development Server**:
+   ```bash
+   pnpm run dev
+   ```
+
+#### Staying Updated
+
+To get the latest changes from the repository:
+
+1. **Save Your Local Changes** (if any):
+   ```bash
+   git stash
+   ```
+
+2. **Pull the Latest Updates**:
+   ```bash
+   git pull origin main
+   ```
+
+3. **Update Dependencies**:
+   ```bash
+   pnpm install
+   ```
+
+4. **Restore Your Local Changes** (if any):
+   ```bash
+   git stash pop
+   ```
+
+#### Troubleshooting Git Setup
+
+If you encounter issues:
+
+1. **Clean Installation**:
+   ```bash
+   # Remove node modules and lock files
+   rm -rf node_modules pnpm-lock.yaml
+
+   # Clear the pnpm cache
+   pnpm store prune
+
+   # Reinstall dependencies
+   pnpm install
+   ```
+
+2. **Reset Local Changes**:
+   ```bash
+   # Discard all local changes
+   git reset --hard origin/main
+   ```
+
+Remember to commit or stash your local changes before pulling updates to avoid conflicts.
+
+---
+
+## Available Scripts
+
+- **`pnpm run dev`**: Starts the development server.
+- **`pnpm run build`**: Builds the project.
+- **`pnpm run start`**: Runs the built application locally using Wrangler Pages.
+- **`pnpm run preview`**: Builds and runs the production build locally.
+- **`pnpm test`**: Runs the test suite using Vitest.
+- **`pnpm run typecheck`**: Runs TypeScript type checking.
+- **`pnpm run typegen`**: Generates TypeScript types using Wrangler.
+- **`pnpm run deploy`**: Deploys the project to Cloudflare Pages.
+- **`pnpm run lint:fix`**: Automatically fixes linting issues.
+
+---
+
+## Contributing
+
+We welcome contributions! Check out our [Contributing Guide](CONTRIBUTING.md) to get started.
+
+---
+
+## Roadmap
+
+Explore upcoming features and priorities on our [Roadmap](https://roadmap.sh/r/ottodev-roadmap-2ovzo).
+
+---
+
+## FAQ
+
+For answers to common questions, issues, and to see a list of recommended models, visit our [FAQ Page](FAQ.md).
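Custom base URLs configured in the settings UI are likewise kept client-side: `BaseChat.tsx` later in this diff reads a `providers` cookie and parses it as a `Record<string, IProviderSetting>` before initializing the model list. The fields of `IProviderSetting` are not shown in this diff, so the shape below (including `baseUrl`) is an assumption used only to illustrate how such settings could be read defensively:

```typescript
import Cookies from 'js-cookie';

// Assumed setting shape; only the cookie name ('providers') and the
// Record<string, IProviderSetting> typing are visible in this diff.
interface ProviderSettingSketch {
  enabled?: boolean; // assumption
  baseUrl?: string; // assumption: where a custom base URL might be stored
}

function readProviderSettings(): Record<string, ProviderSettingSketch> {
  try {
    const raw = Cookies.get('providers');
    const parsed = raw ? JSON.parse(raw) : undefined;
    return typeof parsed === 'object' && parsed !== null ? parsed : {};
  } catch {
    // Mirrors BaseChat.tsx: invalid cookie data is removed rather than reused.
    Cookies.remove('providers');
    return {};
  }
}

// Usage (provider name and fallback URL are placeholders):
const ollamaBaseUrl = readProviderSettings()['Ollama']?.baseUrl ?? 'http://127.0.0.1:11434';
console.log(ollamaBaseUrl);
```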
diff --git a/app/components/chat/APIKeyManager.tsx b/app/components/chat/APIKeyManager.tsx new file mode 100644 index 0000000..9226336 --- /dev/null +++ b/app/components/chat/APIKeyManager.tsx @@ -0,0 +1,169 @@ +import React, { useState, useEffect, useCallback } from 'react'; +import { IconButton } from '~/components/ui/IconButton'; +import type { ProviderInfo } from '~/types/model'; +import Cookies from 'js-cookie'; + +interface APIKeyManagerProps { + provider: ProviderInfo; + apiKey: string; + setApiKey: (key: string) => void; + getApiKeyLink?: string; + labelForGetApiKey?: string; +} + +// cache which stores whether the provider's API key is set via environment variable +const providerEnvKeyStatusCache: Record = {}; + +const apiKeyMemoizeCache: { [k: string]: Record } = {}; + +export function getApiKeysFromCookies() { + const storedApiKeys = Cookies.get('apiKeys'); + let parsedKeys: Record = {}; + + if (storedApiKeys) { + parsedKeys = apiKeyMemoizeCache[storedApiKeys]; + + if (!parsedKeys) { + parsedKeys = apiKeyMemoizeCache[storedApiKeys] = JSON.parse(storedApiKeys); + } + } + + return parsedKeys; +} + +// eslint-disable-next-line @typescript-eslint/naming-convention +export const APIKeyManager: React.FC = ({ provider, apiKey, setApiKey }) => { + const [isEditing, setIsEditing] = useState(false); + const [tempKey, setTempKey] = useState(apiKey); + const [isEnvKeySet, setIsEnvKeySet] = useState(false); + + // Reset states and load saved key when provider changes + useEffect(() => { + // Load saved API key from cookies for this provider + const savedKeys = getApiKeysFromCookies(); + const savedKey = savedKeys[provider.name] || ''; + + setTempKey(savedKey); + setApiKey(savedKey); + setIsEditing(false); + }, [provider.name]); + + const checkEnvApiKey = useCallback(async () => { + // Check cache first + if (providerEnvKeyStatusCache[provider.name] !== undefined) { + setIsEnvKeySet(providerEnvKeyStatusCache[provider.name]); + return; + } + + try { + const response = await fetch(`/api/check-env-key?provider=${encodeURIComponent(provider.name)}`); + const data = await response.json(); + const isSet = (data as { isSet: boolean }).isSet; + + // Cache the result + providerEnvKeyStatusCache[provider.name] = isSet; + setIsEnvKeySet(isSet); + } catch (error) { + console.error('Failed to check environment API key:', error); + setIsEnvKeySet(false); + } + }, [provider.name]); + + useEffect(() => { + checkEnvApiKey(); + }, [checkEnvApiKey]); + + const handleSave = () => { + // Save to parent state + setApiKey(tempKey); + + // Save to cookies + const currentKeys = getApiKeysFromCookies(); + const newKeys = { ...currentKeys, [provider.name]: tempKey }; + Cookies.set('apiKeys', JSON.stringify(newKeys)); + + setIsEditing(false); + }; + + return ( +
+
+
+ {provider?.name} API Key: + {!isEditing && ( +
+ {apiKey ? ( + <> +
+ Set via UI + + ) : isEnvKeySet ? ( + <> +
+ Set via environment variable + + ) : ( + <> +
+ Not Set (Please set via UI or ENV_VAR) + + )} +
+ )} +
+
+ +
+ {isEditing ? ( +
+ setTempKey(e.target.value)} + className="w-[300px] px-3 py-1.5 text-sm rounded border border-bolt-elements-borderColor + bg-bolt-elements-prompt-background text-bolt-elements-textPrimary + focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus" + /> + +
+ + setIsEditing(false)} + title="Cancel" + className="bg-red-500/10 hover:bg-red-500/20 text-red-500" + > +
+ +
+ ) : ( + <> + { + setIsEditing(true)} + title="Edit API Key" + className="bg-blue-500/10 hover:bg-blue-500/20 text-blue-500" + > +
+ + } + {provider?.getApiKeyLink && !apiKey && ( + window.open(provider?.getApiKeyLink)} + title="Get API Key" + className="bg-purple-500/10 hover:bg-purple-500/20 text-purple-500 flex items-center gap-2" + > + {provider?.labelForGetApiKey || 'Get API Key'} +
+ + )} + + )} +
+
+ ); +}; diff --git a/app/components/chat/Artifact.tsx b/app/components/chat/Artifact.tsx new file mode 100644 index 0000000..5f0c991 --- /dev/null +++ b/app/components/chat/Artifact.tsx @@ -0,0 +1,263 @@ +import { useStore } from '@nanostores/react'; +import { AnimatePresence, motion } from 'framer-motion'; +import { computed } from 'nanostores'; +import { memo, useEffect, useRef, useState } from 'react'; +import { createHighlighter, type BundledLanguage, type BundledTheme, type HighlighterGeneric } from 'shiki'; +import type { ActionState } from '~/lib/runtime/action-runner'; +import { workbenchStore } from '~/lib/stores/workbench'; +import { classNames } from '~/utils/classNames'; +import { cubicEasingFn } from '~/utils/easings'; +import { WORK_DIR } from '~/utils/constants'; + +const highlighterOptions = { + langs: ['shell'], + themes: ['light-plus', 'dark-plus'], +}; + +const shellHighlighter: HighlighterGeneric = + import.meta.hot?.data.shellHighlighter ?? (await createHighlighter(highlighterOptions)); + +if (import.meta.hot) { + import.meta.hot.data.shellHighlighter = shellHighlighter; +} + +interface ArtifactProps { + messageId: string; +} + +export const Artifact = memo(({ messageId }: ArtifactProps) => { + const userToggledActions = useRef(false); + const [showActions, setShowActions] = useState(false); + const [allActionFinished, setAllActionFinished] = useState(false); + + const artifacts = useStore(workbenchStore.artifacts); + const artifact = artifacts[messageId]; + + const actions = useStore( + computed(artifact.runner.actions, (actions) => { + return Object.values(actions); + }), + ); + + const toggleActions = () => { + userToggledActions.current = true; + setShowActions(!showActions); + }; + + useEffect(() => { + if (actions.length && !showActions && !userToggledActions.current) { + setShowActions(true); + } + + if (actions.length !== 0 && artifact.type === 'bundled') { + const finished = !actions.find((action) => action.status !== 'complete'); + + if (allActionFinished !== finished) { + setAllActionFinished(finished); + } + } + }, [actions]); + + return ( +
+
+ +
+ + {actions.length && artifact.type !== 'bundled' && ( + +
+
+
+
+ )} +
+
+ + {artifact.type !== 'bundled' && showActions && actions.length > 0 && ( + +
+ +
+ +
+ + )} + +
+ ); +}); + +interface ShellCodeBlockProps { + classsName?: string; + code: string; +} + +function ShellCodeBlock({ classsName, code }: ShellCodeBlockProps) { + return ( +
+ ); +} + +interface ActionListProps { + actions: ActionState[]; +} + +const actionVariants = { + hidden: { opacity: 0, y: 20 }, + visible: { opacity: 1, y: 0 }, +}; + +function openArtifactInWorkbench(filePath: any) { + if (workbenchStore.currentView.get() !== 'code') { + workbenchStore.currentView.set('code'); + } + + workbenchStore.setSelectedFile(`${WORK_DIR}/${filePath}`); +} + +const ActionList = memo(({ actions }: ActionListProps) => { + return ( + +
    + {actions.map((action, index) => { + const { status, type, content } = action; + const isLast = index === actions.length - 1; + + return ( + +
    +
    + {status === 'running' ? ( + <> + {type !== 'start' ? ( +
    + ) : ( +
    + )} + + ) : status === 'pending' ? ( +
    + ) : status === 'complete' ? ( +
    + ) : status === 'failed' || status === 'aborted' ? ( +
    + ) : null} +
    + {type === 'file' ? ( +
    + Create{' '} + openArtifactInWorkbench(action.filePath)} + > + {action.filePath} + +
    + ) : type === 'shell' ? ( +
    + Run command +
    + ) : type === 'start' ? ( + { + e.preventDefault(); + workbenchStore.currentView.set('preview'); + }} + className="flex items-center w-full min-h-[28px]" + > + Start Application + + ) : null} +
    + {(type === 'shell' || type === 'start') && ( + + )} +
    + ); + })} +
+
+ ); +}); + +function getIconColor(status: ActionState['status']) { + switch (status) { + case 'pending': { + return 'text-bolt-elements-textTertiary'; + } + case 'running': { + return 'text-bolt-elements-loader-progress'; + } + case 'complete': { + return 'text-bolt-elements-icon-success'; + } + case 'aborted': { + return 'text-bolt-elements-textSecondary'; + } + case 'failed': { + return 'text-bolt-elements-icon-error'; + } + default: { + return undefined; + } + } +} diff --git a/app/components/chat/AssistantMessage.tsx b/app/components/chat/AssistantMessage.tsx new file mode 100644 index 0000000..be304c7 --- /dev/null +++ b/app/components/chat/AssistantMessage.tsx @@ -0,0 +1,31 @@ +import { memo } from 'react'; +import { Markdown } from './Markdown'; +import type { JSONValue } from 'ai'; + +interface AssistantMessageProps { + content: string; + annotations?: JSONValue[]; +} + +export const AssistantMessage = memo(({ content, annotations }: AssistantMessageProps) => { + const filteredAnnotations = (annotations?.filter( + (annotation: JSONValue) => annotation && typeof annotation === 'object' && Object.keys(annotation).includes('type'), + ) || []) as { type: string; value: any }[]; + + const usage: { + completionTokens: number; + promptTokens: number; + totalTokens: number; + } = filteredAnnotations.find((annotation) => annotation.type === 'usage')?.value; + + return ( +
+ {usage && ( +
+ Tokens: {usage.totalTokens} (prompt: {usage.promptTokens}, completion: {usage.completionTokens}) +
+ )} + {content} +
+ ); +}); diff --git a/app/components/chat/BaseChat.module.scss b/app/components/chat/BaseChat.module.scss new file mode 100644 index 0000000..4908e34 --- /dev/null +++ b/app/components/chat/BaseChat.module.scss @@ -0,0 +1,47 @@ +.BaseChat { + &[data-chat-visible='false'] { + --workbench-inner-width: 100%; + --workbench-left: 0; + + .Chat { + --at-apply: bolt-ease-cubic-bezier; + transition-property: transform, opacity; + transition-duration: 0.3s; + will-change: transform, opacity; + transform: translateX(-50%); + opacity: 0; + } + } +} + +.Chat { + opacity: 1; +} + +.PromptEffectContainer { + --prompt-container-offset: 50px; + --prompt-line-stroke-width: 1px; + position: absolute; + pointer-events: none; + inset: calc(var(--prompt-container-offset) / -2); + width: calc(100% + var(--prompt-container-offset)); + height: calc(100% + var(--prompt-container-offset)); +} + +.PromptEffectLine { + width: calc(100% - var(--prompt-container-offset) + var(--prompt-line-stroke-width)); + height: calc(100% - var(--prompt-container-offset) + var(--prompt-line-stroke-width)); + x: calc(var(--prompt-container-offset) / 2 - var(--prompt-line-stroke-width) / 2); + y: calc(var(--prompt-container-offset) / 2 - var(--prompt-line-stroke-width) / 2); + rx: calc(8px - var(--prompt-line-stroke-width)); + fill: transparent; + stroke-width: var(--prompt-line-stroke-width); + stroke: url(#line-gradient); + stroke-dasharray: 35px 65px; + stroke-dashoffset: 10; +} + +.PromptShine { + fill: url(#shine-gradient); + mix-blend-mode: overlay; +} diff --git a/app/components/chat/BaseChat.tsx b/app/components/chat/BaseChat.tsx new file mode 100644 index 0000000..bf995a3 --- /dev/null +++ b/app/components/chat/BaseChat.tsx @@ -0,0 +1,625 @@ +/* + * @ts-nocheck + * Preventing TS checks with files presented in the video for a better presentation. 
+ */ +import type { Message } from 'ai'; +import React, { type RefCallback, useCallback, useEffect, useState } from 'react'; +import { ClientOnly } from 'remix-utils/client-only'; +import { Menu } from '~/components/sidebar/Menu.client'; +import { IconButton } from '~/components/ui/IconButton'; +import { Workbench } from '~/components/workbench/Workbench.client'; +import { classNames } from '~/utils/classNames'; +import { MODEL_LIST, PROVIDER_LIST, initializeModelList } from '~/utils/constants'; +import { Messages } from './Messages.client'; +import { SendButton } from './SendButton.client'; +import { APIKeyManager, getApiKeysFromCookies } from './APIKeyManager'; +import Cookies from 'js-cookie'; +import * as Tooltip from '@radix-ui/react-tooltip'; + +import styles from './BaseChat.module.scss'; +import { ExportChatButton } from '~/components/chat/chatExportAndImport/ExportChatButton'; +import { ImportButtons } from '~/components/chat/chatExportAndImport/ImportButtons'; +import { ExamplePrompts } from '~/components/chat/ExamplePrompts'; +import GitCloneButton from './GitCloneButton'; + +import FilePreview from './FilePreview'; +import { ModelSelector } from '~/components/chat/ModelSelector'; +import { SpeechRecognitionButton } from '~/components/chat/SpeechRecognition'; +import type { IProviderSetting, ProviderInfo } from '~/types/model'; +import { ScreenshotStateManager } from './ScreenshotStateManager'; +import { toast } from 'react-toastify'; +import StarterTemplates from './StarterTemplates'; +import type { ActionAlert } from '~/types/actions'; +import ChatAlert from './ChatAlert'; +import { LLMManager } from '~/lib/modules/llm/manager'; + +const TEXTAREA_MIN_HEIGHT = 76; + +interface BaseChatProps { + textareaRef?: React.RefObject | undefined; + messageRef?: RefCallback | undefined; + scrollRef?: RefCallback | undefined; + showChat?: boolean; + chatStarted?: boolean; + isStreaming?: boolean; + messages?: Message[]; + description?: string; + enhancingPrompt?: boolean; + promptEnhanced?: boolean; + input?: string; + model?: string; + setModel?: (model: string) => void; + provider?: ProviderInfo; + setProvider?: (provider: ProviderInfo) => void; + providerList?: ProviderInfo[]; + handleStop?: () => void; + sendMessage?: (event: React.UIEvent, messageInput?: string) => void; + handleInputChange?: (event: React.ChangeEvent) => void; + enhancePrompt?: () => void; + importChat?: (description: string, messages: Message[]) => Promise; + exportChat?: () => void; + uploadedFiles?: File[]; + setUploadedFiles?: (files: File[]) => void; + imageDataList?: string[]; + setImageDataList?: (dataList: string[]) => void; + actionAlert?: ActionAlert; + clearAlert?: () => void; +} + +export const BaseChat = React.forwardRef( + ( + { + textareaRef, + messageRef, + scrollRef, + showChat = true, + chatStarted = false, + isStreaming = false, + model, + setModel, + provider, + setProvider, + providerList, + input = '', + enhancingPrompt, + handleInputChange, + + // promptEnhanced, + enhancePrompt, + sendMessage, + handleStop, + importChat, + exportChat, + uploadedFiles = [], + setUploadedFiles, + imageDataList = [], + setImageDataList, + messages, + actionAlert, + clearAlert, + }, + ref, + ) => { + const TEXTAREA_MAX_HEIGHT = chatStarted ? 
400 : 200; + const [apiKeys, setApiKeys] = useState>(getApiKeysFromCookies()); + const [modelList, setModelList] = useState(MODEL_LIST); + const [isModelSettingsCollapsed, setIsModelSettingsCollapsed] = useState(false); + const [isListening, setIsListening] = useState(false); + const [recognition, setRecognition] = useState(null); + const [transcript, setTranscript] = useState(''); + const [isModelLoading, setIsModelLoading] = useState('all'); + + const getProviderSettings = useCallback(() => { + let providerSettings: Record | undefined = undefined; + + try { + const savedProviderSettings = Cookies.get('providers'); + + if (savedProviderSettings) { + const parsedProviderSettings = JSON.parse(savedProviderSettings); + + if (typeof parsedProviderSettings === 'object' && parsedProviderSettings !== null) { + providerSettings = parsedProviderSettings; + } + } + } catch (error) { + console.error('Error loading Provider Settings from cookies:', error); + + // Clear invalid cookie data + Cookies.remove('providers'); + } + + return providerSettings; + }, []); + useEffect(() => { + console.log(transcript); + }, [transcript]); + + useEffect(() => { + if (typeof window !== 'undefined' && ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window)) { + const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; + const recognition = new SpeechRecognition(); + recognition.continuous = true; + recognition.interimResults = true; + + recognition.onresult = (event) => { + const transcript = Array.from(event.results) + .map((result) => result[0]) + .map((result) => result.transcript) + .join(''); + + setTranscript(transcript); + + if (handleInputChange) { + const syntheticEvent = { + target: { value: transcript }, + } as React.ChangeEvent; + handleInputChange(syntheticEvent); + } + }; + + recognition.onerror = (event) => { + console.error('Speech recognition error:', event.error); + setIsListening(false); + }; + + setRecognition(recognition); + } + }, []); + + useEffect(() => { + if (typeof window !== 'undefined') { + const providerSettings = getProviderSettings(); + let parsedApiKeys: Record | undefined = {}; + + try { + parsedApiKeys = getApiKeysFromCookies(); + setApiKeys(parsedApiKeys); + } catch (error) { + console.error('Error loading API keys from cookies:', error); + + // Clear invalid cookie data + Cookies.remove('apiKeys'); + } + setIsModelLoading('all'); + initializeModelList({ apiKeys: parsedApiKeys, providerSettings }) + .then((modelList) => { + setModelList(modelList); + }) + .catch((error) => { + console.error('Error initializing model list:', error); + }) + .finally(() => { + setIsModelLoading(undefined); + }); + } + }, [providerList, provider]); + + const onApiKeysChange = async (providerName: string, apiKey: string) => { + const newApiKeys = { ...apiKeys, [providerName]: apiKey }; + setApiKeys(newApiKeys); + Cookies.set('apiKeys', JSON.stringify(newApiKeys)); + + const provider = LLMManager.getInstance(import.meta.env || process.env || {}).getProvider(providerName); + + if (provider && provider.getDynamicModels) { + setIsModelLoading(providerName); + + try { + const providerSettings = getProviderSettings(); + const staticModels = provider.staticModels; + const dynamicModels = await provider.getDynamicModels( + newApiKeys, + providerSettings, + import.meta.env || process.env || {}, + ); + + setModelList((preModels) => { + const filteredOutPreModels = preModels.filter((x) => x.provider !== providerName); + return [...filteredOutPreModels, ...staticModels, 
...dynamicModels]; + }); + } catch (error) { + console.error('Error loading dynamic models:', error); + } + setIsModelLoading(undefined); + } + }; + + const startListening = () => { + if (recognition) { + recognition.start(); + setIsListening(true); + } + }; + + const stopListening = () => { + if (recognition) { + recognition.stop(); + setIsListening(false); + } + }; + + const handleSendMessage = (event: React.UIEvent, messageInput?: string) => { + if (sendMessage) { + sendMessage(event, messageInput); + + if (recognition) { + recognition.abort(); // Stop current recognition + setTranscript(''); // Clear transcript + setIsListening(false); + + // Clear the input by triggering handleInputChange with empty value + if (handleInputChange) { + const syntheticEvent = { + target: { value: '' }, + } as React.ChangeEvent; + handleInputChange(syntheticEvent); + } + } + } + }; + + const handleFileUpload = () => { + const input = document.createElement('input'); + input.type = 'file'; + input.accept = 'image/*'; + + input.onchange = async (e) => { + const file = (e.target as HTMLInputElement).files?.[0]; + + if (file) { + const reader = new FileReader(); + + reader.onload = (e) => { + const base64Image = e.target?.result as string; + setUploadedFiles?.([...uploadedFiles, file]); + setImageDataList?.([...imageDataList, base64Image]); + }; + reader.readAsDataURL(file); + } + }; + + input.click(); + }; + + const handlePaste = async (e: React.ClipboardEvent) => { + const items = e.clipboardData?.items; + + if (!items) { + return; + } + + for (const item of items) { + if (item.type.startsWith('image/')) { + e.preventDefault(); + + const file = item.getAsFile(); + + if (file) { + const reader = new FileReader(); + + reader.onload = (e) => { + const base64Image = e.target?.result as string; + setUploadedFiles?.([...uploadedFiles, file]); + setImageDataList?.([...imageDataList, base64Image]); + }; + reader.readAsDataURL(file); + } + + break; + } + } + }; + + const baseChat = ( +
+ {() => } +
+
+ {!chatStarted && ( +
+

+ Where ideas begin +

+

+ Bring ideas to life in seconds or get help on existing projects. +

+
+ )} +
+ + {() => { + return chatStarted ? ( + + ) : null; + }} + +
+
+ {actionAlert && ( + clearAlert?.()} + postMessage={(message) => { + sendMessage?.({} as any, message); + clearAlert?.(); + }} + /> + )} +
+
+ + + + + + + + + + + + + + + + + + +
+ + {() => ( +
+ + {(providerList || []).length > 0 && provider && ( + { + onApiKeysChange(provider.name, key); + }} + /> + )} +
+ )} +
+
+ { + setUploadedFiles?.(uploadedFiles.filter((_, i) => i !== index)); + setImageDataList?.(imageDataList.filter((_, i) => i !== index)); + }} + /> + + {() => ( + + )} + +
+