
chore(deps): upgrade deps #50

Workflow file for this run

name: CI/CD Pipeline
on:
pull_request:
branches: [master, develop, 'release/**']
types: [opened, synchronize, reopened, ready_for_review]
push:
branches: [master, develop]
tags: ['v*']
workflow_dispatch:
inputs:
debug_enabled:
type: boolean
description: 'Enable debug logging'
required: false
default: false
schedule:
- cron: '0 2 * * 1' # Weekly dependency check
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/master' }}
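# Illustrative evaluation of the two expressions above: for this PR, github.ref
# resolves to something like 'refs/pull/50/merge', so the group becomes
# 'CI/CD Pipeline-refs/pull/50/merge' and a newer push cancels the in-flight run.
# For pushes to master the ref check is false, so those runs queue instead of
# being cancelled.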
env:
NODE_ENV: ci
FORCE_COLOR: 3
BUN_VERSION: 'latest'
NODE_VERSION: '20'
PNPM_VERSION: '8'
permissions:
contents: read
checks: write
pull-requests: write
security-events: write
issues: write
packages: write
jobs:
# ========================================
# CHANGE DETECTION
# ========================================
changes:
name: 🔍 Detect Changes
runs-on: ubuntu-latest
outputs:
src: ${{ steps.changes.outputs.src }}
tests: ${{ steps.changes.outputs.tests }}
deps: ${{ steps.changes.outputs.deps }}
docs: ${{ steps.changes.outputs.docs }}
ci: ${{ steps.changes.outputs.ci }}
config: ${{ steps.changes.outputs.config }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 2
- uses: dorny/paths-filter@v3
id: changes
with:
base: ${{ github.ref }}
filters: |
src:
- 'src/**'
- '*.ts'
- '*.js'
tests:
- 'tests/**'
- '**/*.test.ts'
- '**/*.spec.ts'
- 'vitest.config.ts'
deps:
- 'package.json'
- 'bun.lockb'
- 'pnpm-lock.yaml'
- 'package-lock.json'
- 'yarn.lock'
docs:
- '**.md'
- 'docs/**'
- 'assets/**'
ci:
- '.github/**'
config:
- 'tsconfig.json'
- 'bunfig.toml'
- '.bun-version'
- '.nvmrc'
- '.prettierrc*'
- '.eslintrc*'
# ========================================
# SECURITY & COMPLIANCE
# ========================================
security:
name: 🔒 Security Audit
runs-on: ubuntu-latest
if: github.event_name != 'schedule' || github.event_name == 'workflow_dispatch'
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: 🔧 Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: ${{ env.BUN_VERSION }}
- name: 🔧 Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
- name: 📦 Cache dependencies
uses: actions/cache@v4
with:
path: |
~/.bun/install/cache
~/.npm
~/.cache
key: ${{ runner.os }}-deps-${{ hashFiles('**/bun.lockb', '**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-deps-
- name: 🔍 Audit Bun dependencies
run: |
if [ -f "bun.lockb" ]; then
bun audit || true
fi
continue-on-error: true
- name: 🔍 Audit npm dependencies
run: |
if [ -f "package-lock.json" ]; then
npm audit --audit-level=moderate || true
fi
continue-on-error: true
- name: 🛡️ Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
with:
scan-type: 'fs'
scan-ref: '.'
format: 'sarif'
output: 'trivy-results.sarif'
severity: 'CRITICAL,HIGH'
- name: 📋 Upload Trivy results to GitHub Security
uses: github/codeql-action/upload-sarif@v3
if: always()
with:
sarif_file: 'trivy-results.sarif'
- name: 📋 Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: javascript-typescript
queries: security-and-quality
- name: 🏗️ Autobuild
uses: github/codeql-action/autobuild@v3
- name: 🔬 Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:typescript"
- name: 🔐 Check for secrets
uses: trufflesecurity/trufflehog@main
with:
path: ./
base: ${{ github.event.repository.default_branch }}
head: HEAD
extra_args: --debug --only-verified
# ========================================
# CODE QUALITY & LINTING
# ========================================
lint:
name: 🎨 Code Quality
runs-on: ubuntu-latest
needs: changes
if: |
needs.changes.outputs.src == 'true' ||
needs.changes.outputs.tests == 'true' ||
needs.changes.outputs.config == 'true' ||
github.event_name == 'workflow_dispatch'
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: 🔧 Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: ${{ env.BUN_VERSION }}
- name: 📦 Restore dependency cache
uses: actions/cache@v4
id: cache
with:
path: |
~/.bun/install/cache
node_modules
key: ${{ runner.os }}-bun-${{ hashFiles('**/bun.lockb') }}-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-bun-${{ hashFiles('**/bun.lockb') }}-
${{ runner.os }}-bun-
- name: 📦 Install dependencies
if: steps.cache.outputs.cache-hit != 'true'
run: bun install --frozen-lockfile
- name: 🔍 Run ESLint
if: hashFiles('.eslintrc*') != ''
run: |
if [ -x "node_modules/.bin/eslint" ]; then
bun run lint
fi
continue-on-error: true
- name: 🎨 Check code formatting
run: |
if [ -f ".prettierrc" ] || [ -f ".prettierrc.json" ] || [ -f ".prettierrc.js" ]; then
bun run format:check || true
fi
- name: 💪 Type checking
run: |
if [ -f "tsconfig.json" ]; then
bun run typecheck || bunx tsc --noEmit
fi
- name: 🛠️ Build project
run: bun run build
env:
NODE_ENV: production
- name: 📊 Upload build artifacts
uses: actions/upload-artifact@v4
with:
name: build-${{ github.sha }}
path: |
dist/
retention-days: 7
compression-level: 9
- name: 📈 SonarCloud Scan
if: github.event_name == 'pull_request' && github.actor != 'dependabot[bot]'
uses: SonarSource/sonarcloud-github-action@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
continue-on-error: true
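# Note: the quality and test jobs call repository scripts by name (lint,
# format:check, typecheck, build, test, test:coverage, test:junit). An
# illustrative, hypothetical package.json "scripts" block that would satisfy
# them -- the real commands live in the repository:
#   "scripts": {
#     "build": "bun build src/cli.ts --outdir dist",
#     "lint": "eslint .",
#     "format:check": "prettier --check .",
#     "typecheck": "tsc --noEmit",
#     "test": "vitest run",
#     "test:coverage": "vitest run --coverage",
#     "test:junit": "vitest run --reporter=junit --outputFile=test-results/junit.xml"
#   }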
# ========================================
# TESTING
# ========================================
test:
name: 🧪 Test (${{ matrix.os }}, ${{ matrix.runtime }}-${{ matrix.version }})
runs-on: ${{ matrix.os }}
needs: [changes, lint]
if: |
needs.changes.outputs.src == 'true' ||
needs.changes.outputs.tests == 'true' ||
github.event_name == 'workflow_dispatch'
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
runtime: [bun, node]
version: ['latest', 'lts']
exclude:
- runtime: bun
version: lts
- os: windows-latest
runtime: bun
include:
- os: ubuntu-latest
runtime: node
version: '18'
coverage: true
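# This matrix expands to nine jobs: bun/latest on ubuntu and macos, node
# latest and lts on all three operating systems, plus the extra ubuntu
# node 18 leg that also collects coverage (bun/lts and windows/bun are
# excluded above).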
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- name: 🔧 Setup Bun
if: matrix.runtime == 'bun'
uses: oven-sh/setup-bun@v2
with:
bun-version: ${{ matrix.version }}
- name: 🔧 Setup Node.js
if: matrix.runtime == 'node'
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.version == 'latest' && '21' || matrix.version == 'lts' && '20' || matrix.version }}
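# Illustrative mapping from the expression above: 'latest' selects Node 21,
# 'lts' selects Node 20, and the explicit '18' from the include entry passes
# through unchanged.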
- name: 📦 Get package manager
id: pm
run: |
if [ "${{ matrix.runtime }}" == "bun" ]; then
echo "cmd=bun" >> $GITHUB_OUTPUT
echo "cache=~/.bun/install/cache" >> $GITHUB_OUTPUT
else
echo "cmd=npm" >> $GITHUB_OUTPUT
echo "cache=~/.npm" >> $GITHUB_OUTPUT
fi
shell: bash
- name: 📦 Cache dependencies
uses: actions/cache@v4
with:
path: |
${{ steps.pm.outputs.cache }}
node_modules
key: ${{ runner.os }}-${{ matrix.runtime }}-${{ matrix.version }}-${{ hashFiles('**/package.json', '**/bun.lockb', '**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-${{ matrix.runtime }}-${{ matrix.version }}-
${{ runner.os }}-${{ matrix.runtime }}-
- name: 📦 Install dependencies
run: |
if [ "${{ matrix.runtime }}" == "bun" ]; then
bun install --frozen-lockfile
else
npm ci || npm install
fi
shell: bash
- name: 🛠️ Build project
run: ${{ steps.pm.outputs.cmd }} run build
- name: 🧪 Run unit tests
run: ${{ steps.pm.outputs.cmd }} test
env:
CI: true
NODE_ENV: test
- name: 🧪 Run unit tests with coverage
if: matrix.coverage
run: ${{ steps.pm.outputs.cmd }} run test:coverage
env:
CI: true
- name: 📊 Generate test report
if: always()
run: |
if [ -f "package.json" ] && grep -q "test:junit" package.json; then
${{ steps.pm.outputs.cmd }} run test:junit || true
fi
shell: bash
continue-on-error: true
- name: 📈 Upload test results
if: always() && matrix.coverage
uses: actions/upload-artifact@v4
with:
name: test-results-${{ matrix.os }}-${{ matrix.runtime }}-${{ matrix.version }}
path: |
coverage/
test-results/
*.xml
retention-days: 7
- name: 🟩 Upload coverage to Codecov
if: matrix.coverage && !cancelled()
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./coverage/lcov.info,./coverage/coverage-final.json
flags: unittests
name: codecov-${{ matrix.os }}-${{ matrix.runtime }}
fail_ci_if_error: false
verbose: true
# ========================================
# INTEGRATION TESTING
# ========================================
integration:
name: 🔗 Integration Tests
runs-on: ubuntu-latest
needs: [test]
if: |
github.event_name == 'push' ||
github.event_name == 'pull_request' ||
github.event_name == 'workflow_dispatch'
steps:
- uses: actions/checkout@v4
- name: 🔧 Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: ${{ env.BUN_VERSION }}
- name: 🔧 Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
- name: 📦 Download build artifacts
uses: actions/download-artifact@v4
with:
name: build-${{ github.sha }}
path: dist/
- name: 📦 Install dependencies
run: bun install --frozen-lockfile
- name: 🔧 Make CLI executable
run: |
chmod +x ./dist/runtime.js || true
chmod +x ./dist/cli.js || true
- name: 🧪 Test CLI with Bun
run: |
echo "Testing with Bun runtime..."
bun ./dist/runtime.js --help
bun ./dist/runtime.js --version
# Create test environment
mkdir -p test-bun/node_modules/example-package/{docs,tests,coverage}
echo '{"name": "example-package"}' > test-bun/node_modules/example-package/package.json
echo 'test docs' > test-bun/node_modules/example-package/docs/README.md
echo 'test' > test-bun/node_modules/example-package/tests/test.js
# Test pruning with Bun
cd test-bun
bun ../dist/runtime.js --dry-run
bun ../dist/runtime.js --verbose
# Verify pruning worked
[ ! -d "node_modules/example-package/docs" ] || exit 1
[ ! -d "node_modules/example-package/tests" ] || exit 1
- name: 🧪 Test CLI with Node.js
run: |
echo "Testing with Node.js runtime..."
node ./dist/runtime.js --help
node ./dist/runtime.js --version
# Create test environment
mkdir -p test-node/node_modules/example-package/{docs,tests,coverage}
echo '{"name": "example-package"}' > test-node/node_modules/example-package/package.json
echo 'test docs' > test-node/node_modules/example-package/docs/README.md
echo 'test' > test-node/node_modules/example-package/tests/test.js
# Test pruning with Node
cd test-node
node ../dist/runtime.js --dry-run
node ../dist/runtime.js --verbose
# Verify pruning worked
[ ! -d "node_modules/example-package/docs" ] || exit 1
[ ! -d "node_modules/example-package/tests" ] || exit 1
- name: 🧪 Test with various package managers
run: |
# Test with npm project
mkdir -p test-npm && cd test-npm
npm init -y
npm install lodash
node ../dist/runtime.js --dry-run
cd ..
# Test with pnpm project (if available)
if command -v pnpm &> /dev/null; then
mkdir -p test-pnpm && cd test-pnpm
pnpm init
pnpm add lodash
node ../dist/runtime.js --dry-run
cd ..
fi
# ========================================
# PERFORMANCE TESTING
# ========================================
performance:
name: ⚡ Performance Benchmarks
runs-on: ubuntu-latest
needs: [test]
if: github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch'
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: 🔧 Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: ${{ env.BUN_VERSION }}
- name: 📦 Download build artifacts
uses: actions/download-artifact@v4
with:
name: build-${{ github.sha }}
path: dist/
- name: 📦 Install dependencies
run: bun install --frozen-lockfile
- name: 🔧 Make CLI executable
run: chmod +x ./dist/runtime.js
- name: 📊 Run performance benchmarks
run: |
# Create large test environment
echo "Creating benchmark environment with 500 packages..."
mkdir -p benchmark-env/node_modules
for i in {1..500}; do
pkg_dir="benchmark-env/node_modules/package-$i"
mkdir -p "$pkg_dir"/{docs,tests,coverage,examples,.github,scripts}
# Create realistic package structure
echo "{\"name\": \"package-$i\", \"version\": \"1.0.0\"}" > "$pkg_dir/package.json"
echo "# Package $i Documentation" > "$pkg_dir/docs/README.md"
echo "console.log('test $i');" > "$pkg_dir/tests/test.js"
echo "coverage data" > "$pkg_dir/coverage/lcov.info"
echo "example" > "$pkg_dir/examples/example.js"
echo "workflow" > "$pkg_dir/.github/workflow.yml"
echo "#!/bin/bash" > "$pkg_dir/scripts/build.sh"
# Add some larger files
dd if=/dev/zero of="$pkg_dir/docs/large.pdf" bs=1M count=1 2>/dev/null
dd if=/dev/zero of="$pkg_dir/tests/fixtures.dat" bs=500K count=1 2>/dev/null
done
echo "📊 Benchmark Results:"
echo "===================="
# Benchmark dry run
echo -e "\n🔍 Dry Run Performance:"
cd benchmark-env
time ../dist/runtime.js --dry-run
# Benchmark actual pruning
echo -e "\n✂️ Pruning Performance:"
time ../dist/runtime.js --verbose
# Calculate space saved
echo -e "\n💾 Space Analysis:"
du -sh node_modules/ || echo "Already pruned"
# Memory usage
echo -e "\n🧠 Memory Usage:"
/usr/bin/time -v ../dist/runtime.js --dry-run 2>&1 | grep -E "Maximum resident set size"
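# Rough expectation for the numbers above: each synthetic package carries about
# 1.5 MB of prunable binary fixtures (1 MB PDF + 0.5 MB .dat), so the
# 500-package tree holds roughly 750 MB that pruning should reclaim, plus the
# small text files.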
- name: 📊 Compare with previous benchmarks
if: github.event_name == 'pull_request'
run: |
echo "Comparing performance with base branch..."
# This would compare with stored benchmarks from the base branch
# For now, just output current metrics
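# One possible approach (not wired up here): upload the benchmark timings as an
# artifact on pushes to the base branch, download that artifact in PR runs, and
# diff the two runs -- e.g. via a small script or a tool such as hyperfine,
# which can export timings as JSON for comparison.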
# ========================================
# DEPENDENCY MANAGEMENT
# ========================================
dependency-review:
name: 🔍 Dependency Review
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- uses: actions/checkout@v4
- name: 🔍 Dependency Review
uses: actions/dependency-review-action@v4
with:
fail-on-severity: moderate
deny-licenses: GPL-3.0, AGPL-3.0
allow-ghsas: GHSA-yyyy-yyyy-yyyy
- name: 📊 License Scanning
uses: fossas/fossa-action@main
with:
api-key: ${{ secrets.FOSSA_API_KEY }}
continue-on-error: true
# ========================================
# SUMMARY & NOTIFICATIONS
# ========================================
summary:
name: 📊 Pipeline Summary
runs-on: ubuntu-latest
needs: [lint, test, integration, security, performance]
if: always()
steps:
- name: 📊 Generate summary
run: |
echo "## 🚀 CI/CD Pipeline Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Status badges
echo "### Status" >> $GITHUB_STEP_SUMMARY
echo "| Job | Status |" >> $GITHUB_STEP_SUMMARY
echo "|-----|--------|" >> $GITHUB_STEP_SUMMARY
echo "| 🎨 Lint | ${{ needs.lint.result }} |" >> $GITHUB_STEP_SUMMARY
echo "| 🧪 Tests | ${{ needs.test.result }} |" >> $GITHUB_STEP_SUMMARY
echo "| 🔗 Integration | ${{ needs.integration.result }} |" >> $GITHUB_STEP_SUMMARY
echo "| 🔒 Security | ${{ needs.security.result }} |" >> $GITHUB_STEP_SUMMARY
echo "| ⚡ Performance | ${{ needs.performance.result }} |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Details" >> $GITHUB_STEP_SUMMARY
echo "- **Commit**: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
echo "- **Branch**: ${{ github.ref_name }}" >> $GITHUB_STEP_SUMMARY
echo "- **Runner**: ${{ runner.os }}" >> $GITHUB_STEP_SUMMARY
echo "- **Triggered by**: ${{ github.actor }}" >> $GITHUB_STEP_SUMMARY
echo "- **Event**: ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY
- name: 💬 Comment on PR
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const summary = `
## 🚀 CI/CD Pipeline Results
| Check | Status |
|-------|--------|
| 🎨 Code Quality | ${{ needs.lint.result }} |
| 🧪 Tests | ${{ needs.test.result }} |
| 🔗 Integration | ${{ needs.integration.result }} |
| 🔒 Security | ${{ needs.security.result }} |
| ⚡ Performance | ${{ needs.performance.result }} |
[View full details](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})
`;
await github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: summary
});