[.werft] Remove (almost) entirely (#18361)

* [.werft] Delete superfluous files
* Move post-process.sh
* move vm folder
* fix

parent ecbb418b11
commit f96ba5011d

.github/CODEOWNERS (vendored): 10 lines changed
@@ -98,16 +98,6 @@
 # a single review should be enough
 /operations/observability/mixins/cross-teams

-# werft is shared between all teams
-/.werft
-/.werft/ide-* @gitpod-io/team-experience
-/.werft/platform-* @gitpod-io/team-engine
-/.werft/webapp-* @gitpod-io/team-experience
-/.werft/workspace-* @gitpod-io/team-engine
-/.werft/self-hosted-* @gitpod-io/team-engine
-/.werft/*installer-tests* @gitpod-io/team-engine
-/.werft/jobs/build/self-hosted-* @gitpod-io/team-engine
-
 .github/workflows/ide-*.yml @gitpod-io/team-experience
 .github/workflows/jetbrains-*.yml @gitpod-io/team-experience
 .github/workflows/code-nightly.yml @gitpod-io/team-experience
@@ -1,2 +0,0 @@
vm/charts/**
vm/manifests/**
@@ -1,97 +0,0 @@
import * as fs from "fs";
import { SpanStatusCode } from "@opentelemetry/api";
import { FailedSliceError, Werft } from "./util/werft";
import { reportBuildFailureInSlack } from "./util/slack";
import * as Tracing from "./observability/tracing";
import * as VM from "./vm/vm";
import { buildAndPublish } from "./jobs/build/build-and-publish";
import { validateChanges } from "./jobs/build/validate-changes";
import { prepare } from "./jobs/build/prepare";
import { deployToPreviewEnvironment } from "./jobs/build/deploy-to-preview-environment";
import { runIntegrationTests } from "./jobs/build/trigger-integration-tests";
import { triggerSelfHostedPreview, triggerUpgradeTests } from "./jobs/build/self-hosted-upgrade-tests";
import { jobConfig } from "./jobs/build/job-config";

// Will be set once tracing has been initialized
let werft: Werft;
const context: any = JSON.parse(fs.readFileSync("context.json").toString());

Tracing.initialize()
    .then(() => {
        werft = new Werft("build");
    })
    .then(() => run(context))
    .catch((err) => {
        werft.rootSpan.setStatus({
            code: SpanStatusCode.ERROR,
            message: err,
        });

        if (err instanceof FailedSliceError) {
            // This error was produced using werft.fail which means that we
            // already handled it "gracefully"
        } else {
            console.log("Error", err);
        }

        if (context.Repository.ref === "refs/heads/main") {
            reportBuildFailureInSlack(context, err).catch((error: Error) => {
                console.error("Failed to send message to Slack", error);
            });
        }

        // Explicitly not using process.exit as we need to flush tracing, see tracing.js
        process.exitCode = 1;
    })
    .finally(() => {
        werft.phase("Stop kubectl port forwards", "Stopping kubectl port forwards");
        VM.stopKubectlPortForwards();

        werft.phase("Flushing telemetry", "Flushing telemetry before stopping job");
        werft.endAllSpans();
    });

async function run(context: any) {
    const config = jobConfig(werft, context);

    if (!config.withWerft) {
        werft.phase("Build Disabled");
        werft.log("(not building)", "The build is being performed via GitHub Actions; Thus, this Werft build does not run");
        werft.done("(not building)");
        return;
    }

    await validateChanges(werft, config);
    await prepare(werft, config);
    if (config.withUpgradeTests) {
        // this will trigger an upgrade test on a self-hosted gitpod instance on a new cluster.
        await triggerUpgradeTests(werft, config, context.Owner);
        return;
    }

    await buildAndPublish(werft, config);

    if (config.withSelfHostedPreview) {
        await triggerSelfHostedPreview(werft, config, context.Owner);
        return;
    }

    if (!config.withPreview || config.publishRelease) {
        werft.phase("deploy", "not deploying");
        console.log("running without preview environment or publish-release is set");
        return;
    }

    try {
        await deployToPreviewEnvironment(werft, config);
    } catch (e) {
        // We currently don't support concurrent deployments to the same preview environment.
        // Until we do we don't want errors to mark the main build as failed.
        if (config.mainBuild) {
            return;
        }
        throw e;
    }

    await runIntegrationTests(werft, config, context.Owner);
}
@@ -1,252 +0,0 @@
# debug using `werft run github -f -s .werft/build.js -j .werft/build.yaml -a debug=true`
pod:
  serviceAccount: werft
  restartPolicy: Never
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: dev/workload
                operator: In
                values:
                  - "builds"
  volumes:
    - name: monitoring-satellite-preview-token
      secret:
        secretName: monitoring-satellite-preview-token
    - name: monitoring-satellite-stackdriver-credentials
      secret:
        secretName: monitoring-satellite-stackdriver-credentials
    - name: gcp-sa
      secret:
        secretName: gcp-sa-gitpod-dev-deployer
    - name: gcp-sa-release
      secret:
        secretName: gcp-sa-gitpod-release-deployer
    - name: prometheus-remote-write-auth
      secret:
        secretName: prometheus-remote-write-auth
    - name: go-build-cache
      hostPath:
        path: /mnt/disks/ssd0/go-build-cache
        type: DirectoryOrCreate
    - name: harvester-kubeconfig
      secret:
        secretName: harvester-kubeconfig
    - name: harvester-vm-ssh-keys
      secret:
        secretName: harvester-vm-ssh-keys
    - name: harvester-k3s-dockerhub-pull-account
      secret:
        secretName: harvester-k3s-dockerhub-pull-account
    - name: github-token-gitpod-bot
      secret:
        defaultMode: 420
        secretName: github-token-gitpod-bot
    # - name: deploy-key
    #   secret:
    #     secretName: deploy-key
    # - name: github-ssh-key
    #   secret:
    #     secretName: github-ssh-key
    #     defaultMode: 0600
    # - name: gitpod-test-tokens
    #   secret:
    #     secretName: gitpod-test-tokens
  containers:
    - name: testdb
      image: mysql:5.7
      env:
        - name: MYSQL_ROOT_PASSWORD
          value: test
        # Using the same port as in our Gitpod workspaces here
        - name: MYSQL_TCP_PORT
          value: 23306
    - name: build
      image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:aledbf-oci-tool-gha.14121
      workingDir: /workspace
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          memory: "10Gi"
          cpu: "3500m"
        limits:
          memory: "20Gi"
          cpu: "10000m"
      volumeMounts:
        - name: monitoring-satellite-stackdriver-credentials
          mountPath: /mnt/secrets/monitoring-satellite-stackdriver-credentials
        - name: monitoring-satellite-preview-token
          mountPath: /mnt/secrets/monitoring-satellite-preview-token
        - name: gcp-sa
          mountPath: /mnt/secrets/gcp-sa
          readOnly: true
        - name: gcp-sa-release
          mountPath: /mnt/secrets/gcp-sa-release
          readOnly: true
        - name: go-build-cache
          mountPath: /go-build-cache
          readOnly: false
        - name: harvester-kubeconfig
          mountPath: /mnt/secrets/harvester-kubeconfig
        - name: harvester-vm-ssh-keys
          mountPath: /mnt/secrets/harvester-vm-ssh-keys
        - name: harvester-k3s-dockerhub-pull-account
          mountPath: /mnt/secrets/harvester-k3s-dockerhub-pull-account
        - mountPath: /mnt/secrets/github-token-gitpod-bot
          name: github-token-gitpod-bot
        # - name: deploy-key
        #   mountPath: /mnt/secrets/deploy-key
        #   readOnly: true
        # - name: github-ssh-key
        #   mountPath: /mnt/secrets/github-ssh-key
        #   readOnly: true
      env:
        - name: DB_HOST
          value: 127.0.0.1
        - name: LEEWAY_WORKSPACE_ROOT
          value: /workspace
        - name: LEEWAY_REMOTE_CACHE_BUCKET
          {{- if eq .Repository.Ref "refs/heads/main" }}
          value: gitpod-core-leeway-cache-main
          {{- else }}
          value: gitpod-core-leeway-cache-branch
          {{- end }}
        - name: GOPROXY
          value: http://athens-athens-proxy.athens.svc.cluster.local:9999
        - name: GOCACHE
          value: /go-build-cache
        - name: NODENAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: NPM_AUTH_TOKEN
          valueFrom:
            secretKeyRef:
              name: npm-auth-token
              key: npm-auth-token.json
        - name: JB_MARKETPLACE_PUBLISH_TOKEN
          valueFrom:
            secretKeyRef:
              name: jb-marketplace-publish-token
              key: token
        - name: SLACK_NOTIFICATION_PATH
          valueFrom:
            secretKeyRef:
              name: slack-path
              key: slackPath
        - name: DEVX_SLACK_NOTIFICATION_PATH
          valueFrom:
            secretKeyRef:
              name: devx-slack-path
              key: token
        # used for GitHub releases (NOTE: for some reasons the token contains a trailing \n, is trimmed below)
        - name: GITHUB_TOKEN
          valueFrom:
            secretKeyRef:
              name: github-sh-release-token
              key: token
        # - name: GITPOD_TEST_TOKEN_GITHUB
        #   valueFrom:
        #     secretKeyRef:
        #       name: gitpod-test-tokens
        #       key: github-test-token.json
        # - name: GITPOD_TEST_TOKEN_GITLAB
        #   valueFrom:
        #     secretKeyRef:
        #       name: gitpod-test-tokens
        #       key: gitlab-test-token.json
        # - name: GITPOD_TEST_TOKEN_BITBUCKET
        #   valueFrom:
        #     secretKeyRef:
        #       name: gitpod-test-tokens
        #       key: bitbucket-test-token.json
        - name: CODECOV_TOKEN
          valueFrom:
            secretKeyRef:
              name: codecov
              key: token
        - name: HONEYCOMB_DATASET
          value: "werft"
        - name: HONEYCOMB_API_KEY
          valueFrom:
            secretKeyRef:
              name: honeycomb-api-key
              key: apikey
        - name: SEGMENT_IO_TOKEN
          valueFrom:
            secretKeyRef:
              name: self-hosted
              key: segmentIOToken
        - name: JAVA_HOME
          value: /home/gitpod/.sdkman/candidates/java/current
        # Used by the Werft CLI through werft-credential-helper.sh
        - name: WERFT_GITHUB_TOKEN_PATH
          value: "/mnt/secrets/github-token-gitpod-bot/token"
        - name: WERFT_CREDENTIAL_HELPER
          value: "/workspace/dev/preview/werft-credential-helper.sh"
        # When running the build with 'with-integration-tests' these are used
        # to specify what Gitpod user to use in those tests.
        - name: INTEGRATION_TEST_USERNAME
          valueFrom:
            secretKeyRef:
              name: integration-test-user
              key: username
        - name: INTEGRATION_TEST_USER_TOKEN
          valueFrom:
            secretKeyRef:
              name: integration-test-user
              key: token
        - name: ROBOQUAT_TOKEN
          valueFrom:
            secretKeyRef:
              name: github-roboquat-automatic-changelog
              key: token
      command:
        - bash
        - -c
        - |
          sleep 1
          set -Eeuo pipefail

          export GITHUB_TOKEN=$(echo $GITHUB_TOKEN | xargs)
          export DOCKER_HOST=tcp://$NODENAME:2375

          echo "Job is running on node $NODENAME" | werft log slice "Node information"

          ( \
            sudo chown gitpod:gitpod $GOCACHE && \
            sudo chown -R gitpod:gitpod /workspace && \
            echo "done" \
          ) | werft log slice "chowning /workspace and $GOCACHE"

          {{ if .Annotations.leewayfromgit }}
          ( \
            LEEWAY_SRC="github.com/gitpod-io/leeway@{{ .Annotations.leewayfromgit }}" && \
            echo "Installing Leeway from $LEEWAY_SRC" && \
            GOBIN=$(pwd) go install "$LEEWAY_SRC" && \
            sudo mv leeway $(dirname $(which leeway))
          ) | werft log slice "Building fresh Leeway binary from source"
          {{ end }}

          ( \
            mkdir -p /workspace/.ssh && \
            cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa /workspace/.ssh/id_rsa_harvester_vm && \
            cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa.pub /workspace/.ssh/id_rsa_harvester_vm.pub && \
            sudo chmod 600 /workspace/.ssh/id_rsa_harvester_vm && \
            sudo chmod 644 /workspace/.ssh/id_rsa_harvester_vm.pub && \
            echo "done" \
          ) | werft log slice "Prepare SSH keys"

          ( \
            cd .werft && \
            yarn install && \
            mv node_modules .. \
          ) | werft log slice "Installing dependencies"

          printf '{{ toJson . }}' > context.json

          npx ts-node .werft/build.ts
sidecars:
  - testdb
@@ -1,129 +0,0 @@
# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/self-hosted-installer-tests.yaml -a debug=true`
pod:
  serviceAccount: werft
  restartPolicy: Never
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: dev/workload
                operator: In
                values:
                  - "builds"
  securityContext:
    runAsUser: 0
  volumes:
    - name: sh-playground-sa-perm
      secret:
        secretName: sh-playground-sa-perm
    - name: sh-playground-dns-perm
      secret:
        secretName: sh-playground-dns-perm
    - name: sh-aks-perm
      secret:
        secretName: aks-credentials
  containers:
    - name: nightly-test
      image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:aledbf-oci-tool-gha.14121
      workingDir: /workspace
      imagePullPolicy: Always
      volumeMounts:
        - name: sh-playground-sa-perm
          mountPath: /mnt/secrets/sh-playground-sa-perm
        - name: sh-playground-dns-perm # this sa is used for the DNS management
          mountPath: /mnt/secrets/sh-playground-dns-perm
      env:
        - name: GOOGLE_APPLICATION_CREDENTIALS
          value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
        - name: TF_VAR_sa_creds
          value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
        - name: TF_VAR_dns_sa_creds
          value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
        - name: ARM_SUBSCRIPTION_ID
          valueFrom:
            secretKeyRef:
              name: aks-credentials
              key: subscriptionid
        - name: ARM_TENANT_ID
          valueFrom:
            secretKeyRef:
              name: aks-credentials
              key: tenantid
        - name: ARM_CLIENT_ID
          valueFrom:
            secretKeyRef:
              name: aks-credentials
              key: clientid
        - name: ARM_CLIENT_SECRET
          valueFrom:
            secretKeyRef:
              name: aks-credentials
              key: clientsecret
        - name: NODENAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: USER_TOKEN # this is for the integration tests
          valueFrom:
            secretKeyRef:
              name: integration-test-user
              key: token
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              name: aws-credentials
              key: aws-access-key
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              name: aws-credentials
              key: aws-secret-key
        - name: AWS_REGION
          valueFrom:
            secretKeyRef:
              name: aws-credentials
              key: aws-region
        - name: IDE_SLACK_NOTIFICATION_PATH
          valueFrom:
            secretKeyRef:
              name: slack-webhook-urls
              key: ide_jobs
        - name: WS_SLACK_NOTIFICATION_PATH
          valueFrom:
            secretKeyRef:
              name: slack-webhook-urls
              key: workspace_jobs
        - name: SH_SLACK_NOTIFICATION_PATH
          valueFrom:
            secretKeyRef:
              name: slack-webhook-urls
              key: self_hosted_jobs
        - name: self_hosted_jobs
          value: "true"
      command:
        - bash
        - -c
        - |
          sleep 1
          set -Eeuo pipefail

          sudo chown -R gitpod:gitpod /workspace
          sudo apt update && apt install gettext-base

          curl -sLS https://get.k3sup.dev | sh
          curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash

          (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
          printf '{{ toJson . }}' > context.json

          export domain="{{ .Annotations.subdomain }}"
          if [[ "$domain" == "<no value>" ]]; then
            echo "Cleanup all old workspaces"
          else
            export TF_VAR_TEST_ID="$domain"
          fi

          TESTCONFIG="CLEANUP_OLD_TESTS"

          npx ts-node .werft/installer-tests.ts ${TESTCONFIG}
@@ -1,5 +0,0 @@
rules:
  - path: ".werft/build.yaml"
    matchesAll:
      - or: ["repo.ref ~= refs/heads/"]
      - or: ["trigger !== deleted"]
@@ -1,152 +0,0 @@
# debug using `werft run github -f -s .werft/build.js -j .werft/build.yaml -a debug=true`
pod:
  serviceAccount: werft
  restartPolicy: Never
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: dev/workload
                operator: In
                values:
                  - "builds"
  volumes:
    - name: monitoring-satellite-preview-token
      secret:
        secretName: monitoring-satellite-preview-token
    - name: gcp-sa
      secret:
        secretName: gcp-sa-gitpod-dev-deployer
    - name: gcp-sa-release
      secret:
        secretName: gcp-sa-gitpod-release-deployer
    - name: gpsh-coredev-license
      secret:
        secretName: gpsh-coredev-license
    - name: go-build-cache
      hostPath:
        path: /mnt/disks/ssd0/go-build-cache
        type: DirectoryOrCreate
    # - name: deploy-key
    #   secret:
    #     secretName: deploy-key
    # - name: github-ssh-key
    #   secret:
    #     secretName: github-ssh-key
    #     defaultMode: 0600
    # - name: gitpod-test-tokens
    #   secret:
    #     secretName: gitpod-test-tokens
  containers:
    - name: testdb
      image: mysql:5.7
      env:
        - name: MYSQL_ROOT_PASSWORD
          value: test
        # Using the same port as in our Gitpod workspaces here
        - name: MYSQL_TCP_PORT
          value: 23306
    - name: build
      image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:aledbf-oci-tool-gha.14121
      workingDir: /workspace
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - name: monitoring-satellite-preview-token
          mountPath: /mnt/secrets/monitoring-satellite-preview-token
        - name: gcp-sa
          mountPath: /mnt/secrets/gcp-sa
          readOnly: true
        - name: gcp-sa-release
          mountPath: /mnt/secrets/gcp-sa-release
          readOnly: true
        - name: gpsh-coredev-license
          mountPath: /mnt/secrets/gpsh-coredev
          readOnly: true
        - name: go-build-cache
          mountPath: /go-build-cache
          readOnly: false
        # - name: deploy-key
        #   mountPath: /mnt/secrets/deploy-key
        #   readOnly: true
        # - name: github-ssh-key
        #   mountPath: /mnt/secrets/github-ssh-key
        #   readOnly: true
      env:
        - name: LEEWAY_WORKSPACE_ROOT
          value: /workspace
        - name: LEEWAY_REMOTE_CACHE_BUCKET
          {{- if eq .Repository.Ref "refs/heads/master" }}
          value: gitpod-core-leeway-cache-master
          {{- else }}
          value: gitpod-core-leeway-cache-branch
          {{- end }}
        - name: GOPROXY
          value: http://athens-athens-proxy.athens.svc.cluster.local:9999
        - name: GOCACHE
          value: /go-build-cache
        - name: NODENAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: NPM_AUTH_TOKEN
          valueFrom:
            secretKeyRef:
              name: npm-auth-token
              key: npm-auth-token.json
        - name: JB_MARKETPLACE_PUBLISH_TOKEN
          valueFrom:
            secretKeyRef:
              name: jb_marketplace_publish_token
              key: token
        - name: SLACK_NOTIFICATION_PATH
          valueFrom:
            secretKeyRef:
              name: slack-path
              key: slackPath
        # used for GitHub releases (NOTE: for some reasons the token contains a trailing \n, is trimmed below)
        - name: GITHUB_TOKEN
          valueFrom:
            secretKeyRef:
              name: github-sh-release-token
              key: token
        # - name: GITPOD_TEST_TOKEN_GITHUB
        #   valueFrom:
        #     secretKeyRef:
        #       name: gitpod-test-tokens
        #       key: github-test-token.json
        # - name: GITPOD_TEST_TOKEN_GITLAB
        #   valueFrom:
        #     secretKeyRef:
        #       name: gitpod-test-tokens
        #       key: gitlab-test-token.json
        # - name: GITPOD_TEST_TOKEN_BITBUCKET
        #   valueFrom:
        #     secretKeyRef:
        #       name: gitpod-test-tokens
        #       key: bitbucket-test-token.json
        - name: CODECOV_TOKEN
          valueFrom:
            secretKeyRef:
              name: codecov
              key: token
      command:
        - bash
        - -c
        - |
          sleep 1
          set -Eeuo pipefail

          sudo chown gitpod:gitpod $GOCACHE
          export GITHUB_TOKEN=$(echo $GITHUB_TOKEN | xargs)

          export DOCKER_HOST=tcp://$NODENAME:2375
          sudo chown -R gitpod:gitpod /workspace

          (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
          printf '{{ toJson . }}' > context.json

          leeway build components/supervisor/openssh:app
          # npx ts-node .werft/build.ts
sidecars:
  - testdb
@@ -1,97 +0,0 @@
import * as semver from "semver";
import { exec } from "../../util/shell";
import { Werft } from "../../util/werft";
import { GCLOUD_SERVICE_ACCOUNT_PATH } from "./const";
import { JobConfig } from "./job-config";

export async function buildAndPublish(werft: Werft, jobConfig: JobConfig) {
    const {
        publishRelease,
        dontTest,
        version,
        localAppVersion,
        publishToNpm,
        coverageOutput,
    } = jobConfig;

    const releaseBranch = jobConfig.repository.ref;

    // We set it to false as default and only set it true if the build succeeds.
    werft.rootSpan.setAttributes({ "preview.gitpod_built_successfully": false });

    werft.phase("build", "build running");
    const imageRepo = publishRelease ? "gcr.io/gitpod-io/self-hosted" : "eu.gcr.io/gitpod-core-dev/build";

    exec(
        `LICENCE_HEADER_CHECK_ONLY=true leeway run components:update-license-header || { echo "[build|FAIL] There are some license headers missing. Please run 'leeway run components:update-license-header'."; exit 1; }`,
    );

    exec(
        `leeway build --docker-build-options network=host --werft=true -c remote ${
            dontTest ? "--dont-test" : ""
        } --coverage-output-path=${coverageOutput} --save /tmp/dev.tar.gz -Dversion=${version} -DimageRepoBase=eu.gcr.io/gitpod-core-dev/dev dev:all`,
    );

    if (publishRelease) {
        exec(`gcloud auth activate-service-account --key-file "/mnt/secrets/gcp-sa-release/service-account.json"`);
    }

    const buildArguments = Object.entries({
        version: version,
        removeSources: "false",
        imageRepoBase: imageRepo,
        localAppVersion: localAppVersion,
        SEGMENT_IO_TOKEN: process.env.SEGMENT_IO_TOKEN,
        npmPublishTrigger: publishToNpm ? Date.now().toString() : "false",
    }).map(([key, value]) => `-D${key}=${value}`).join(" ");

    const buildFlags = [
        "--docker-build-options network=host",
        "--werft=true",
        "-c remote",
        dontTest ? "--dont-test" : "",
        `--coverage-output-path=${coverageOutput}`,
    ].filter((value) => value).join(" ");

    await exec(`leeway build ${buildFlags} ${buildArguments}`, { async: true });

    if (jobConfig.withLocalPreview) {
        await exec(`leeway build install/preview:docker ${buildFlags} ${buildArguments}`, { async: true });
    }

    if (publishRelease) {
        try {
            werft.phase("publish", "checking version semver compliance...");
            if (!semver.valid(version)) {
                // make this an explicit error as early as possible. Is required by helm Charts.yaml/version
                throw new Error(
                    `'${version}' is not semver compliant and thus cannot be used for Self-Hosted releases!`,
                );
            }

            werft.phase("publish", `preparing GitHub release files...`);
            const releaseFilesTmpDir = exec("mktemp -d", { silent: true }).stdout.trim();
            const releaseTarName = "release.tar.gz";
            exec(
                `leeway build --docker-build-options network=host --werft=true chart:release-tars -Dversion=${version} -DimageRepoBase=${imageRepo} --save ${releaseFilesTmpDir}/${releaseTarName}`,
            );
            exec(`cd ${releaseFilesTmpDir} && tar xzf ${releaseTarName} && rm -f ${releaseTarName}`);

            werft.phase("publish", `publishing GitHub release ${version}...`);
            const prereleaseFlag = semver.prerelease(version) !== null ? "-prerelease" : "";
            const tag = `v${version}`;
            const description = `Gitpod Self-Hosted ${version}<br/><br/>Docs: https://www.gitpod.io/docs/self-hosted/latest/self-hosted/`;
            exec(
                `github-release ${prereleaseFlag} gitpod-io/gitpod ${tag} ${releaseBranch} '${description}' "${releaseFilesTmpDir}/*"`,
            );

            werft.done("publish");
        } catch (err) {
            werft.fail("publish", err);
        } finally {
            exec(`gcloud auth activate-service-account --key-file "${GCLOUD_SERVICE_ACCOUNT_PATH}"`);
        }
    }

    werft.rootSpan.setAttributes({ "preview.gitpod_built_successfully": true });
}
@@ -1,5 +0,0 @@
export const GCLOUD_SERVICE_ACCOUNT_PATH = "/mnt/secrets/gcp-sa/service-account.json";
export const CORE_DEV_KUBECONFIG_PATH = "/workspace/gitpod/kubeconfigs/core-dev";
export const HARVESTER_KUBECONFIG_PATH = "/workspace/gitpod/kubeconfigs/harvester";
export const PREVIEW_K3S_KUBECONFIG_PATH = "/workspace/gitpod/kubeconfigs/k3s";
export const GLOBAL_KUBECONFIG_PATH = process.env.HOME + "/.kube/config"
@@ -1,155 +0,0 @@
import * as fs from "fs";
import { exec } from "../../util/shell";
import { MonitoringSatelliteInstaller } from "../../observability/monitoring-satellite";
import { Werft } from "../../util/werft";
import { Analytics, JobConfig } from "./job-config";
import * as VM from "../../vm/vm";
import { Installer } from "./installer/installer";
import { ChildProcess, spawn } from 'child_process';

// used by Installer
const STACKDRIVER_SERVICEACCOUNT = JSON.parse(
    fs.readFileSync(`/mnt/secrets/monitoring-satellite-stackdriver-credentials/credentials.json`, "utf8"),
);

const phases = {
    DEPLOY: "deploy",
    VM: "Ensure VM Readiness",
};

const installerSlices = {
    INSTALL: "Generate, validate, and install Gitpod",
    DEDICATED_PRESEED: "Preseed for Dedicated",
};

const vmSlices = {
    VM_READINESS: "Waiting for VM readiness",
    KUBECONFIG: "Getting kubeconfig",
};

interface DeploymentConfig {
    version: string;
    destname: string;
    namespace: string;
    domain: string;
    monitoringDomain: string;
    url: string;
    cleanSlateDeployment: boolean;
    installEELicense: boolean;
    withObservability: boolean;
    analytics: Analytics;
}

export async function deployToPreviewEnvironment(werft: Werft, jobConfig: JobConfig) {
    const { version, cleanSlateDeployment, withObservability, installEELicense, workspaceFeatureFlags } = jobConfig;

    const { destname, namespace } = jobConfig.previewEnvironment;

    const domain = `${destname}.preview.gitpod-dev.com`;
    const monitoringDomain = `${destname}.preview.gitpod-dev.com`;
    const url = `https://${domain}`;

    const deploymentConfig: DeploymentConfig = {
        version,
        destname,
        namespace,
        domain,
        monitoringDomain,
        url,
        cleanSlateDeployment,
        installEELicense,
        withObservability,
        analytics: jobConfig.analytics,
    };

    // We set all attributes to false as default and only set it to true once the each process is complete.
    // We only set the attribute for jobs where a VM is expected.
    werft.rootSpan.setAttributes({ "preview.monitoring_installed_successfully": false });

    werft.phase(phases.VM, "Ensuring VM is ready for deployment");

    werft.log(vmSlices.VM_READINESS, "Wait for VM readiness");
    VM.waitForVMReadiness({ name: destname, timeoutSeconds: 60 * 10, slice: vmSlices.VM_READINESS });
    werft.done(vmSlices.VM_READINESS);

    werft.log(vmSlices.KUBECONFIG, "Installing preview context");
    await VM.installPreviewContext({ name: destname, slice: vmSlices.KUBECONFIG });
    werft.done(vmSlices.KUBECONFIG);

    werft.phase(phases.DEPLOY, "Deploying Gitpod and Observability Stack");

    const installMonitoringSatellite = (async () => {
        if (!jobConfig.withObservability) {
            werft.log(installerSlices.INSTALL, "skipping installation of Observability Stack");
            return;
        }

        const sliceID = "Install monitoring satellite";
        const monitoringSatelliteInstaller = new MonitoringSatelliteInstaller({
            branch: jobConfig.observability.branch,
            previewName: exec(`previewctl get name --branch=${jobConfig.repository.branch}`).stdout.trim(),
            stackdriverServiceAccount: STACKDRIVER_SERVICEACCOUNT,
            werft: werft,
        });
        try {
            await monitoringSatelliteInstaller.install(sliceID);
            werft.rootSpan.setAttributes({ "preview.monitoring_installed_successfully": true });
        } catch (err) {
            // Currently failing to install the monitoring-satellite stack shouldn't cause the job to fail
            // so we only mark this single slice as failed.
            werft.failSlice(sliceID, err);
        }
    })();

    const installGitpod = (async () => {
        const installer = new Installer({
            werft: werft,
            previewName: deploymentConfig.destname,
            version: deploymentConfig.version,
            analytics: deploymentConfig.analytics,
            withEELicense: deploymentConfig.installEELicense,
            workspaceFeatureFlags: workspaceFeatureFlags,
            withDedicatedEmulation: jobConfig.withDedicatedEmulation,
        });
        try {
            werft.log(installerSlices.INSTALL, "deploying using installer");
            await installer.install(installerSlices.INSTALL);
            exec(
                `werft log result -d "dev installation" -c github-check-preview-env url https://${deploymentConfig.domain}/workspaces`,
            );
        } catch (err) {
            werft.fail(installerSlices.INSTALL, err);
        }

        if (jobConfig.withDedicatedEmulation) {
            // After the installation is done, and everything is running, we need to prepare first-time access for the admin-user
            let portForwardProcess: ChildProcess | undefined;
            try {
                werft.log(installerSlices.DEDICATED_PRESEED, "preseed for dedicated");
                portForwardProcess = spawn("kubectl", ["port-forward", "deployment/server", "9000", "&"], {
                    shell: true,
                    detached: true,
                    stdio: "overlapped",
                });
                await new Promise(resolve => portForwardProcess.stdout.on('data', resolve)); // wait until process is running

                const adminLoginOts = exec(`curl -X POST localhost:9000/admin-user/login-token/create`, { silent: true }).stdout.trim();
                exec(
                    `werft log result "admin login OTS token" ${adminLoginOts}`,
                );
                exec(
                    `werft log result -d "admin-user login link" url https://${deploymentConfig.domain}/api/login/ots/admin-user/${adminLoginOts}`,
                );
                werft.log(installerSlices.DEDICATED_PRESEED, "done preseeding for dedicated.");
            } catch (err) {
                werft.fail(installerSlices.DEDICATED_PRESEED, err);
            } finally {
                if (portForwardProcess) {
                    portForwardProcess.kill("SIGINT");
                }
            }
        }
    })();

    await Promise.all([installMonitoringSatellite, installGitpod]);
}
@@ -1,40 +0,0 @@
import { execStream } from "../../../util/shell";
import { Werft } from "../../../util/werft";
import { Analytics } from "../job-config";
import { CORE_DEV_KUBECONFIG_PATH, PREVIEW_K3S_KUBECONFIG_PATH } from "../const";

export type InstallerOptions = {
    werft: Werft;
    previewName: string;
    version: string;
    analytics?: Analytics;
    withEELicense: boolean;
    workspaceFeatureFlags: string[];
    withDedicatedEmulation: boolean;
};

export class Installer {
    options: InstallerOptions;

    constructor(options: InstallerOptions) {
        this.options = options;
    }

    async install(slice: string): Promise<void> {
        const environment = {
            VERSION: this.options.version,
            DEV_KUBE_PATH: CORE_DEV_KUBECONFIG_PATH,
            DEV_KUBE_CONTEXT: "dev",
            PREVIEW_K3S_KUBE_PATH: PREVIEW_K3S_KUBECONFIG_PATH,
            PREVIEW_NAME: this.options.previewName,
            GITPOD_ANALYTICS: this.options.analytics,
            GITPOD_WORKSPACE_FEATURE_FLAGS: this.options.workspaceFeatureFlags.join(" "),
            GITPOD_WITH_DEDICATED_EMU: this.options.withDedicatedEmulation,
        };
        const variables = Object.entries(environment)
            .map(([key, value]) => `${key}="${value}"`)
            .join(" ");
        await execStream(`${variables} leeway run dev/preview:deploy-gitpod`, { slice: slice });
        this.options.werft.done(slice);
    }
}
@@ -1,263 +0,0 @@
import {exec} from "../../util/shell";
import {Werft} from "../../util/werft";
import {previewNameFromBranchName} from "../../util/preview";

type IdeIntegrationTests = "ide" | "jetbrains" | "ssh" | "vscode";
type WithIntegrationTests = "skip" | "all" | "workspace" | "webapp" | IdeIntegrationTests

export type Analytics = "skip" | "segment";

export interface JobConfig {
    analytics: Analytics;
    buildConfig: any;
    cleanSlateDeployment: boolean;
    cluster: string;
    coverageOutput: string;
    dontTest: boolean;
    fromVersion: string;
    installEELicense: boolean;
    localAppVersion: string;
    mainBuild: boolean;
    withPreview: boolean;
    publishRelease: boolean;
    publishToJBMarketplace: boolean;
    publishToNpm: string;
    storage: string;
    storageClass: string;
    version: string;
    withContrib: boolean;
    withIntegrationTests: WithIntegrationTests;
    withUpgradeTests: boolean;
    withSelfHostedPreview: boolean;
    withObservability: boolean;
    withLocalPreview: boolean;
    withDedicatedEmulation: boolean;
    workspaceFeatureFlags: string[];
    previewEnvironment: PreviewEnvironmentConfig;
    repository: Repository;
    observability: Observability;
    withLargeVM: boolean;
    certIssuer: string;
    recreatePreview: boolean;
    recreateVm: boolean;
    withWerft: boolean;
    withGceVm: boolean;
}

export interface PreviewEnvironmentConfig {
    destname: string;
    namespace: string;
}

export interface Repository {
    owner: string;
    repo: string;
    ref: string;
    branch: string;
}

export interface Observability {
    // The branch of gitpod-io/observability to use
    branch: string;
}

export function jobConfig(werft: Werft, context: any): JobConfig {
    const sliceId = "Parsing job configuration";
    werft.phase("Job configuration");
    werft.log(sliceId, "Parsing the job configuration");
    const version = parseVersion(context);
    const repo = `${context.Repository.host}/${context.Repository.owner}/${context.Repository.repo}`;
    const mainBuild = repo === "github.com/gitpod-io/gitpod" && context.Repository.ref.includes("refs/heads/main");

    let buildConfig = context.Annotations || {};
    const dontTest = "no-test" in buildConfig;
    const publishRelease = "publish-release" in buildConfig;
    const workspaceFeatureFlags: string[] = ((): string[] => {
        const raw: string = buildConfig["ws-feature-flags"] || "";
        if (!raw) {
            return [];
        }
        return raw.split(",").map((e) => e.trim());
    })();

    const coverageOutput = exec("mktemp -d", {silent: true}).stdout.trim();

    // Main build should only contain the annotations below:
    // ['with-contrib', 'publish-to-npm', 'publish-to-jb-marketplace', 'with-clean-slate-deployment']
    const withContrib = "with-contrib" in buildConfig || mainBuild;
    const storage = buildConfig["storage"] || "";
    const withUpgradeTests = "with-upgrade-tests" in buildConfig && !mainBuild;
    const fromVersion = withUpgradeTests ? buildConfig["from-version"] : "";
    const cluster = buildConfig["cluster"];
    const withSelfHostedPreview = "with-sh-preview" in buildConfig;
    const publishToNpm = "publish-to-npm" in buildConfig || mainBuild;
    const publishToJBMarketplace = "publish-to-jb-marketplace" in buildConfig || mainBuild;

    const localAppVersion = mainBuild || "with-localapp-version" in buildConfig ? version : "unknown";
    const cleanSlateDeployment = mainBuild || "with-clean-slate-deployment" in buildConfig;
    const installEELicense = !("without-ee-license" in buildConfig) || mainBuild;
    const withObservability = "with-observability" in buildConfig && !mainBuild;
    const withLargeVM = "with-large-vm" in buildConfig && !mainBuild;
    const withLocalPreview = "with-local-preview" in buildConfig || mainBuild
    const recreatePreview = "recreate-preview" in buildConfig
    const recreateVm = mainBuild || "recreate-vm" in buildConfig;
    const withDedicatedEmulation = "with-dedicated-emulation" in buildConfig && !mainBuild;
    const storageClass = buildConfig["storage-class"] || "";

    const analytics = parseAnalytics(werft, sliceId, buildConfig["analytics"])
    const withIntegrationTests = parseWithIntegrationTests(werft, sliceId, buildConfig["with-integration-tests"]);
    const withPreview = decideWithPreview({werft, sliceID: sliceId, buildConfig, mainBuild, withIntegrationTests})
    const withWerft = "with-werft" in buildConfig;
    const withGceVm = "with-gce-vm" in buildConfig;

    switch (buildConfig["cert-issuer"]) {
        case "zerossl":
            buildConfig["cert-issuer"] = "zerossl-issuer-gitpod-core-dev"
            break
        case "letsencrypt":
        default:
            buildConfig["cert-issuer"] = "letsencrypt-issuer-gitpod-core-dev"
    }
    const certIssuer = buildConfig["cert-issuer"];

    const repository: Repository = {
        owner: context.Repository.owner,
        repo: context.Repository.repo,
        ref: context.Repository.ref,
        branch: context.Repository.ref,
    };
    const refsPrefix = "refs/heads/";
    if (repository.branch.startsWith(refsPrefix)) {
        repository.branch = repository.branch.substring(refsPrefix.length);
    }

    const previewName = previewNameFromBranchName(repository.branch);
    const previewEnvironmentNamespace = `default`;
    const previewEnvironment = {
        destname: previewName,
        namespace: previewEnvironmentNamespace,
    };

    const observability: Observability = {
        branch: context.Annotations.withObservabilityBranch || "main",
    };

    const jobConfig = {
        analytics,
        buildConfig,
        cleanSlateDeployment,
        cluster,
        coverageOutput,
        dontTest,
        fromVersion,
        installEELicense,
        localAppVersion,
        mainBuild,
        withPreview,
        observability,
        previewEnvironment,
        publishRelease,
        publishToJBMarketplace,
        publishToNpm,
        repository,
        storage,
        storageClass,
        version,
        withContrib,
        withIntegrationTests,
        withObservability,
        withUpgradeTests,
        withSelfHostedPreview,
        withLocalPreview,
        workspaceFeatureFlags,
        withLargeVM,
        certIssuer,
        recreatePreview,
        recreateVm,
        withWerft,
        withDedicatedEmulation,
        withGceVm,
    };

    werft.logOutput(sliceId, JSON.stringify(jobConfig, null, 2));
    werft.log(sliceId, "Expand to see the parsed configuration");
    const globalAttributes = Object.fromEntries(
        Object.entries(jobConfig).map((kv) => {
            const [key, value] = kv;
            return [`werft.job.config.${key}`, value];
        }),
    );
    globalAttributes["werft.job.config.branch"] = context.Repository.ref;
    werft.addAttributes(globalAttributes);
    werft.done(sliceId);

    return jobConfig;
}

function parseVersion(context: any) {
    let buildConfig = context.Annotations || {};
    const explicitVersion = buildConfig.version;
    if (explicitVersion) {
        return explicitVersion;
    }
    let version = context.Name;
    const PREFIX_TO_STRIP = "gitpod-build-";
    if (version.substr(0, PREFIX_TO_STRIP.length) === PREFIX_TO_STRIP) {
        version = version.substr(PREFIX_TO_STRIP.length);
    }
    return version;
}

function decideWithPreview(options: { werft: Werft, sliceID: string, buildConfig: any, mainBuild: boolean, withIntegrationTests: WithIntegrationTests }) {
    const {werft, sliceID, buildConfig, mainBuild, withIntegrationTests} = options
    if (mainBuild) {
        werft.log(sliceID, "with-preview is disabled for main builds")
        return false
    }
    if ("with-preview" in buildConfig) {
        werft.log(sliceID, "with-preview is enabled as it was passed as a Werft annotation")
        return true
    }
    if (withIntegrationTests != "skip") {
        werft.log(sliceID, "with-preview is enabled as the with-integration-tests Werft annotation was used")
        return true
    }

    return false
}

export function parseAnalytics(werft: Werft, sliceId: string, value: string): Analytics {
    switch (value) {
        case "segment":
            return "segment"
    }

    werft.log(sliceId, "Analytics is not enabled")
    return "skip";
}

export function parseWithIntegrationTests(werft: Werft, sliceID: string, value?: string): WithIntegrationTests {
    switch (value) {
        case null:
        case undefined:
            werft.log(sliceID, "with-integration-tests was not set - will use 'skip'");
            return "skip";
        case "skip":
        case "all":
        case "webapp":
        case "ide":
        case "jetbrains":
        case "vscode":
        case "ssh":
        case "workspace":
        case "webapp":
            return value;
        case "":
            werft.log(sliceID, "with-integration-tests was set but no value was provided - falling back to 'all'");
            return "all";
        default:
            werft.log(sliceID, `Unknown value for with-integration-tests: '${value}' - falling back to 'all'`);
            return "all";
    }
}
@@ -1,8 +0,0 @@
apiVersion: v1
data:
  providerOptions: eyJzaXRlIjogImdpdHBvZC10ZXN0IiwiYXBpX2tleSI6ICJ0ZXN0X1hheTY0eVJYY2RHR2N1NG1haVhlSTNsY3VZNXlzTmVIWlFwIn0=
kind: Secret
metadata:
  name: chargebee-config
  namespace: ${NAMESPACE}
type: Opaque
@@ -1,17 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: stripe-config
  namespace: ${NAMESPACE}
data:
  config: |
    {
      "individualUsagePriceIds": {
        "EUR": "price_1LmYVxGadRXm50o3AiLq0Qmo",
        "USD": "price_1LmYWRGadRXm50o3Ym8PLqnG"
      },
      "teamUsagePriceIds": {
        "EUR": "price_1LmYVxGadRXm50o3AiLq0Qmo",
        "USD": "price_1LmYWRGadRXm50o3Ym8PLqnG"
      }
    }
@@ -1,89 +0,0 @@
import {execStream} from "../../util/shell";
import {Werft} from "../../util/werft";
import {GCLOUD_SERVICE_ACCOUNT_PATH} from "./const";
import {JobConfig} from "./job-config";
import {vmExists} from "../../vm/vm";
import {configureAccess, configureDocker} from "../../util/preview";

const phaseName = "prepare";
const prepareSlices = {
    CONFIGURE_K8S: "Configuring k8s access.",
    CONFIGURE_CORE_DEV: "Configuring core-dev access.",
    BOOT_VM: "Booting VM.",
};

export async function prepare(werft: Werft, config: JobConfig) {
    werft.phase(phaseName);
    try {
        werft.log(prepareSlices.CONFIGURE_CORE_DEV, prepareSlices.CONFIGURE_CORE_DEV);
        await configureAccess(werft)
        configureDocker();
        werft.done(prepareSlices.CONFIGURE_CORE_DEV);
        if (!config.withPreview) {
            return
        }
        await decideHarvesterVMCreation(werft, config);
    } catch (err) {
        werft.fail(phaseName, err);
    }
    werft.done(phaseName);
}

async function decideHarvesterVMCreation(werft: Werft, config: JobConfig) {
    // always try to create - usually it will be no-op, but if tf changed for any reason we would reconcile
    if (config.withPreview && (!vmExists({name: config.previewEnvironment.destname}) || config.cleanSlateDeployment || config.recreatePreview || config.recreateVm)) {
        await createVM(werft, config);
    }
    werft.done(prepareSlices.BOOT_VM);
}

// createVM only triggers the VM creation.
// Readiness is not guaranted.
async function createVM(werft: Werft, config: JobConfig) {
    const infra = config.withGceVm ? "gce" : "harvester"
    const replace = config.withGceVm ? "module.preview_gce[0].google_compute_instance.default" : "module.preview_harvester[0].harvester_virtualmachine.harvester"

    const environment = {
        // We pass the GCP credentials explicitly, otherwise for some reason TF doesn't pick them up
        "GOOGLE_BACKEND_CREDENTIALS": GCLOUD_SERVICE_ACCOUNT_PATH,
        "GOOGLE_APPLICATION_CREDENTIALS": GCLOUD_SERVICE_ACCOUNT_PATH,
        "TF_VAR_cert_issuer": config.certIssuer,
        "TF_VAR_preview_name": config.previewEnvironment.destname,
        "TF_VAR_with_large_vm": `${config.withLargeVM}`,
        "TF_VAR_infra_provider": `${infra}`,
    }

    if (config.storageClass.length > 0) {
        environment["TF_VAR_vm_storage_class"] = config.storageClass
    }

    const variables = Object
        .entries(environment)
        .filter(([_, value]) => value.length > 0)
        .map(([key, value]) => `${key}="${value}"`)
        .join(" ")

    if (config.recreatePreview) {
        werft.log(prepareSlices.BOOT_VM, "Recreating environment");
        await execStream(`${variables} \
            leeway run dev/preview:delete-preview`, {slice: prepareSlices.BOOT_VM});
    } else if (config.cleanSlateDeployment || config.recreateVm) {
        werft.log(prepareSlices.BOOT_VM, "Cleaning previously created VM");
        // -replace=... forces recreation of the resource
        await execStream(`${variables} \
            TF_CLI_ARGS_plan=-replace=${replace} \
            leeway run dev/preview:create-preview`, {slice: prepareSlices.BOOT_VM});
    }

    werft.log(prepareSlices.BOOT_VM, "Creating VM");

    try {
        await execStream(`${variables} leeway run dev/preview:create-preview`, {slice: prepareSlices.BOOT_VM});
    } catch (err) {
        werft.currentPhaseSpan.setAttribute("preview.created_vm", false);
        werft.fail(prepareSlices.BOOT_VM, new Error(`Failed creating VM: ${err}`))
        return;
    }

    werft.currentPhaseSpan.setAttribute("preview.created_vm", true);
}
@ -1,76 +0,0 @@
|
|||||||
import { exec } from "../../util/shell";
|
|
||||||
import { Werft } from "../../util/werft";
|
|
||||||
import { JobConfig } from "./job-config";
|
|
||||||
|
|
||||||
export async function validateChanges(werft: Werft, config: JobConfig) {
|
|
||||||
werft.phase("validate-changes", "validating changes");
|
|
||||||
// We run pre-commit checks first to avoid potential race conditions with the
|
|
||||||
// other validation checks.
|
|
||||||
await preCommitCheck(werft);
|
|
||||||
await Promise.all([
|
|
||||||
branchNameCheck(werft, config),
|
|
||||||
typecheckWerftJobs(werft),
|
|
||||||
leewayVet(werft),
|
|
||||||
]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Branch names cannot be longer than 45 characters.
|
|
||||||
//
|
|
||||||
// The branch name is used as part of the Werft job name. The job name is used for the name of the pod
|
|
||||||
// and k8s has a limit of 63 characters. We use 13 characters for the "gitpod-build-" prefix and 5
|
|
||||||
// more for the ".<BUILD NUMBER>" ending. That leaves us 45 characters for the branch name.
|
|
||||||
// See Werft source https://github.com/csweichel/werft/blob/057cfae0fd7bb1a7b05f89d1b162348378d74e71/pkg/werft/service.go#L376
|
|
||||||
async function branchNameCheck(werft: Werft, config: JobConfig) {
|
|
||||||
if (config.withPreview) {
|
|
||||||
const maxBranchNameLength = 45;
|
|
||||||
werft.log("check-branchname", `Checking if branch name is shorter than ${maxBranchNameLength} characters.`);
|
|
||||||
|
|
||||||
if (config.previewEnvironment.destname.length > maxBranchNameLength) {
|
|
||||||
throw new Error(
|
|
||||||
`The branch name ${config.previewEnvironment.destname} is more than ${maxBranchNameLength} character. Please choose a shorter name!`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
werft.done("check-branchname");
|
|
||||||
}
|
|
||||||
}

async function preCommitCheck(werft: Werft) {
    werft.log("pre-commit checks", "Running pre-commit hooks.");
    const preCommitCmd = await exec(`pre-commit run --show-diff-on-failure`, {
        slice: "pre-commit checks",
        async: true,
    });

    if (preCommitCmd.code != 0) {
        throw new Error(preCommitCmd.stderr.toString().trim());
    }
    werft.done("pre-commit checks");
}

/**
 * This validates all the .ts files inside of the .werft folder and fails the
 * build if there are compile errors.
 */
export async function typecheckWerftJobs(werft: Werft) {
    const slice = "tsc --noEmit";
    try {
        werft.log(slice, "Typechecking Werft Typescript files");
        await exec("cd .werft && tsc --noEmit", { slice, async: true });
        werft.log(slice, "No compilation errors");
    } catch (e) {
        werft.fail(slice, e);
    }
    werft.done(slice);
}

export async function leewayVet(werft: Werft) {
    const slice = "leeway vet --ignore-warnings"
    try {
        werft.log(slice, "Running leeway vet")
        await exec(`leeway vet --ignore-warnings`, {slice, async: true});
        werft.log(slice, "leeway vet successful")
    } catch (e) {
        werft.fail(slice, e)
    }
    werft.done(slice);
}
@ -1,35 +0,0 @@
import { execStream } from "../util/shell";
import { Werft } from "../util/werft";
import { CORE_DEV_KUBECONFIG_PATH, PREVIEW_K3S_KUBECONFIG_PATH } from "../jobs/build/const";

type MonitoringSatelliteInstallerOptions = {
    werft: Werft;
    branch: string;
    previewName: string;
    stackdriverServiceAccount: any;
};

/**
 * Installs monitoring-satellite, while updating its dependencies to the latest commit in the branch it is running.
 */
export class MonitoringSatelliteInstaller {
    constructor(private readonly options: MonitoringSatelliteInstallerOptions) {}

    public async install(slice: string) {
        const environment = {
            DEV_KUBE_PATH: CORE_DEV_KUBECONFIG_PATH,
            DEV_KUBE_CONTEXT: "dev",
            PREVIEW_K3S_KUBE_PATH: PREVIEW_K3S_KUBECONFIG_PATH,
            PREVIEW_NAME: this.options.previewName,
        };
        const variables = Object.entries(environment)
            .map(([key, value]) => `${key}="${value}"`)
            .join(" ");
        this.options.werft.log(slice, `Installing observability stack - Branch: ${this.options.branch}`);
        await execStream(`${variables} leeway run dev/preview:deploy-monitoring-satellite`, {
            slice: slice,
        });
        this.options.werft.log(slice, "Succeeded installing monitoring satellite");
        this.options.werft.done(slice);
    }
}
@ -1,54 +0,0 @@
import { Metadata, credentials } from "@grpc/grpc-js";
import { NodeSDK } from "@opentelemetry/sdk-node";
import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
import { Resource } from "@opentelemetry/resources";
import { SemanticResourceAttributes } from "@opentelemetry/semantic-conventions";
import { CollectorTraceExporter } from "@opentelemetry/exporter-collector-grpc";

/**
 * Initialize tracing and wait for it to be ready.
 *
 * Registers a beforeExit event handler to gracefully flush traces upon exit.
 */
export async function initialize() {
    const metadata = new Metadata();
    metadata.set("x-honeycomb-team", process.env.HONEYCOMB_API_KEY);
    metadata.set("x-honeycomb-dataset", process.env.HONEYCOMB_DATASET);
    const traceExporter = new CollectorTraceExporter({
        url: "grpc://api.honeycomb.io:443/",
        credentials: credentials.createSsl(),
        metadata,
    });

    const sdk = new NodeSDK({
        resource: new Resource({
            [SemanticResourceAttributes.SERVICE_NAME]: "werft",
        }),
        traceExporter,
        instrumentations: [getNodeAutoInstrumentations()],
    });

    console.log("Initializing tracing");
    try {
        await sdk.start();
    } catch (err) {
        console.log("Error initializing tracing", err);
        process.exit(1);
    }

    let didFlushTraces = false;
    process.on("beforeExit", (code) => {
        const sliceID = "tracing shutdown";
        if (!didFlushTraces) {
            console.log(`[${sliceID}] About to exit with code ${code}. Shutting down tracing.`);
            didFlushTraces = true;
            sdk.shutdown()
                .then(() => console.log(`[${sliceID}] Tracing terminated`))
                .catch((error) => console.log(`[${sliceID}] Error terminating tracing`, error));
        } else {
            console.log(
                `[${sliceID}] About to exit with code ${code}. Traces already flushed, no further action needed.`,
            );
        }
    });
}
@ -1,29 +0,0 @@
{
  "private": true,
  "name": "@gitpod/build",
  "version": "0.0.0",
  "license": "UNLICENSED",
  "scripts": {
    "run": "npx ts-node build.ts"
  },
  "dependencies": {
    "@google-cloud/dns": "^2.2.4",
    "@grpc/grpc-js": "1.8.8",
    "@opentelemetry/api": "^1.0.3",
    "@opentelemetry/auto-instrumentations-node": "^0.26.0",
    "@opentelemetry/exporter-collector-grpc": "^0.25.0",
    "@opentelemetry/sdk-node": "^0.26.0",
    "semver": "7.3.5",
    "shelljs": "^0.8.4",
    "ts-node": "^10.4.0",
    "typescript": "~4.4.2"
  },
  "devDependencies": {
    "@types/node": "^16.11.0",
    "@types/semver": "7.3.5",
    "@types/shelljs": "^0.8.8",
    "prettier": "2.6.2",
    "tslib": "^2.3.0",
    "typescript": "~4.4.2"
  }
}
@ -1,5 +0,0 @@
title=":x: *Workspace integration test failed*"
body=$(grep "\-\-\- FAIL: " entrypoing.sh.log)
echo "${title}"
echo "${body}"
# echo "[int-tests|FAIL]"
@ -1,81 +0,0 @@
#!/usr/bin/env bash
#
# This script iterates over the "dev/workload: builds" nodes and cordons them if
# their disk usage is higher than DISK_USED_THRESHOLD.
#
# The easiest way to run this script is through Werft so you don't have to worry
# about installing the appropraite service account etc. locally.
#
# werft job run github -j .werft/platform-trigger-werft-cleanup.yaml -s .werft/platform-trigger-werft-cleanup.sh
#

sleep 1

set -Eeuo pipefail

DISK_USED_THRESHOLD=80

function cordon-node-if-almost-full {
  local node="$1"
  local zone disk_used_pct
  local slice_id="Cleanup up node $node"

  zone="$(
    kubectl get node "${node}" -o json \
      | jq -r '.metadata.labels["topology.kubernetes.io/zone"]'
  )"

  echo "Checking disk usage of /dev/sdb" | werft log slice "$slice_id"
  disk_used_pct=$(
    gcloud compute ssh \
      --project "gitpod-core-dev" \
      --zone "$zone" \
      --command="df /dev/sdb --output=pcent | tail -n1 | cut -d'%' -f1" \
      "${node}" 2>&1 \
      | tail -n1 \
      | tr -d '[:space:]'
  )
  echo "The disk is ${disk_used_pct}% full" | werft log slice "$slice_id"

  if [ "$disk_used_pct" -gt "$DISK_USED_THRESHOLD" ]; then
    echo "${disk_used_pct} is greater than ${DISK_USED_THRESHOLD}. Cordining node" | werft log slice "$slice_id"
    kubectl cordon "$node" | werft log slice "$slice_id"

    if [[ "${node}" =~ "builds-static" ]]; then
      echo "Cleaning up static node [${node}]"
      while ! is_node_empty "${node}";do
        echo "Node is not empty yet. Sleeping for 15 seconds." | werft log slice "$slice_id"
        sleep 15
      done

      kubectl drain "${node}" --delete-emptydir-data --force --ignore-daemonsets=true --grace-period=120 | werft log slice "$slice_id"

      gcloud compute instances delete "${node}" --zone="${zone}" -q | werft log slice "$slice_id"
      kubectl uncordon "${node}" | werft log slice "$slice_id"
    fi
  else
    echo "${disk_used_pct} is less than the trehold of ${DISK_USED_THRESHOLD}. Skipping node" | werft log slice "$slice_id"
  fi

  werft log slice "$slice_id" --done
}

function is_node_empty {
  local node=$1
  pods=$(kubectl -n werft get pods -o wide --field-selector spec.nodeName="${node}" 2>&1)
  if [[ "${pods}" == "No resources found in werft namespace." ]]; then
    return 0
  fi

  return 1
}

# Activate service account and install core-dev context
gcloud auth activate-service-account --key-file "/mnt/secrets/gcp-sa/service-account.json"
gcloud container clusters get-credentials core-dev --zone europe-west1-b --project gitpod-core-dev

echo "[Process nodes|PHASE] Processing each build node"
nodes=$(kubectl get nodes -l dev/workload=builds --no-headers -o custom-columns=":metadata.name")
for node in $nodes ; do
  cordon-node-if-almost-full "$node"
done
@ -1,37 +0,0 @@
# This job is responsible for cordoning Werft build nodes if their disk is almost full.
# It runs periodically but you can always manually trigger it using:
#
# werft job run github -j .werft/platform-trigger-werft-cleanup.yaml
#
pod:
  serviceAccount: werft
  restartPolicy: Never
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: dev/workload
                operator: In
                values:
                  - "builds"
  volumes:
    # Needed to talk to the core-dev cluster and SSH to underlying instances
    - name: gcp-sa
      secret:
        secretName: gcp-sa-gitpod-dev-deployer
  containers:
    - name: build
      image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:aledbf-oci-tool-gha.14121
      workingDir: /workspace
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - name: gcp-sa
          mountPath: /mnt/secrets/gcp-sa
          readOnly: true
      command:
        - bash
        - .werft/platform-trigger-werft-cleanup.sh

plugins:
  cron: "15 * * * *"
@ -1,8 +0,0 @@
{
  "compilerOptions": {
    "target": "es2019",
    "skipLibCheck": true,
    "moduleResolution": "node",
    "noUnusedLocals": true
  }
}
@ -1,145 +0,0 @@
import {createHash} from "crypto";
import * as VM from "../vm/vm";
import {exec, execStream} from "./shell";
import {Werft} from "./werft";
import {
    CORE_DEV_KUBECONFIG_PATH,
    GCLOUD_SERVICE_ACCOUNT_PATH,
    GLOBAL_KUBECONFIG_PATH,
    HARVESTER_KUBECONFIG_PATH
} from "../jobs/build/const";

const SLICES = {
    CONFIGURE_DOCKER: "Configuring Docker",
    CONFIGURE_GCP_ACCESS: "Activating service account",
    CONFIGURE_K8S_ACCESS: "Installing dev/harvester contexts",
    INSTALL_PREVIEWCTL: "Install previewctl",
};

/**
 * Based on the current branch name this will compute the name of the associated
 * preview environment.
 *
 * NOTE: This needs to produce the same result as the function in dev/preview/util/preview-name-from-branch.sh
 */
export function previewNameFromBranchName(branchName: string): string {
    // Due to various limitations we have to ensure that we only use 20 characters
    // for the preview environment name.
    //
    // If the branch name is 20 characters or less we just use it.
    //
    // Otherwise:
    //
    // We use the first 10 chars of the sanitized branch name
    // and then the 10 first chars of the hash of the sanitized branch name
    //
    // That means collisions can happen. If they do, two jobs would try to deploy to the same
    // environment.
    //
    // see https://github.com/gitpod-io/ops/issues/1252 for details.
    const sanitizedBranchName = branchName
        .replace(/^refs\/heads\//, "")
        .toLocaleLowerCase()
        .replace(/[^-a-z0-9]/g, "-");

    if (sanitizedBranchName.length <= 20) {
        return sanitizedBranchName;
    }

    const hashed = createHash("sha256").update(sanitizedBranchName).digest("hex");
    return `${sanitizedBranchName.substring(0, 10)}${hashed.substring(0, 10)}`;
}
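
// Illustrative sketch (a hypothetical branch name, not part of the original file):
//
//   previewNameFromBranchName("refs/heads/jx/some-very-long-feature-branch")
//
// The sanitized name "jx-some-very-long-feature-branch" is longer than 20 characters, so the
// result is its first 10 characters ("jx-some-ve") followed by the first 10 hex digits of the
// SHA-256 hash of the sanitized name - always exactly 20 characters in total.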

export class HarvesterPreviewEnvironment {
    // The prefix we use for the namespace
    static readonly namespacePrefix: string = "preview-";

    // The name of the namespace that the VM and related resources are in, e.g. preview-my-branch
    namespace: string;

    // The name of the preview environment, e.g. my-branch
    name: string;

    werft: Werft;

    constructor(werft: Werft, namespace: string) {
        this.werft = werft;
        this.namespace = namespace;
        this.name = namespace;
        if (this.namespace.startsWith(HarvesterPreviewEnvironment.namespacePrefix)) {
            this.name = namespace.slice(HarvesterPreviewEnvironment.namespacePrefix.length);
        }
    }

    async delete(): Promise<void> {
        await VM.destroyPreview({name: this.name});
    }
}
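
// A minimal usage sketch (not part of the original file); the namespace is illustrative only.
//
//   const env = new HarvesterPreviewEnvironment(werft, "preview-my-branch");
//   env.name;           // "my-branch" - the "preview-" namespace prefix is stripped
//   await env.delete(); // tears the VM down via VM.destroyPreview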

export async function configureAccess(werft: Werft) {
    werft.phase("Configure access");
    try {
        exec(`gcloud auth activate-service-account --key-file "${GCLOUD_SERVICE_ACCOUNT_PATH}"`, {
            slice: SLICES.CONFIGURE_GCP_ACCESS,
        });
        werft.done(SLICES.CONFIGURE_GCP_ACCESS);
    } catch (err) {
        werft.fail(SLICES.CONFIGURE_GCP_ACCESS, err);
    }

    try {
        await installPreviewCTL()
    } catch (e) {
        werft.fail(SLICES.INSTALL_PREVIEWCTL, e)
    }

    try {
        exec(`KUBECONFIG=${GLOBAL_KUBECONFIG_PATH} previewctl get-credentials --gcp-service-account=${GCLOUD_SERVICE_ACCOUNT_PATH}`, {
            slice: SLICES.CONFIGURE_K8S_ACCESS
        });

        exec(`mkdir -p $(dirname ${HARVESTER_KUBECONFIG_PATH})`)

        exec(
            `kubectl --context=harvester config view --minify --flatten > ${HARVESTER_KUBECONFIG_PATH}`, {
                slice: SLICES.CONFIGURE_K8S_ACCESS
            },
        )

        exec(
            `kubectl --context=dev config view --minify --flatten > ${CORE_DEV_KUBECONFIG_PATH}`, {
                slice: SLICES.CONFIGURE_K8S_ACCESS
            },
        )
        werft.done(SLICES.CONFIGURE_K8S_ACCESS);
    } catch (e) {
        werft.fail(SLICES.CONFIGURE_K8S_ACCESS, e);
        throw new Error("Failed to configure kubernetes contexts");
    }

    werft.done("Configure access");
}

export async function installPreviewCTL() {
    try {
        await execStream(`leeway run dev/preview/previewctl:install`, {
            slice: SLICES.INSTALL_PREVIEWCTL,
        })
    } catch (e) {
        throw new Error(`Failed to install previewctl: ${e}`);
    }
}

export function configureDocker() {
    const rcDocker = exec("gcloud auth configure-docker --quiet", {slice: SLICES.CONFIGURE_DOCKER}).code;
    const rcDockerRegistry = exec("gcloud auth configure-docker europe-docker.pkg.dev --quiet", {
        slice: SLICES.CONFIGURE_DOCKER,
    }).code;

    if (rcDocker != 0 || rcDockerRegistry != 0) {
        throw new Error("Failed to configure docker with gcloud.");
    }
}


export type PreviewEnvironment = HarvesterPreviewEnvironment;
@ -1,151 +0,0 @@
import * as shell from "shelljs";
import * as fs from "fs";
import { ChildProcess } from "child_process";
import { getGlobalWerftInstance } from "./werft";

export type ExecOptions = shell.ExecOptions & {
    slice?: string;
    dontCheckRc?: boolean;
};
export type ExecResult = {
    code: number;
    stdout: string;
    stderr: string;
};

// exec executes a command and throws an exception if that command exits with a non-zero exit code
export function exec(command: string): shell.ShellString;
export function exec(command: string, options: ExecOptions & { async?: false }): shell.ShellString;
export function exec(command: string, options: ExecOptions & { async: true }): Promise<ExecResult>;
export function exec(command: string, options: ExecOptions): shell.ShellString | ChildProcess;
export function exec(cmd: string, options?: ExecOptions): ChildProcess | shell.ShellString | Promise<ExecResult> {
    const werft = getGlobalWerftInstance();

    if (options && options.slice) {
        options.silent = true;
    }

    const handleResult = (result, options) => {
        let output = [];
        if (result.stdout) {
            output.push("STDOUT: " + result.stdout);
        }
        if (result.stderr) {
            output.push("STDERR: " + result.stderr);
        }
        if (options && options.slice) {
            werft.logOutput(options.slice, output.join("\n"));
            output = []; // don't show the same output as part of the exception again.
        }
        if ((!options || !options.dontCheckRc) && result.code !== 0) {
            output.unshift(`${cmd} exit with non-zero status code.`);
            throw new Error(output.join("\n"));
        }
    };

    if (options && options.async) {
        return new Promise<ExecResult>((resolve, reject) => {
            shell.exec(cmd, options, (code, stdout, stderr) => {
                try {
                    const result: ExecResult = { code, stdout, stderr };
                    handleResult(result, options);
                    resolve(result);
                } catch (err) {
                    reject(err);
                }
            });
        });
    } else {
        const result = shell.exec(cmd, options);
        handleResult(result, options);
        return result;
    }
}

/**
 * Execute a command and stream output logs.
 *
 * If a slice is given logs are streamed using the werft log syntax; else they're streamed directly
 * to stderr/stdout.
 *
 * @return The process exit code
 */
export async function execStream(command: string, options: ExecOptions ): Promise<number> {
    const werft = getGlobalWerftInstance();

    options = options || {};

    if (options.slice) {
        options.silent = true;
    }

    const child = shell.exec(command, {...options, async: true});

    // Collect output from a subprocess file and print newline terminated log messages.
    //
    // The event handlers attached to the child process stdout/stderr aren't guaranteed to be newline
    // terminated; this can have odd interactions with `werft.logOutput` which appends newlines.
    // to log messages. To ensure faithful reproduction of the underlying command output we
    // perform our own buffer management to emit newline delimited logs accurately.
    const bufferedLog = (slice: string, buffer: string, data: string) => {
        buffer += data;
        const lastIndex = buffer.lastIndexOf("\n");
        if (lastIndex >= 0) {
            // Extract the substring till the last newline in the buffer, and trim off the newline
            // as werft.logOutput will append a newline to the log message.
            let msg = buffer.slice(0, lastIndex + 1).trimEnd();
            werft.logOutput(slice, msg);

            buffer = buffer.slice(lastIndex + 1);
        }

    };

    let stdoutBuffer: string = '';
    child.stdout.on('data', (data) => {
        if (options.slice) bufferedLog(options.slice, stdoutBuffer, data);
    });

    let stderrBuffer: string = '';
    child.stderr.on('data', (data) => {
        if (options.slice) bufferedLog(options.slice, stderrBuffer, data);
    });

    const code = await new Promise<number>((resolve, reject) => {
        child.on('close', (code, _signal) => {
            if (options.slice) {
                // The child process stdout and stderr buffers may not be fully flushed as the child process
                // may emit logs that aren't terminated with a newline; flush those buffers now.
                if (stdoutBuffer.length > 0) werft.logOutput(options.slice, stdoutBuffer.trimEnd());
                if (stderrBuffer.length > 0) werft.logOutput(options.slice, stderrBuffer.trimEnd());
            }

            if (code === 0 || options.dontCheckRc) {
                resolve(code)
            } else {
                reject(new Error(`Process exited non-zero exit code ${code}`))
            }
        });
    });

    return code;
}
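
// A minimal usage sketch (not part of the original file); the commands and slice names are
// illustrative only.
//
//   // With async: true, exec resolves to { code, stdout, stderr } and throws on non-zero exit.
//   const res = await exec("git rev-parse HEAD", { slice: "git", async: true });
//   console.log(res.stdout.trim());
//
//   // execStream streams the command's output into the given slice and resolves with the exit code.
//   const code = await execStream("leeway build", { slice: "build" });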

// gitTag tags the current state and pushes that tag to the repo origin
export const gitTag = (tag) => {
    shell.mkdir("/root/.ssh");
    fs.writeFileSync(
        "/root/.ssh/config",
        `Host github.com
    UserKnownHostsFile=/dev/null
    StrictHostKeyChecking no
    IdentitiesOnly yes
    IdentityFile /mnt/secrets/github-ssh-key/github-ssh-key.pem`,
    );
    shell.chmod(600, "/root/.ssh/*");
    shell.chmod(700, "/root/.ssh");

    exec("git config --global url.ssh://git@github.com/.insteadOf https://github.com/");
    exec(`git tag -f ${tag}`);
    exec(`git push -f origin ${tag}`);
};
@ -1,100 +0,0 @@
import * as https from "https";

export function reportBuildFailureInSlack(context, err: Error): Promise<void> {
    const repo = context.Repository.host + "/" + context.Repository.owner + "/" + context.Repository.repo;
    const data = JSON.stringify({
        blocks: [
            {
                type: "section",
                text: {
                    type: "mrkdwn",
                    text: ":X: *build failure*\n_Repo:_ `" + repo + "`\n_Build:_ `" + context.Name + "`",
                },
                accessory: {
                    type: "button",
                    text: {
                        type: "plain_text",
                        text: "Go to Werft",
                        emoji: true,
                    },
                    value: "click_me_123",
                    url: "https://werft.gitpod-dev.com/job/" + context.Name,
                    action_id: "button-action",
                },
            },
            {
                type: "section",
                text: {
                    type: "mrkdwn",
                    text: "```\n" + err + "\n```",
                },
            },
        ],
    });
    return new Promise((resolve, reject) => {
        const req = https.request(
            {
                hostname: "hooks.slack.com",
                port: 443,
                path: process.env.SLACK_NOTIFICATION_PATH.trim(),
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    "Content-Length": data.length,
                },
            },
            () => resolve(),
        );
        req.on("error", (error: Error) => reject(error));
        req.write(data);
        req.end();
    });
}

export function reportCertificateError(options: { certificateName: string; certifiateYAML: string, certificateDebug: string }): Promise<void> {
    const data = JSON.stringify({
        channel: "C03MWBB5MP1",
        blocks: [
            {
                type: "section",
                text: {
                    type: "mrkdwn",
                    text: `A preview environment's certificate ${options.certificateName} never reached the Ready state. @ask-devx please investigate using our [Debugging certificate issues guide](https://www.notion.so/gitpod/Debugging-certificate-issues-9453d1c8ac914ce7962557b67f7b49b3) :hug:`,
                },
            },
            {
                type: "section",
                text: {
                    type: "mrkdwn",
                    text: "```\n" + options.certifiateYAML + "\n```",
                },
            },
            {
                type: "section",
                text: {
                    type: "mrkdwn",
                    text: "```\n" + options.certificateDebug + "\n```",
                },
            },
        ],
    });
    return new Promise((resolve, reject) => {
        const req = https.request(
            {
                hostname: "slack.com",
                path: "api/chat.postMessage",
                method: "POST",
                port: 443,
                headers: {
                    "Content-Type": "application/json",
                    "Content-Length": data.length,
                    "Authorization": "Bearer " + process.env.DEVX_SLACK_NOTIFICATION_PATH.trim(),
                },
            },
            () => resolve(),
        );
        req.on("error", (error: Error) => reject(error));
        req.write(data);
        req.end();
    });
}
@ -1,21 +0,0 @@
#!/bin/bash

set -euo pipefail

NAMESPACE=$1
KUBECONFIG=$2

if [[ -z ${NAMESPACE} ]]; then
  echo "One or more input params were invalid. The params we received were: ${NAMESPACE}"
  exit 1
fi

echo "Removing Gitpod in namespace ${NAMESPACE}"
kubectl --kubeconfig "$KUBECONFIG" get configmap gitpod-app -n "${NAMESPACE}" -o jsonpath='{.data.app\.yaml}' | kubectl --kubeconfig "$KUBECONFIG" delete --ignore-not-found=true -f -

echo "Removing Gitpod storage from ${NAMESPACE}"
kubectl --kubeconfig "$KUBECONFIG" -n "${NAMESPACE}" --ignore-not-found=true delete pvc data-mysql-0
# the installer includes the minio PVC in it's config mpap, this is a "just in case"
kubectl --kubeconfig "$KUBECONFIG" -n "${NAMESPACE}" delete pvc minio || true

echo "Successfully removed Gitpod from ${NAMESPACE}"
@ -1,199 +0,0 @@
import { Span, Tracer, trace, context, SpanStatusCode, SpanAttributes } from "@opentelemetry/api";
import { exec } from "./shell";

let werft: Werft;

export class FailedSliceError extends Error {
    constructor(message: string) {
        super(message);
        this.name = "FailedSliceError";
        Object.setPrototypeOf(this, FailedSliceError.prototype);
    }
}

/**
 * For backwards compatibility with existing code we expose a global Werft instance
 */
export function getGlobalWerftInstance() {
    if (!werft) {
        throw new Error("Trying to fetch global Werft instance but it hasn't been instantiated yet");
    }
    return werft;
}

/**
 * Class for producing Werft compatible log output and generating traces
 */
export class Werft {
    private tracer: Tracer;
    public rootSpan: Span;
    private sliceSpans: { [slice: string]: Span } = {};
    public currentPhaseSpan: Span;
    private globalSpanAttributes: SpanAttributes = {};

    constructor(job: string) {
        if (werft) {
            throw new Error("Only one Werft instance should be instantiated per job");
        }
        this.tracer = trace.getTracer("default");
        this.rootSpan = this.tracer.startSpan(`job: ${job}`, { root: true, attributes: { "werft.job.name": job } });

        // Expose this instance as part of getGlobalWerftInstance
        werft = this;
    }

    public phase(name, desc?: string) {
        // When you start a new phase the previous phase is implicitly closed.
        if (this.currentPhaseSpan) {
            this.endPhase();
        }

        const rootSpanCtx = trace.setSpan(context.active(), this.rootSpan);
        this.currentPhaseSpan = this.tracer.startSpan(
            `phase: ${name}`,
            {
                attributes: {
                    "werft.phase.name": name,
                    "werft.phase.description": desc,
                },
            },
            rootSpanCtx,
        );
        this.currentPhaseSpan.setAttributes(this.globalSpanAttributes);

        console.log(`[${name}|PHASE] ${desc || name}`);
    }

    public log(slice, msg) {
        if (!this.sliceSpans[slice]) {
            const parentSpanCtx = trace.setSpan(context.active(), this.currentPhaseSpan);
            const sliceSpan = this.tracer.startSpan(`slice: ${slice}`, undefined, parentSpanCtx);
            sliceSpan.setAttributes(this.globalSpanAttributes);
            this.sliceSpans[slice] = sliceSpan;
        }
        console.log(`[${slice}] ${msg}`);
    }

    public logOutput(slice, cmd) {
        cmd.toString()
            .split("\n")
            .forEach((line: string) => this.log(slice, line));
    }

    /**
     * Use this when you intend to fail the werft job
     */
    public fail(slice: string, err: string | Error) {
        const span = this.sliceSpans[slice];

        if (span) {
            span.end();
        } else {
            console.log(`[${slice}] tracing warning: No slice span by name ${slice}`);
        }

        // Set the status on the span for the slice and also propagate the status to the phase and root span
        // as well so we can query on all phases that had an error regardless of which slice produced the error.
        [span, this.rootSpan, this.currentPhaseSpan].forEach((span: Span) => {
            if (!span) {
                return;
            }
            span.setStatus({
                code: SpanStatusCode.ERROR,
                message: err.toString(),
            });
        });

        // In case the error message is a multi-line string we want to ensure that we contain
        // the error message within the slice (otherwise they'll be moved to the "default" slice of the phase)
        err.toString()
            .split("\n")
            .forEach((line: string) => console.log(`[${slice}] ${line}`));

        // The UI shows the last log of the slice which might not make a lot of sense
        // for multi-line error messages, so instead we tell the user to expand the slice.
        console.log(`[${slice}] Failed. Expand to see why`);
        console.log(`[${slice}|FAIL]`);
        throw new FailedSliceError(slice);
    }

    /**
     * Use this when you intend to fail a single slice, but not the entire Werft job.
     */
    public failSlice(slice: string, error: Error) {
        const span = this.sliceSpans[slice];
        if (span) {
            span.setStatus({
                code: SpanStatusCode.ERROR,
                message: error.message,
            });
            span.end();
            delete this.sliceSpans[slice];
        }
        console.log(`[${slice}|FAIL] ${error}`);
    }

    public done(slice: string) {
        const span = this.sliceSpans[slice];
        if (span) {
            span.end();
            delete this.sliceSpans[slice];
        }
        console.log(`[${slice}|DONE]`);
    }

    public result(description: string, channel: string, value: string) {
        exec(`werft log result -d "${description}" -c "${channel}" ${value}`);
    }

    private endPhase() {
        // End all open slices
        Object.entries(this.sliceSpans).forEach((kv) => {
            const [id, span] = kv;
            span.end();
            delete this.sliceSpans[id];
        });
        // End the phase
        this.currentPhaseSpan.end();
    }

    public endAllSpans() {
        const traceID = this.rootSpan.spanContext().traceId;
        const nowUnix = Math.round(new Date().getTime() / 1000);
        // At the moment we're just looking for traces in a 30 minutes timerange with the specific traceID
        // A smarter approach would be to get a start timestamp from tracing.Initialize()
        exec(
            `werft log result -d "Honeycomb trace" -c github-check-honeycomb-trace url "https://ui.honeycomb.io/gitpod/datasets/werft/trace?trace_id=${traceID}&trace_start_ts=${
                nowUnix - 1800
            }&trace_end_ts=${nowUnix + 5}"`,
        );
        this.endPhase();
        this.rootSpan.end();
    }

    /**
     * This allows you to set attributes on all open and future Werft spans.
     * Any spans in phases that have already been closed won't get the attributes.
     */
    public addAttributes(attributes: SpanAttributes): void {
        // Add the attributes to the root span.
        this.rootSpan.setAttributes(attributes);

        // Set the attribute on all spans for the current phase.
        this.currentPhaseSpan.setAttributes(attributes);
        Object.entries(this.sliceSpans).forEach((kv) => {
            const [_, span] = kv;
            span.setAttributes(attributes);
        });

        this.globalSpanAttributes = { ...this.globalSpanAttributes, ...attributes };
    }

    public getSpanForSlice(slice: string): Span {
        const span = this.sliceSpans[slice];
        if (!span) {
            throw new Error(`No open span for ${slice}`);
        }
        return span;
    }
}
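
// A minimal usage sketch (not part of the original file); the phase, slice, and description
// names are illustrative only.
//
//   const werft = new Werft("build");
//   werft.phase("compile", "compiling all components");
//   werft.log("typescript", "running tsc");   // opens a slice span on first use
//   werft.done("typescript");                 // ends the slice span and emits [typescript|DONE]
//   werft.endAllSpans();                      // closes the phase and root spans and links the Honeycomb trace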
@ -1,96 +0,0 @@
import {GCLOUD_SERVICE_ACCOUNT_PATH, HARVESTER_KUBECONFIG_PATH, PREVIEW_K3S_KUBECONFIG_PATH} from "../jobs/build/const";
import {exec, execStream} from "../util/shell";
import {getGlobalWerftInstance} from "../util/werft";

/**
 * Remove all VM resources - Namespace+VM+Proxy svc on Harvester, LB+SVC on DEV
 */
export async function destroyPreview(options: { name: string }) {
    const werft = getGlobalWerftInstance();

    try {
        await execStream(`TF_VAR_preview_name=${options.name} \
            GOOGLE_APPLICATION_CREDENTIALS=${GCLOUD_SERVICE_ACCOUNT_PATH} \
            GOOGLE_BACKEND_CREDENTIALS=${GCLOUD_SERVICE_ACCOUNT_PATH} \
            leeway run dev/preview:delete-preview`,
            {slice: "Deleting VM."})
    } catch (err) {
        werft.currentPhaseSpan.setAttribute("preview.deleted_vm", false);
        werft.fail("Deleting VM.", new Error(`Failed deleting VM: ${err}`))
        return;
    }

    werft.currentPhaseSpan.setAttribute("preview.deleted_vm", true);
}

/**
 * Check if a VM with the given name already exists.
 * @returns true if the VM already exists
 */
export function vmExists(options: { name: string }) {
    const namespace = `preview-${options.name}`;
    const status = exec(`kubectl --kubeconfig ${HARVESTER_KUBECONFIG_PATH} -n ${namespace} get svc proxy`, {
        dontCheckRc: true,
        silent: true,
    });
    return status.code == 0;
}

/**
 * Wait until the VM Instance reaches the Running status.
 * If the VM Instance doesn't reach Running before the timeoutMS it will throw an Error.
 */
export function waitForVMReadiness(options: { name: string; timeoutSeconds: number; slice: string }) {
    const werft = getGlobalWerftInstance();
    const namespace = `preview-${options.name}`;

    const startTime = Date.now();
    const ready = exec(
        `kubectl --kubeconfig ${HARVESTER_KUBECONFIG_PATH} -n ${namespace} wait --for=condition=ready --timeout=${options.timeoutSeconds}s pod -l kubevirt.io=virt-launcher -l harvesterhci.io/vmName=${options.name}`,
        {dontCheckRc: true, silent: true},
    );

    if (ready.code == 0) {
        werft.log(options.slice, `VM is ready after ${(Date.now() - startTime) / 1000} seconds`);
        return;
    }

    werft.log(
        options.slice,
        `Timeout while waiting for VM to get ready. Timeout: ${options.timeoutSeconds}. Stderr: ${ready.stderr}. Stdout: ${ready.stdout}`,
    );
    throw new Error("VM didn't reach 'Ready' status before the timeout.");
}

/**
 * Installs the preview environment's context
 * If it doesn't manage to do so before the timeout it will throw an Error
 */
export async function installPreviewContext(options: { name: string; slice: string }) {
    try {
        await execStream(
            `previewctl install-context --private-key-path=/workspace/.ssh/id_rsa_harvester_vm --gcp-service-account=${GCLOUD_SERVICE_ACCOUNT_PATH} --branch=${options.name} --timeout=10m`,
            {slice: options.slice},
        );

        exec(`mkdir -p $(dirname ${PREVIEW_K3S_KUBECONFIG_PATH})`)

        exec(
            `kubectl --context=${options.name} config view --minify --flatten > ${PREVIEW_K3S_KUBECONFIG_PATH}`,
            {dontCheckRc: true, slice: options.slice},
        )

        return;
    } catch (e) {
        throw new Error(
            `Wasn't able to copy out the kubeconfig before the timeout. `,
        );
    }
}

/**
 * Terminates all running kubectl proxies
 */
export function stopKubectlPortForwards() {
    exec(`sudo killall kubectl || true`);
}
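
// A minimal usage sketch (not part of the original file); the preview name, timeout, and slice
// names are illustrative only.
//
//   if (!vmExists({ name: "my-branch" })) {
//       // ...create the VM first, then wait for it and install its kubectl context:
//   }
//   waitForVMReadiness({ name: "my-branch", timeoutSeconds: 600, slice: "wait for VM" });
//   await installPreviewContext({ name: "my-branch", slice: "install context" });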
1425 .werft/yarn.lock
File diff suppressed because it is too large
@ -132,7 +132,7 @@ EOF
}

function installRookCeph {
-  diff-apply "${PREVIEW_K3S_KUBE_CONTEXT}" "$ROOT/.werft/vm/manifests/rook-ceph/crds.yaml"
+  diff-apply "${PREVIEW_K3S_KUBE_CONTEXT}" "$SCRIPT_PATH/../vm/manifests/rook-ceph/crds.yaml"

  kubectl \
    --kubeconfig "${PREVIEW_K3S_KUBE_PATH}" \
@ -140,7 +140,7 @@ function installRookCeph {
    wait --for condition=established --timeout=120s crd/cephclusters.ceph.rook.io

  for file in common operator cluster-test storageclass-test snapshotclass;do
-    diff-apply "${PREVIEW_K3S_KUBE_CONTEXT}" "$ROOT/.werft/vm/manifests/rook-ceph/$file.yaml"
+    diff-apply "${PREVIEW_K3S_KUBE_CONTEXT}" "$SCRIPT_PATH/../vm/manifests/rook-ceph/$file.yaml"
  done
}

@ -175,7 +175,7 @@ function installFluentBit {
  helm3 \
    --kubeconfig "${PREVIEW_K3S_KUBE_PATH}" \
    --kube-context "${PREVIEW_K3S_KUBE_CONTEXT}" \
-    upgrade --install fluent-bit fluent/fluent-bit --version 0.21.6 -n "${PREVIEW_NAMESPACE}" -f "$ROOT/.werft/vm/charts/fluentbit/values.yaml"
+    upgrade --install fluent-bit fluent/fluent-bit --version 0.21.6 -n "${PREVIEW_NAMESPACE}" -f "$SCRIPT_PATH/../vm/charts/fluentbit/values.yaml"
}

# ====================================
@ -553,7 +553,7 @@ done
# Run post-process script
#

-WITH_VM=true "$ROOT/.werft/jobs/build/installer/post-process.sh" "${PREVIEW_NAME}" "${GITPOD_AGENT_SMITH_TOKEN}"
+WITH_VM=true "$SCRIPT_PATH/post-process.sh" "${PREVIEW_NAME}" "${GITPOD_AGENT_SMITH_TOKEN}"

#
# Cleanup from post-processing