Installing Kubeflow
Detailed Steps
For a detailed description of each {variable}, see the Terminology page.
1. Prerequisites
The installation environment must be set up before you begin (see 1. Setting Up the Installation Environment).
2. Setting Up the Databases and Tables for Kubeflow
- Create the kubeflow-table.sql file.
cat <<EOT > kubeflow-table.sql
CREATE DATABASE IF NOT EXISTS ai_conductor;
CREATE DATABASE IF NOT EXISTS cachedb;
CREATE DATABASE IF NOT EXISTS edge_conductor;
CREATE DATABASE IF NOT EXISTS kubeflow;
CREATE DATABASE IF NOT EXISTS metadb;
CREATE DATABASE IF NOT EXISTS mlpipeline;
create table if not exists metadb.Artifact
(
id int auto_increment
primary key,
type_id int not null,
uri text null,
state int null,
name varchar(255) null,
create_time_since_epoch bigint default 0 not null,
last_update_time_since_epoch bigint default 0 not null,
external_id varchar(255) null,
constraint UniqueArtifactExternalId
unique (external_id),
constraint UniqueArtifactTypeName
unique (type_id, name)
);
create index idx_artifact_create_time_since_epoch
on metadb.Artifact (create_time_since_epoch);
create index idx_artifact_external_id
on metadb.Artifact (external_id);
create index idx_artifact_last_update_time_since_epoch
on metadb.Artifact (last_update_time_since_epoch);
create index idx_artifact_uri
on metadb.Artifact (uri(255));
create table if not exists metadb.ArtifactProperty
(
artifact_id int not null,
name varchar(255) not null,
is_custom_property tinyint(1) not null,
int_value int null,
double_value double null,
string_value mediumtext null,
byte_value mediumblob null,
proto_value mediumblob null,
bool_value tinyint(1) null,
primary key (artifact_id, name, is_custom_property)
);
create index idx_artifact_property_double
on metadb.ArtifactProperty (name, is_custom_property, double_value);
create index idx_artifact_property_int
on metadb.ArtifactProperty (name, is_custom_property, int_value);
create index idx_artifact_property_string
on metadb.ArtifactProperty (name, is_custom_property, string_value(255));
create table if not exists metadb.Association
(
id int auto_increment
primary key,
context_id int not null,
execution_id int not null,
constraint context_id
unique (context_id, execution_id)
);
create table if not exists metadb.Attribution
(
id int auto_increment
primary key,
context_id int not null,
artifact_id int not null,
constraint context_id
unique (context_id, artifact_id)
);
create table if not exists metadb.Context
(
id int auto_increment
primary key,
type_id int not null,
name varchar(255) not null,
create_time_since_epoch bigint default 0 not null,
last_update_time_since_epoch bigint default 0 not null,
external_id varchar(255) null,
constraint UniqueContextExternalId
unique (external_id),
constraint type_id
unique (type_id, name)
);
create index idx_context_create_time_since_epoch
on metadb.Context (create_time_since_epoch);
create index idx_context_external_id
on metadb.Context (external_id);
create index idx_context_last_update_time_since_epoch
on metadb.Context (last_update_time_since_epoch);
create table if not exists metadb.ContextProperty
(
context_id int not null,
name varchar(255) not null,
is_custom_property tinyint(1) not null,
int_value int null,
double_value double null,
string_value mediumtext null,
byte_value mediumblob null,
proto_value mediumblob null,
bool_value tinyint(1) null,
primary key (context_id, name, is_custom_property)
);
create index idx_context_property_double
on metadb.ContextProperty (name, is_custom_property, double_value);
create index idx_context_property_int
on metadb.ContextProperty (name, is_custom_property, int_value);
create index idx_context_property_string
on metadb.ContextProperty (name, is_custom_property, string_value(255));
create table if not exists metadb.Event
(
id int auto_increment
primary key,
artifact_id int not null,
execution_id int not null,
type int not null,
milliseconds_since_epoch bigint null,
constraint UniqueEvent
unique (artifact_id, execution_id, type)
);
create index idx_event_execution_id
on metadb.Event (execution_id);
create table if not exists metadb.EventPath
(
event_id int not null,
is_index_step tinyint(1) not null,
step_index int null,
step_key text null
);
create index idx_eventpath_event_id
on metadb.EventPath (event_id);
create table if not exists metadb.Execution
(
id int auto_increment
primary key,
type_id int not null,
last_known_state int null,
name varchar(255) null,
create_time_since_epoch bigint default 0 not null,
last_update_time_since_epoch bigint default 0 not null,
external_id varchar(255) null,
constraint UniqueExecutionExternalId
unique (external_id),
constraint UniqueExecutionTypeName
unique (type_id, name)
);
create index idx_execution_create_time_since_epoch
on metadb.Execution (create_time_since_epoch);
create index idx_execution_external_id
on metadb.Execution (external_id);
create index idx_execution_last_update_time_since_epoch
on metadb.Execution (last_update_time_since_epoch);
create table if not exists metadb.ExecutionProperty
(
execution_id int not null,
name varchar(255) not null,
is_custom_property tinyint(1) not null,
int_value int null,
double_value double null,
string_value mediumtext null,
byte_value mediumblob null,
proto_value mediumblob null,
bool_value tinyint(1) null,
primary key (execution_id, name, is_custom_property)
);
create index idx_execution_property_double
on metadb.ExecutionProperty (name, is_custom_property, double_value);
create index idx_execution_property_int
on metadb.ExecutionProperty (name, is_custom_property, int_value);
create index idx_execution_property_string
on metadb.ExecutionProperty (name, is_custom_property, string_value(255));
create table if not exists metadb.MLMDEnv
(
schema_version int not null
primary key
);
insert into metadb.MLMDEnv (schema_version) value(10);
create table if not exists metadb.ParentContext
(
context_id int not null,
parent_context_id int not null,
primary key (context_id, parent_context_id)
);
create index idx_parentcontext_parent_context_id
on metadb.ParentContext (parent_context_id);
create table if not exists metadb.ParentType
(
type_id int not null,
parent_type_id int not null,
primary key (type_id, parent_type_id)
);
create table if not exists metadb.Type
(
id int auto_increment
primary key,
name varchar(255) not null,
version varchar(255) null,
type_kind tinyint(1) not null,
description text null,
input_type text null,
output_type text null,
external_id varchar(255) null,
constraint UniqueTypeExternalId
unique (external_id)
);
create index idx_type_external_id
on metadb.Type (external_id);
create index idx_type_name
on metadb.Type (name);
create table if not exists metadb.TypeProperty
(
type_id int not null,
name varchar(255) not null,
data_type int null,
primary key (type_id, name)
);
create table if not exists mlpipeline.db_statuses
(
HaveSamplesLoaded tinyint(1) not null
primary key
);
create table if not exists mlpipeline.default_experiments
(
DefaultExperimentId varchar(255) not null
primary key
);
create table if not exists cachedb.execution_caches
(
ID bigint auto_increment
primary key,
ExecutionCacheKey varchar(255) not null,
ExecutionTemplate longtext not null,
ExecutionOutput longtext null,
MaxCacheStaleness bigint not null,
StartedAtInSec bigint not null,
EndedAtInSec bigint not null
);
create index idx_cache_key
on cachedb.execution_caches (ExecutionCacheKey);
create table if not exists mlpipeline.experiments
(
UUID varchar(255) not null
primary key,
Name varchar(255) not null,
Description varchar(255) not null,
CreatedAtInSec bigint not null,
Namespace varchar(255) not null,
StorageState varchar(255) not null,
constraint idx_name_namespace
unique (Name, Namespace)
);
create table if not exists mlpipeline.jobs
(
UUID varchar(255) not null
primary key,
DisplayName varchar(255) not null,
Name varchar(255) not null,
Namespace varchar(255) not null,
ServiceAccount varchar(255) not null,
Description varchar(255) not null,
MaxConcurrency bigint not null,
NoCatchup tinyint(1) not null,
CreatedAtInSec bigint not null,
UpdatedAtInSec bigint not null,
Enabled tinyint(1) not null,
CronScheduleStartTimeInSec bigint null,
CronScheduleEndTimeInSec bigint null,
Schedule varchar(255) null,
PeriodicScheduleStartTimeInSec bigint null,
PeriodicScheduleEndTimeInSec bigint null,
IntervalSecond bigint null,
PipelineId varchar(255) not null,
PipelineName varchar(255) not null,
PipelineSpecManifest longtext null,
WorkflowSpecManifest longtext not null,
Parameters longtext null,
RuntimeParameters longtext null,
PipelineRoot longtext null,
Conditions varchar(255) not null,
ExperimentUUID varchar(255) not null,
PipelineVersionId varchar(255) null
);
create table if not exists kubeflow.observation_logs
(
trial_name varchar(255) not null,
id int auto_increment
primary key,
time datetime(6) null,
metric_name varchar(255) not null,
value text not null
);
create table if not exists mlpipeline.pipelines
(
UUID varchar(255) not null
primary key,
CreatedAtInSec bigint not null,
Name varchar(255) not null,
Description longtext not null,
Parameters longtext not null,
Status varchar(255) not null,
DefaultVersionId varchar(255) null,
Namespace varchar(63) default '' null,
constraint name_namespace_index
unique (Name, Namespace),
constraint namespace_name
unique (Name, Namespace)
);
create table if not exists mlpipeline.pipeline_versions
(
UUID varchar(255) not null
primary key,
CreatedAtInSec bigint not null,
Name varchar(255) not null,
Parameters longtext not null,
PipelineId varchar(255) not null,
Status varchar(255) not null,
CodeSourceUrl varchar(255) null,
Description longtext not null,
PipelineSpec longtext not null,
PipelineSpecURI longtext not null,
constraint idx_pipelineid_name
unique (Name, PipelineId),
constraint pipeline_versions_PipelineId_pipelines_UUID_foreign
foreign key (PipelineId) references mlpipeline.pipelines (UUID)
on update cascade on delete cascade
);
create index idx_pipeline_versions_CreatedAtInSec
on mlpipeline.pipeline_versions (CreatedAtInSec);
create index idx_pipeline_versions_PipelineId
on mlpipeline.pipeline_versions (PipelineId);
create table if not exists mlpipeline.resource_references
(
ResourceUUID varchar(255) not null,
ResourceType varchar(255) not null,
ReferenceUUID varchar(255) not null,
ReferenceName varchar(255) not null,
ReferenceType varchar(255) not null,
Relationship varchar(255) not null,
Payload longtext not null,
primary key (ResourceUUID, ResourceType, ReferenceType)
);
create index referencefilter
on mlpipeline.resource_references (ResourceType, ReferenceUUID, ReferenceType);
create table if not exists mlpipeline.run_details
(
UUID varchar(255) not null
primary key,
ExperimentUUID varchar(255) not null,
DisplayName varchar(255) not null,
Name varchar(255) not null,
StorageState varchar(255) not null,
Namespace varchar(255) not null,
ServiceAccount varchar(255) not null,
Description varchar(255) not null,
CreatedAtInSec bigint not null,
ScheduledAtInSec bigint default 0 null,
FinishedAtInSec bigint default 0 null,
Conditions varchar(255) not null,
PipelineId varchar(255) not null,
PipelineName varchar(255) not null,
PipelineSpecManifest longtext null,
WorkflowSpecManifest longtext not null,
Parameters longtext null,
RuntimeParameters longtext null,
PipelineRoot longtext null,
PipelineRuntimeManifest longtext not null,
WorkflowRuntimeManifest longtext not null,
JobUUID varchar(255) null,
PipelineVersionId varchar(255) null,
State varchar(255) null,
StateHistory longtext null,
PipelineContextId bigint default 0 null,
PipelineRunContextId bigint default 0 null
);
create index experimentuuid_conditions_finishedatinsec
on mlpipeline.run_details (ExperimentUUID, Conditions, FinishedAtInSec);
create index experimentuuid_createatinsec
on mlpipeline.run_details (ExperimentUUID, CreatedAtInSec);
create index namespace_conditions_finishedatinsec
on mlpipeline.run_details (Namespace, Conditions, FinishedAtInSec);
create index namespace_createatinsec
on mlpipeline.run_details (Namespace, CreatedAtInSec);
create table if not exists mlpipeline.run_metrics
(
RunUUID varchar(255) not null,
NodeID varchar(255) not null,
Name varchar(255) not null,
NumberValue double null,
Format varchar(255) null,
Payload longtext not null,
primary key (RunUUID, NodeID, Name),
constraint run_metrics_RunUUID_run_details_UUID_foreign
foreign key (RunUUID) references mlpipeline.run_details (UUID)
on update cascade on delete cascade
);
create table if not exists mlpipeline.tasks
(
UUID varchar(255) not null
primary key,
Namespace varchar(255) not null,
PipelineName varchar(255) not null,
RunUUID varchar(255) not null,
MLMDExecutionID varchar(255) not null,
CreatedTimestamp bigint not null,
FinishedTimestamp bigint null,
Fingerprint varchar(255) not null,
PodName varchar(255) not null,
StartedTimestamp bigint default 0 null,
Name varchar(255) null,
ParentTaskUUID varchar(255) null,
State varchar(255) null,
StateHistory longtext null,
MLMDInputs longtext null,
MLMDOutputs longtext null,
ChildrenPods longtext null,
Payload longtext null,
constraint tasks_RunUUID_run_details_UUID_foreign
foreign key (RunUUID) references mlpipeline.run_details (UUID)
on update cascade on delete cascade
);
EOT
- Set up the databases and tables used by mellerikat (Kubeflow, AI Conductor, Edge Conductor).
# path : where kubeflow-table.sql is located
mysql -h ${DB_HOST} -P ${DB_PORT} -u ${DB_ADMIN_USERNAME} -p < kubeflow-table.sql
# enter ${DB_ADMIN_PASSWORD} when prompted
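To confirm the import succeeded, you can list the databases it created (an optional check, reusing the same connection variables):
# Optional: verify the databases created by kubeflow-table.sql
mysql -h ${DB_HOST} -P ${DB_PORT} -u ${DB_ADMIN_USERNAME} -p -e "SHOW DATABASES;"
# expect ai_conductor, cachedb, edge_conductor, kubeflow, metadb, mlpipeline in the output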
3. Installing Kubeflow
- Set the {variables}.
- NOTE: Enter a value for any variable that is empty; a quick check is sketched after the exports.
export KUBEFLOW_MANIFEST_GIT_URL=https://github.com/mellerikat/kubeflow-on-aws-manifest.git
export KUBEFLOW_RELEASE_VERSION=v1.8-branch
export KUBEFLOW_VERSION_NUM=
export AWS_CLUSTER_VERSION_NUM=
export KUBEFLOW_INSTALL_VERSION=kf${KUBEFLOW_VERSION_NUM}-eks${AWS_CLUSTER_VERSION_NUM}
export KUBEFLOW_KATIB_IMAGE=${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/ecr-repo-${AWS_DEFAULT_REGION_ALIAS}-${INFRA_NAME}-${DEPLOY_ENV}/kubeflowkatib/katib-db-manager
export KUBEFLOW_OIDC_IMAGE=${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/ecr-repo-${AWS_DEFAULT_REGION_ALIAS}-${INFRA_NAME}-${DEPLOY_ENV}/oidc-authservice
export KUBEFLOW_API_SERVER_IMAGE=${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/ecr-repo-${AWS_DEFAULT_REGION_ALIAS}-${INFRA_NAME}-${DEPLOY_ENV}/ml-pipeline/api-server
export KUBEFLOW_CACHE_SERVER_IMAGE=${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/ecr-repo-${AWS_DEFAULT_REGION_ALIAS}-${INFRA_NAME}-${DEPLOY_ENV}/ml-pipeline/cache-server
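Before continuing, it can help to verify that none of the required variables are empty. A minimal sketch (the variable list below is illustrative, drawn from the exports above):
# Warn about empty variables before they are baked into the manifests (illustrative list)
for v in KUBEFLOW_VERSION_NUM AWS_CLUSTER_VERSION_NUM AWS_ACCOUNT_ID AWS_DEFAULT_REGION AWS_DEFAULT_REGION_ALIAS INFRA_NAME DEPLOY_ENV; do
  [ -z "${!v}" ] && echo "WARNING: $v is empty"
done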
- Run the installation from inside the Docker container created earlier.
- Clone the kubeflow-on-aws-manifest repository.
# path : ${TOP}
git clone ${KUBEFLOW_MANIFEST_GIT_URL} -b ${KUBEFLOW_INSTALL_VERSION}
cd kubeflow-on-aws-manifest
git clone https://github.com/kubeflow/manifests.git upstream -b ${KUBEFLOW_RELEASE_VERSION}
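Optionally, confirm that both checkouts are on the expected branches before moving on:
# Optional: confirm the expected branches were checked out
git -C ${TOP}/kubeflow-on-aws-manifest branch --show-current            # expect ${KUBEFLOW_INSTALL_VERSION}
git -C ${TOP}/kubeflow-on-aws-manifest/upstream branch --show-current   # expect ${KUBEFLOW_RELEASE_VERSION}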
- Install the required tools.
# path : ${TOP}/kubeflow-on-aws-manifest
# make install-tools
make install-kustomize install-yq install-python
# NOTE: If you have other versions of Python installed, make sure the default is set to python3.8
alias python=python3.8
- Run the commands below so that the environment variables set above are injected into the installation files.
# path : ${TOP}/kubeflow-on-aws-manifest
yq e -i '.metadata.annotations."eks.amazonaws.com/role-arn" = "arn:aws:iam::'${AWS_ACCOUNT_ID}':role/role-'${INFRA_NAME}'-'${DEPLOY_ENV}'-kubeflow-secrets-manager-sa"' ai-conductor-configs/common/aws-secrets-manager/kubeflow-secrets-manager-sa.yaml
yq e -i '.spec.parameters.objects |= sub("/parameter.*s3",env(AWS_SECRETS_MANAGER_S3))' awsconfigs/common/aws-secrets-manager/s3/secret-provider.yaml
yq e -i '.spec.parameters.objects |= sub("/parameter.*rds",env(AWS_SECRETS_MANAGER_RDS))' awsconfigs/common/aws-secrets-manager/rds/secret-provider.yaml
directories=(awsconfigs/apps/pipeline/s3 awsconfigs/apps/pipeline-static/s3)
# The content to be written
content=$(printf '
bucketName=%s
minioServiceHost=%s
minioServiceRegion=%s
' "s3-${AWS_DEFAULT_REGION_ALIAS}-${INFRA_NAME}-${DEPLOY_ENV}-kubeflow" "s3.amazonaws.com" "${AWS_DEFAULT_REGION}")
# Loop through the directories and write the content to params.env
for dir in "${directories[@]}"; do
echo "$content" > "$dir/params.env"
done
sed -i "s|host.*|host: '${DB_HOST}'|g" ai-conductor-configs/apps/pipeline-static/server_config
sed -i "s|port.*|port: ${DB_PORT}|g" ai-conductor-configs/apps/pipeline-static/server_config
sed -i "s|user.*|user: '${DB_APP_USERNAME}'|g" ai-conductor-configs/apps/pipeline-static/server_config
sed -i "s|password.*|password: '${DB_APP_PASSWORD}'|g" ai-conductor-configs/apps/pipeline-static/server_config
sed -i "s|region:.*|region: '${AWS_DEFAULT_REGION}',|g" awsconfigs/apps/pipeline-static/s3/config
sed -i "s|region:.*|region: '${AWS_DEFAULT_REGION}',|g" ai-conductor-configs/apps/pipeline-static/workflow-controller-config
yq e -i '.images[].newName = env(KUBEFLOW_KATIB_IMAGE)' ai-conductor-configs/apps/katib/kustomization.yaml
yq e -i '.images[].newTag = env(KUBEFLOW_INSTALL_VERSION)' ai-conductor-configs/apps/katib/kustomization.yaml
yq e -i '.images[].newName = env(KUBEFLOW_OIDC_IMAGE)' ai-conductor-configs/common/oidc-authservice/kustomization.yaml
yq e -i '.images[].newTag = env(KUBEFLOW_INSTALL_VERSION)' ai-conductor-configs/common/oidc-authservice/kustomization.yaml
sed -i "s|SESSION_STORE_REDIS_ADDR=.*|SESSION_STORE_REDIS_ADDR=${REDIS_HOST}:${REDIS_PORT}|g" ai-conductor-configs/common/oidc-authservice/params.env
yq e -i '.images[0].newName = env(KUBEFLOW_API_SERVER_IMAGE)' ai-conductor-configs/apps/pipeline-static/kustomization.yaml
yq e -i '.images[0].newTag = env(KUBEFLOW_INSTALL_VERSION)' ai-conductor-configs/apps/pipeline-static/kustomization.yaml
yq e -i '.images[1].newName = env(KUBEFLOW_CACHE_SERVER_IMAGE)' ai-conductor-configs/apps/pipeline-static/kustomization.yaml
yq e -i '.images[1].newTag = env(KUBEFLOW_INSTALL_VERSION)' ai-conductor-configs/apps/pipeline-static/kustomization.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/secrets-store-csi-driver/v1.3.2/deploy/rbac-secretproviderclass.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/secrets-store-csi-driver/v1.3.2/deploy/csidriver.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/secrets-store-csi-driver/v1.3.2/deploy/secrets-store.csi.x-k8s.io_secretproviderclasses.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/secrets-store-csi-driver/v1.3.2/deploy/secrets-store.csi.x-k8s.io_secretproviderclasspodstatuses.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/secrets-store-csi-driver/v1.3.2/deploy/secrets-store-csi-driver.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/secrets-store-csi-driver/v1.3.2/deploy/rbac-secretprovidersyncing.yaml
kubectl apply -f https://raw.githubusercontent.com/aws/secrets-store-csi-driver-provider-aws/main/deployment/aws-provider-installer.yaml
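# Optional sanity check (an extra step, not in the original guide): the CSI
# driver and the AWS provider pods should be registered before Kubeflow is deployed.
kubectl get csidriver secrets-store.csi.k8s.io
kubectl get pods -n kube-system | grep -i secrets-store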
printf '
dbHost='$DB_HOST'
mlmdDb='metadb'
' > awsconfigs/apps/pipeline/rds/params.env
printf '
dbPort='$DB_PORT'
mysqlPort='$DB_PORT'
mysqlHost='$DB_HOST'
' > ai-conductor-configs/apps/pipeline-static/params.env
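It is worth spot-checking that the substitutions above landed in the generated files; for example:
# Optional spot-checks of the generated configuration (not part of the original steps)
cat awsconfigs/apps/pipeline/rds/params.env
cat ai-conductor-configs/apps/pipeline-static/params.env
grep region awsconfigs/apps/pipeline-static/s3/config
yq e '.images' ai-conductor-configs/apps/pipeline-static/kustomization.yaml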
- Install the required Python packages.
pip install -r tests/e2e/requirements.txt
- Start the Kubeflow installation.
# Set variables for Makefile.aiconductor
export CLUSTER_NAME=${AWS_CLUSTER_NAME}
export CLUSTER_REGION=${AWS_DEFAULT_REGION}
make -f Makefile.aiconductor deploy-ai-conductor INSTALLATION_OPTION=kustomize DEPLOYMENT_OPTION=rds-s3 PIPELINE_S3_CREDENTIAL_OPTION=static
- Verify the Kubeflow installation.
- NOTE: The Kubeflow dashboard is accessible only after AI Conductor has been deployed.
kubectl get pod -n kubeflow
When the installation is complete, every pod's STATUS shows 'Running', as in the listing below.
NAME READY STATUS RESTARTS AGE
admission-webhook-deployment-6d5d56594c-zq8rm 1/1 Running 0 15s
aws-secrets-sync-6f5c68bc86-4n8g7 2/2 Running 0 15s
cache-server-5d594c7fd-mj7ph 2/2 Running 0 15s
centraldashboard-7fbdfffd44-hcx68 2/2 Running 0 15s
jupyter-web-app-deployment-66f5df6dcb-x66d8 2/2 Running 0 15s
katib-controller-dc948b86-mcmw6 1/1 Running 0 15s
katib-db-manager-6b5c46597b-z9k5p 1/1 Running 0 15s
katib-ui-644b544659-7tcb8 2/2 Running 0 15s
kserve-controller-manager-5887b6d64-5dlgv 2/2 Running 0 15s
kserve-models-web-app-669d78b778-dthwm 2/2 Running 0 15s
kubeflow-pipelines-profile-controller-fd55c5b8f-vb4z8 1/1 Running 0 15s
metacontroller-0 1/1 Running 0 15s
metadata-envoy-deployment-df9cc8756-wgcpg 1/1 Running 0 15s
metadata-grpc-deployment-84584b656f-rf7w2 2/2 Running 0 15s
metadata-writer-8d848b6ff-h746k 2/2 Running 0 15s
ml-pipeline-59dbd488f8-ft4p8 2/2 Running 0 15s
ml-pipeline-persistenceagent-7cc6dcd89b-xvkpp 2/2 Running 0 15s
ml-pipeline-scheduledworkflow-6d88c5665d-gjrz7 2/2 Running 0 15s
ml-pipeline-ui-774489c54b-q24ml 2/2 Running 0 15s
ml-pipeline-viewer-crd-5f45cd49c-7wkhm 2/2 Running 0 15s
ml-pipeline-visualizationserver-6744c57c9d-pshbn 2/2 Running 0 15s
notebook-controller-deployment-78668b8d48-x8zkz 2/2 Running 0 15s
profiles-deployment-6b88ffb46c-8mvlc 3/3 Running 0 15s
tensorboard-controller-deployment-74c8b8d44f-8tl4r 3/3 Running 0 15s
tensorboards-web-app-deployment-5646975d68-8sppp 2/2 Running 0 15s
training-operator-5b78f8948b-66sqt 1/1 Running 0 15s
volumes-web-app-deployment-7c4db4d478-ndc4q 2/2 Running 0 15s
workflow-controller-656cffb8b-c4q6n 2/2 Running 0 15s
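Rather than polling by hand, you can block until every pod reports Ready (an optional convenience; adjust the timeout to your environment):
# Optional: wait up to 10 minutes for all kubeflow pods to become Ready
kubectl wait --for=condition=Ready pods --all -n kubeflow --timeout=600s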