Kubeflow pipeline: Failed to execute component: unable to get pipeline with PipelineName

Installed following https://github.com/kubeflow/manifests at v1.4.1
KFP version: 1.7.0
KFP SDK version: build version dev_local
k3s Kubernetes 1.19
I used the demo example to add a pipeline. Package versions:
kfp 1.8.10
kfp-pipeline-spec 0.1.13
kfp-server-api 1.7.1
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import kfp
import kfp.dsl as dsl
from kfp.v2.dsl import component
from kfp import compiler


@component(
    base_image="library/python:3.7"
)
def add(a: float, b: float) -> float:
    '''Calculates sum of two arguments'''
    return a + b


@dsl.pipeline(
    name='v2add',
    description='An example pipeline that performs addition calculations.',
    # pipeline_root='gs://my-pipeline-root/example-pipeline'
)
def add_pipeline(a: float = 1, b: float = 7):
    add_task = add(a, b)


compiler.Compiler(
    mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE,
    launcher_image='library/gcr.io/ml-pipeline/kfp-launcher:1.8.7'
).compile(pipeline_func=add_pipeline, package_path='pipeline.yaml')
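As an aside, the same upload-and-run step can also be scripted with the KFP SDK client. This is only a rough sketch; the host below is a placeholder for the KFP endpoint, and I actually uploaded pipeline.yaml through the UI:

# Sketch only: upload the compiled package and start a run via the SDK client.
# The host is a placeholder, not my real endpoint.
client = kfp.Client(host='http://ml-pipeline-ui.kubeflow')
client.create_run_from_pipeline_package(
    'pipeline.yaml',
    arguments={'a': 1, 'b': 7},
    namespace='kubeflow-user-example-com',
)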
I upload the pipeline.yaml, start a run, and it fails with the following error.
Logs:
I1231 10:12:23.830486 1 launcher.go:144] PipelineRoot defaults to "minio://mlpipeline/v2/artifacts".
I1231 10:12:23.830866 1 cache.go:143] Cannot detect ml-pipeline in the same namespace, default to ml-pipeline.kubeflow:8887 as KFP endpoint.
I1231 10:12:23.830880 1 cache.go:120] Connecting to cache endpoint ml-pipeline.kubeflow:8887
F1231 10:12:23.832000 1 main.go:50] Failed to execute component: unable to get pipeline with PipelineName "pipeline/v2add" PipelineRunID "7e2bdeeb-aa6f-4109-a508-63a1be22267c": Failed GetContextByTypeAndName(type="system.Pipeline", name="pipeline/v2add")
Pod:
kind: Pod
apiVersion: v1
metadata:
name: v2add-rzrht-37236994
namespace: kubeflow-user-example-com
selfLink: /api/v1/namespaces/kubeflow-user-example-com/pods/v2add-rzrht-37236994
uid: 3ceb73e5-80b5-4844-8cc8-8f2bf61319d2
resourceVersion: '28824661'
creationTimestamp: '2021-12-31T10:12:21Z'
labels:
pipeline/runid: 7e2bdeeb-aa6f-4109-a508-63a1be22267c
pipelines.kubeflow.org/cache_enabled: 'true'
pipelines.kubeflow.org/enable_caching: 'true'
pipelines.kubeflow.org/kfp_sdk_version: 1.8.10
pipelines.kubeflow.org/pipeline-sdk-type: kfp
pipelines.kubeflow.org/v2_component: 'true'
workflows.argoproj.io/completed: 'true'
workflows.argoproj.io/workflow: v2add-rzrht
annotations:
pipelines.kubeflow.org/arguments.parameters: '{"a": "1", "b": "7"}'
pipelines.kubeflow.org/component_ref: '{}'
pipelines.kubeflow.org/v2_component: 'true'
sidecar.istio.io/inject: 'false'
workflows.argoproj.io/node-name: v2add-rzrht.add
workflows.argoproj.io/outputs: >-
{"artifacts":[{"name":"add-Output","path":"/tmp/outputs/Output/data"},{"name":"main-logs","s3":{"key":"artifacts/v2add-rzrht/2021/12/31/v2add-rzrht-37236994/main.log"}}]}
workflows.argoproj.io/template: >-
{"name":"add","inputs":{"parameters":[{"name":"a","value":"1"},{"name":"b","value":"7"},{"name":"pipeline-name","value":"pipeline/v2add"},{"name":"pipeline-root","value":""}]},"outputs":{"artifacts":[{"name":"add-Output","path":"/tmp/outputs/Output/data"}]},"metadata":{"annotations":{"pipelines.kubeflow.org/arguments.parameters":"{\"a\":
\"1\", \"b\":
\"7\"}","pipelines.kubeflow.org/component_ref":"{}","pipelines.kubeflow.org/v2_component":"true","sidecar.istio.io/inject":"false"},"labels":{"pipelines.kubeflow.org/cache_enabled":"true","pipelines.kubeflow.org/enable_caching":"true","pipelines.kubeflow.org/kfp_sdk_version":"1.8.10","pipelines.kubeflow.org/pipeline-sdk-type":"kfp","pipelines.kubeflow.org/v2_component":"true"}},"container":{"name":"","image":"library/python:3.7","command":["/kfp-launcher/launch","--mlmd_server_address","$(METADATA_GRPC_SERVICE_HOST)","--mlmd_server_port","$(METADATA_GRPC_SERVICE_PORT)","--runtime_info_json","$(KFP_V2_RUNTIME_INFO)","--container_image","$(KFP_V2_IMAGE)","--task_name","add","--pipeline_name","pipeline/v2add","--run_id","$(KFP_RUN_ID)","--run_resource","workflows.argoproj.io/$(WORKFLOW_ID)","--namespace","$(KFP_NAMESPACE)","--pod_name","$(KFP_POD_NAME)","--pod_uid","$(KFP_POD_UID)","--pipeline_root","","--enable_caching","$(ENABLE_CACHING)","--","a=1","b=7","--"],"args":["sh","-c","\nif
! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip || python3
-m ensurepip --user || apt-get install
python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install
--quiet --no-warn-script-location 'kfp==1.8.10' \u0026\u0026 \"$0\"
\"$#\"\n","sh","-ec","program_path=$(mktemp -d)\nprintf \"%s\" \"$0\"
\u003e \"$program_path/ephemeral_component.py\"\npython3 -m
kfp.v2.components.executor_main
--component_module_path
\"$program_path/ephemeral_component.py\"
\"$#\"\n","\nimport kfp\nfrom kfp.v2 import dsl\nfrom kfp.v2.dsl import
*\nfrom typing import *\n\ndef add(a: float, b: float) -\u003e float:\n
'''Calculates sum of two arguments'''\n return a +
b\n\n","--executor_input","{{$}}","--function_to_execute","add"],"envFrom":[{"configMapRef":{"name":"metadata-grpc-configmap","optional":true}}],"env":[{"name":"KFP_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"KFP_POD_UID","valueFrom":{"fieldRef":{"fieldPath":"metadata.uid"}}},{"name":"KFP_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}},{"name":"WORKFLOW_ID","valueFrom":{"fieldRef":{"fieldPath":"metadata.labels['workflows.argoproj.io/workflow']"}}},{"name":"KFP_RUN_ID","valueFrom":{"fieldRef":{"fieldPath":"metadata.labels['pipeline/runid']"}}},{"name":"ENABLE_CACHING","valueFrom":{"fieldRef":{"fieldPath":"metadata.labels['pipelines.kubeflow.org/enable_caching']"}}},{"name":"KFP_V2_IMAGE","value":"library/python:3.7"},{"name":"KFP_V2_RUNTIME_INFO","value":"{\"inputParameters\":
{\"a\": {\"type\": \"DOUBLE\"}, \"b\": {\"type\": \"DOUBLE\"}},
\"inputArtifacts\": {}, \"outputParameters\": {\"Output\": {\"type\":
\"DOUBLE\", \"path\": \"/tmp/outputs/Output/data\"}}, \"outputArtifacts\":
{}}"}],"resources":{},"volumeMounts":[{"name":"kfp-launcher","mountPath":"/kfp-launcher"}]},"volumes":[{"name":"kfp-launcher"}],"initContainers":[{"name":"kfp-launcher","image":"library/gcr.io/ml-pipeline/kfp-launcher:1.8.7","command":["launcher","--copy","/kfp-launcher/launch"],"resources":{},"mirrorVolumeMounts":true}],"archiveLocation":{"archiveLogs":true,"s3":{"endpoint":"minio-service.kubeflow:9000","bucket":"mlpipeline","insecure":true,"accessKeySecret":{"name":"mlpipeline-minio-artifact","key":"accesskey"},"secretKeySecret":{"name":"mlpipeline-minio-artifact","key":"secretkey"},"key":"artifacts/v2add-rzrht/2021/12/31/v2add-rzrht-37236994"}}}
ownerReferences:
- apiVersion: argoproj.io/v1alpha1
kind: Workflow
name: v2add-rzrht
uid: 9a806b04-d5fa-49eb-9e46-7502bc3e7ac5
controller: true
blockOwnerDeletion: true
managedFields:
- manager: workflow-controller
operation: Update
apiVersion: v1
time: '2021-12-31T10:12:21Z'
fieldsType: FieldsV1
fieldsV1:
'f:metadata':
'f:annotations':
.: {}
'f:pipelines.kubeflow.org/arguments.parameters': {}
'f:pipelines.kubeflow.org/component_ref': {}
'f:pipelines.kubeflow.org/v2_component': {}
'f:sidecar.istio.io/inject': {}
'f:workflows.argoproj.io/node-name': {}
'f:workflows.argoproj.io/template': {}
'f:labels':
.: {}
'f:pipeline/runid': {}
'f:pipelines.kubeflow.org/cache_enabled': {}
'f:pipelines.kubeflow.org/enable_caching': {}
'f:pipelines.kubeflow.org/kfp_sdk_version': {}
'f:pipelines.kubeflow.org/pipeline-sdk-type': {}
'f:pipelines.kubeflow.org/v2_component': {}
'f:workflows.argoproj.io/completed': {}
'f:workflows.argoproj.io/workflow': {}
'f:ownerReferences':
.: {}
'k:{"uid":"9a806b04-d5fa-49eb-9e46-7502bc3e7ac5"}':
.: {}
'f:apiVersion': {}
'f:blockOwnerDeletion': {}
'f:controller': {}
'f:kind': {}
'f:name': {}
'f:uid': {}
'f:spec':
'f:containers':
'k:{"name":"main"}':
.: {}
'f:args': {}
'f:command': {}
'f:env':
.: {}
'k:{"name":"ARGO_CONTAINER_NAME"}':
.: {}
'f:name': {}
'f:value': {}
'k:{"name":"ARGO_INCLUDE_SCRIPT_OUTPUT"}':
.: {}
'f:name': {}
'f:value': {}
'k:{"name":"ENABLE_CACHING"}':
.: {}
'f:name': {}
'f:valueFrom':
.: {}
'f:fieldRef':
.: {}
'f:apiVersion': {}
'f:fieldPath': {}
'k:{"name":"KFP_NAMESPACE"}':
.: {}
'f:name': {}
'f:valueFrom':
.: {}
'f:fieldRef':
.: {}
'f:apiVersion': {}
'f:fieldPath': {}
'k:{"name":"KFP_POD_NAME"}':
.: {}
'f:name': {}
'f:valueFrom':
.: {}
'f:fieldRef':
.: {}
'f:apiVersion': {}
'f:fieldPath': {}
'k:{"name":"KFP_POD_UID"}':
.: {}
'f:name': {}
'f:valueFrom':
.: {}
'f:fieldRef':
.: {}
'f:apiVersion': {}
'f:fieldPath': {}
'k:{"name":"KFP_RUN_ID"}':
.: {}
'f:name': {}
'f:valueFrom':
.: {}
'f:fieldRef':
.: {}
'f:apiVersion': {}
'f:fieldPath': {}
'k:{"name":"KFP_V2_IMAGE"}':
.: {}
'f:name': {}
'f:value': {}
'k:{"name":"KFP_V2_RUNTIME_INFO"}':
.: {}
'f:name': {}
'f:value': {}
'k:{"name":"WORKFLOW_ID"}':
.: {}
'f:name': {}
'f:valueFrom':
.: {}
'f:fieldRef':
.: {}
'f:apiVersion': {}
'f:fieldPath': {}
'f:envFrom': {}
'f:image': {}
'f:imagePullPolicy': {}
'f:name': {}
'f:resources': {}
'f:terminationMessagePath': {}
'f:terminationMessagePolicy': {}
'f:volumeMounts':
.: {}
'k:{"mountPath":"/kfp-launcher"}':
.: {}
'f:mountPath': {}
'f:name': {}
'k:{"name":"wait"}':
.: {}
'f:command': {}
'f:env':
.: {}
'k:{"name":"ARGO_CONTAINER_NAME"}':
.: {}
'f:name': {}
'f:value': {}
'k:{"name":"ARGO_CONTAINER_RUNTIME_EXECUTOR"}':
.: {}
'f:name': {}
'f:value': {}
'k:{"name":"ARGO_INCLUDE_SCRIPT_OUTPUT"}':
.: {}
'f:name': {}
'f:value': {}
'k:{"name":"ARGO_POD_NAME"}':
.: {}
'f:name': {}
'f:valueFrom':
.: {}
'f:fieldRef':
.: {}
'f:apiVersion': {}
'f:fieldPath': {}
'k:{"name":"GODEBUG"}':
.: {}
'f:name': {}
'f:value': {}
'f:image': {}
'f:imagePullPolicy': {}
'f:name': {}
'f:resources':
.: {}
'f:limits':
.: {}
'f:cpu': {}
'f:memory': {}
'f:requests':
.: {}
'f:cpu': {}
'f:memory': {}
'f:terminationMessagePath': {}
'f:terminationMessagePolicy': {}
'f:volumeMounts':
.: {}
'k:{"mountPath":"/argo/podmetadata"}':
.: {}
'f:mountPath': {}
'f:name': {}
'k:{"mountPath":"/argo/secret/mlpipeline-minio-artifact"}':
.: {}
'f:mountPath': {}
'f:name': {}
'f:readOnly': {}
'k:{"mountPath":"/mainctrfs/kfp-launcher"}':
.: {}
'f:mountPath': {}
'f:name': {}
'k:{"mountPath":"/var/run/docker.sock"}':
.: {}
'f:mountPath': {}
'f:name': {}
'f:readOnly': {}
'f:dnsPolicy': {}
'f:enableServiceLinks': {}
'f:initContainers':
.: {}
'k:{"name":"kfp-launcher"}':
.: {}
'f:command': {}
'f:env':
.: {}
'k:{"name":"ARGO_CONTAINER_NAME"}':
.: {}
'f:name': {}
'f:value': {}
'k:{"name":"ARGO_INCLUDE_SCRIPT_OUTPUT"}':
.: {}
'f:name': {}
'f:value': {}
'f:image': {}
'f:imagePullPolicy': {}
'f:name': {}
'f:resources': {}
'f:terminationMessagePath': {}
'f:terminationMessagePolicy': {}
'f:volumeMounts':
.: {}
'k:{"mountPath":"/kfp-launcher"}':
.: {}
'f:mountPath': {}
'f:name': {}
'f:restartPolicy': {}
'f:schedulerName': {}
'f:securityContext': {}
'f:serviceAccount': {}
'f:serviceAccountName': {}
'f:terminationGracePeriodSeconds': {}
'f:volumes':
.: {}
'k:{"name":"docker-sock"}':
.: {}
'f:hostPath':
.: {}
'f:path': {}
'f:type': {}
'f:name': {}
'k:{"name":"kfp-launcher"}':
.: {}
'f:emptyDir': {}
'f:name': {}
'k:{"name":"mlpipeline-minio-artifact"}':
.: {}
'f:name': {}
'f:secret':
.: {}
'f:defaultMode': {}
'f:items': {}
'f:secretName': {}
'k:{"name":"podmetadata"}':
.: {}
'f:downwardAPI':
.: {}
'f:defaultMode': {}
'f:items': {}
'f:name': {}
- manager: k3s
operation: Update
apiVersion: v1
time: '2021-12-31T10:12:24Z'
fieldsType: FieldsV1
fieldsV1:
'f:status':
'f:conditions':
'k:{"type":"ContainersReady"}':
.: {}
'f:lastProbeTime': {}
'f:lastTransitionTime': {}
'f:message': {}
'f:reason': {}
'f:status': {}
'f:type': {}
'k:{"type":"Initialized"}':
.: {}
'f:lastProbeTime': {}
'f:lastTransitionTime': {}
'f:status': {}
'f:type': {}
'k:{"type":"Ready"}':
.: {}
'f:lastProbeTime': {}
'f:lastTransitionTime': {}
'f:message': {}
'f:reason': {}
'f:status': {}
'f:type': {}
'f:containerStatuses': {}
'f:hostIP': {}
'f:initContainerStatuses': {}
'f:phase': {}
'f:podIP': {}
'f:podIPs':
.: {}
'k:{"ip":"10.42.0.101"}':
.: {}
'f:ip': {}
'f:startTime': {}
- manager: argoexec
operation: Update
apiVersion: v1
time: '2021-12-31T10:12:25Z'
fieldsType: FieldsV1
fieldsV1:
'f:metadata':
'f:annotations':
'f:workflows.argoproj.io/outputs': {}
status:
phase: Failed
conditions:
- type: Initialized
status: 'True'
lastProbeTime: null
lastTransitionTime: '2021-12-31T10:12:23Z'
- type: Ready
status: 'False'
lastProbeTime: null
lastTransitionTime: '2021-12-31T10:12:21Z'
reason: ContainersNotReady
message: 'containers with unready status: [wait main]'
- type: ContainersReady
status: 'False'
lastProbeTime: null
lastTransitionTime: '2021-12-31T10:12:21Z'
reason: ContainersNotReady
message: 'containers with unready status: [wait main]'
- type: PodScheduled
status: 'True'
lastProbeTime: null
lastTransitionTime: '2021-12-31T10:12:21Z'
hostIP: 10.19.64.214
podIP: 10.42.0.101
podIPs:
- ip: 10.42.0.101
startTime: '2021-12-31T10:12:21Z'
initContainerStatuses:
- name: kfp-launcher
state:
terminated:
exitCode: 0
reason: Completed
startedAt: '2021-12-31T10:12:22Z'
finishedAt: '2021-12-31T10:12:22Z'
containerID: >-
docker://fbf8b39a3bab8065b54e9a3b25a678e07e0880ef61f9e78abe92f9fa205a73c4
lastState: {}
ready: true
restartCount: 0
image: 'library/gcr.io/ml-pipeline/kfp-launcher:1.8.7'
imageID: >-
docker-pullable://library/gcr.io/ml-pipeline/kfp-launcher#sha256:8b3f14d468a41c319e95ef4047b7823c64480fd1980c3d5b369c8412afbc684f
containerID: >-
docker://fbf8b39a3bab8065b54e9a3b25a678e07e0880ef61f9e78abe92f9fa205a73c4
containerStatuses:
- name: main
state:
terminated:
exitCode: 1
reason: Error
startedAt: '2021-12-31T10:12:23Z'
finishedAt: '2021-12-31T10:12:23Z'
containerID: >-
docker://26faae59907e5a4207960ee9d15d9d350587c5be7db31c3e8f0ec97e72c6d2cf
lastState: {}
ready: false
restartCount: 0
image: 'python:3.7'
imageID: >-
docker-pullable://python#sha256:3908249ce6b2d28284e3610b07bf406c3035bc2e3ce328711a2b42e1c5a75fc1
containerID: >-
docker://26faae59907e5a4207960ee9d15d9d350587c5be7db31c3e8f0ec97e72c6d2cf
started: false
- name: wait
state:
terminated:
exitCode: 1
reason: Error
message: >-
path /tmp/outputs/Output/data does not exist in archive
/tmp/argo/outputs/artifacts/add-Output.tgz
startedAt: '2021-12-31T10:12:23Z'
finishedAt: '2021-12-31T10:12:25Z'
containerID: >-
docker://66b6306eb81ac2abb1fbf2609d7375a00f92891f1c827680a45962cbb1ec3c0a
lastState: {}
ready: false
restartCount: 0
image: 'library/gcr.io/ml-pipeline/argoexec:v3.1.6-patch-license-compliance'
imageID: >-
docker-pullable://library/gcr.io/ml-pipeline/argoexec#sha256:44cf8455a51aa5b961d1a86f65e39adf5ffca9bdcd33a745c3b79f430b7439e0
containerID: >-
docker://66b6306eb81ac2abb1fbf2609d7375a00f92891f1c827680a45962cbb1ec3c0a
started: false
qosClass: Burstable
spec:
volumes:
- name: podmetadata
downwardAPI:
items:
- path: annotations
fieldRef:
apiVersion: v1
fieldPath: metadata.annotations
defaultMode: 420
- name: docker-sock
hostPath:
path: /var/run/docker.sock
type: Socket
- name: kfp-launcher
emptyDir: {}
- name: mlpipeline-minio-artifact
secret:
secretName: mlpipeline-minio-artifact
items:
- key: accesskey
path: accesskey
- key: secretkey
path: secretkey
defaultMode: 420
- name: default-editor-token-8lmfr
secret:
secretName: default-editor-token-8lmfr
defaultMode: 420
initContainers:
- name: kfp-launcher
image: 'library/gcr.io/ml-pipeline/kfp-launcher:1.8.7'
command:
- launcher
- '--copy'
- /kfp-launcher/launch
env:
- name: ARGO_CONTAINER_NAME
value: kfp-launcher
- name: ARGO_INCLUDE_SCRIPT_OUTPUT
value: 'false'
resources: {}
volumeMounts:
- name: kfp-launcher
mountPath: /kfp-launcher
- name: default-editor-token-8lmfr
readOnly: true
mountPath: /var/run/secrets/kubernetes.io/serviceaccount
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
containers:
- name: wait
image: 'library/gcr.io/ml-pipeline/argoexec:v3.1.6-patch-license-compliance'
command:
- argoexec
- wait
- '--loglevel'
- info
env:
- name: ARGO_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: ARGO_CONTAINER_RUNTIME_EXECUTOR
value: docker
- name: GODEBUG
value: x509ignoreCN=0
- name: ARGO_CONTAINER_NAME
value: wait
- name: ARGO_INCLUDE_SCRIPT_OUTPUT
value: 'false'
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 10m
memory: 32Mi
volumeMounts:
- name: podmetadata
mountPath: /argo/podmetadata
- name: docker-sock
readOnly: true
mountPath: /var/run/docker.sock
- name: mlpipeline-minio-artifact
readOnly: true
mountPath: /argo/secret/mlpipeline-minio-artifact
- name: kfp-launcher
mountPath: /mainctrfs/kfp-launcher
- name: default-editor-token-8lmfr
readOnly: true
mountPath: /var/run/secrets/kubernetes.io/serviceaccount
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
- name: main
image: 'library/python:3.7'
command:
- /kfp-launcher/launch
- '--mlmd_server_address'
- $(METADATA_GRPC_SERVICE_HOST)
- '--mlmd_server_port'
- $(METADATA_GRPC_SERVICE_PORT)
- '--runtime_info_json'
- $(KFP_V2_RUNTIME_INFO)
- '--container_image'
- $(KFP_V2_IMAGE)
- '--task_name'
- add
- '--pipeline_name'
- pipeline/v2add
- '--run_id'
- $(KFP_RUN_ID)
- '--run_resource'
- workflows.argoproj.io/$(WORKFLOW_ID)
- '--namespace'
- $(KFP_NAMESPACE)
- '--pod_name'
- $(KFP_POD_NAME)
- '--pod_uid'
- $(KFP_POD_UID)
- '--pipeline_root'
- ''
- '--enable_caching'
- $(ENABLE_CACHING)
- '--'
- a=1
- b=7
- '--'
args:
- sh
- '-c'
- >
if ! [ -x "$(command -v pip)" ]; then
python3 -m ensurepip || python3 -m ensurepip --user || apt-get install python3-pip
fi
PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet
--no-warn-script-location 'kfp==1.8.10' && "$0" "$#"
- sh
- '-ec'
- >
program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.v2.components.executor_main
--component_module_path
"$program_path/ephemeral_component.py" "$#"
- |+
import kfp
from kfp.v2 import dsl
from kfp.v2.dsl import *
from typing import *
def add(a: float, b: float) -> float:
'''Calculates sum of two arguments'''
return a + b
- '--executor_input'
- '{{$}}'
- '--function_to_execute'
- add
envFrom:
- configMapRef:
name: metadata-grpc-configmap
optional: true
env:
- name: KFP_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: KFP_POD_UID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.uid
- name: KFP_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: WORKFLOW_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: 'metadata.labels[''workflows.argoproj.io/workflow'']'
- name: KFP_RUN_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: 'metadata.labels[''pipeline/runid'']'
- name: ENABLE_CACHING
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: 'metadata.labels[''pipelines.kubeflow.org/enable_caching'']'
- name: KFP_V2_IMAGE
value: 'library/python:3.7'
- name: KFP_V2_RUNTIME_INFO
value: >-
{"inputParameters": {"a": {"type": "DOUBLE"}, "b": {"type":
"DOUBLE"}}, "inputArtifacts": {}, "outputParameters": {"Output":
{"type": "DOUBLE", "path": "/tmp/outputs/Output/data"}},
"outputArtifacts": {}}
- name: ARGO_CONTAINER_NAME
value: main
- name: ARGO_INCLUDE_SCRIPT_OUTPUT
value: 'false'
resources: {}
volumeMounts:
- name: kfp-launcher
mountPath: /kfp-launcher
- name: default-editor-token-8lmfr
readOnly: true
mountPath: /var/run/secrets/kubernetes.io/serviceaccount
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
restartPolicy: Never
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
serviceAccountName: default-editor
serviceAccount: default-editor
nodeName: iz1bb01rvtheuakv3h25ntz
securityContext: {}
schedulerName: default-scheduler
tolerations:
- key: node.kubernetes.io/not-ready
operator: Exists
effect: NoExecute
tolerationSeconds: 300
- key: node.kubernetes.io/unreachable
operator: Exists
effect: NoExecute
tolerationSeconds: 300
priority: 0
enableServiceLinks: true
preemptionPolicy: PreemptLowerPriority
I don't know why it can't find the pipeline by PipelineName. Any ideas?

Related

Error creating new user or granting for user permissions on Rancher

I'm having problems with creating an account on Rancher. When creating a new account I get the following error:
Internal error occurred: failed calling webhook "rancherauth.cattle.io": Post "https://rancher-webhook.cattle-system.svc:443/v1/webhook/validation?timeout=10s"
Detail:
Internal error occurred: failed calling webhook "rancherauth.cattle.io":
Post "https://rancher-webhook.cattle-system.svc:443/v1/webhook/validation?timeout=10s":
dial tcp 10.43.163.117:443: connect: connection refused
I'm using Rancher version v2.5.13.
Thank you,
Peter
This solved the problem for me.
Looks like deployment rancher-webhook in namespace cattle-system was removed for some reason.
You need to go to cluster local ==> project system ==> namespace cattle-system and check whether it is still there.
If the rancher-webhook deployment does not exist, you can recreate it by importing the YAML from another Rancher instance (use Import YAML from the Rancher menu, top-right corner), or you will have to reinstall Rancher to get the rancher-webhook deployment back.
This is the YAML file that I use:
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "2"
meta.helm.sh/release-name: rancher-webhook
meta.helm.sh/release-namespace: cattle-system
generation: 2
labels:
app.kubernetes.io/managed-by: Helm
managedFields:
- apiVersion: apps/v1
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
.: {}
f:meta.helm.sh/release-name: {}
f:meta.helm.sh/release-namespace: {}
f:labels:
.: {}
f:app.kubernetes.io/managed-by: {}
f:spec:
f:progressDeadlineSeconds: {}
f:replicas: {}
f:revisionHistoryLimit: {}
f:selector:
f:matchLabels:
.: {}
f:app: {}
f:strategy:
f:rollingUpdate:
.: {}
f:maxSurge: {}
f:maxUnavailable: {}
f:type: {}
f:template:
f:metadata:
f:labels:
.: {}
f:app: {}
f:spec:
f:containers:
k:{"name":"rancher-webhook"}:
.: {}
f:env:
.: {}
k:{"name":"NAMESPACE"}:
.: {}
f:name: {}
f:valueFrom:
.: {}
f:fieldRef:
.: {}
f:apiVersion: {}
f:fieldPath: {}
f:image: {}
f:imagePullPolicy: {}
f:name: {}
f:ports:
.: {}
k:{"containerPort":9443,"protocol":"TCP"}:
.: {}
f:containerPort: {}
f:name: {}
f:protocol: {}
f:resources: {}
f:terminationMessagePath: {}
f:terminationMessagePolicy: {}
f:dnsPolicy: {}
f:restartPolicy: {}
f:schedulerName: {}
f:securityContext: {}
f:serviceAccount: {}
f:serviceAccountName: {}
f:terminationGracePeriodSeconds: {}
manager: Go-http-client
operation: Update
time: "2021-07-22T19:25:06Z"
- apiVersion: apps/v1
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
f:deployment.kubernetes.io/revision: {}
f:status:
f:availableReplicas: {}
f:conditions:
.: {}
k:{"type":"Available"}:
.: {}
f:lastTransitionTime: {}
f:lastUpdateTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
k:{"type":"Progressing"}:
.: {}
f:lastTransitionTime: {}
f:lastUpdateTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
f:observedGeneration: {}
f:readyReplicas: {}
f:replicas: {}
f:updatedReplicas: {}
manager: k3s
operation: Update
time: "2022-06-23T03:38:49Z"
name: rancher-webhook
namespace: cattle-system
resourceVersion: "291873445"
selfLink: /apis/apps/v1/namespaces/cattle-system/deployments/rancher-webhook
uid: 9c9d68eb-1b0d-4371-9d02-a733c22d036c
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: rancher-webhook
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: rancher-webhook
spec:
containers:
- env:
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: rancher/rancher-webhook:v0.1.4
imagePullPolicy: IfNotPresent
name: rancher-webhook
ports:
- containerPort: 9443
name: https
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: rancher-webhook
serviceAccountName: rancher-webhook
terminationGracePeriodSeconds: 30
Note: if you copied the YAML of the rancher-webhook deployment from another Rancher, remove the status section from the file before creating it.
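If you would rather re-create the deployment from a script than through the Rancher UI, a minimal sketch with the Kubernetes Python client could look like the following. The file name rancher-webhook.yaml is an assumption, not something from the original answer:

# Sketch only: re-create the rancher-webhook deployment from an exported YAML file.
# Assumes the 'kubernetes' and 'pyyaml' packages and a working kubeconfig.
import yaml
from kubernetes import client, config

config.load_kube_config()
with open("rancher-webhook.yaml") as f:
    dep = yaml.safe_load(f)

# Drop the status section and server-assigned metadata copied from the other
# cluster; these should not be carried over when re-creating the object.
dep.pop("status", None)
for field in ("resourceVersion", "uid", "selfLink", "creationTimestamp", "managedFields"):
    dep["metadata"].pop(field, None)

client.AppsV1Api().create_namespaced_deployment(namespace="cattle-system", body=dep)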
Thanks!

Jenkins + kubernetes plugin + connection issues with k8s api

We have installed Jenkins inside a Kubernetes cluster via a Helm chart: https://github.com/jenkinsci/helm-charts
Kubernetes server version: 1.21.6
Installed Helm chart version: 3.8.9
Jenkins image: jenkins/jenkins:2.303.3-alpine
There are some issues with the underlying Kubernetes API connection. Sometimes it works, and sometimes we get an error response from the Kubernetes API:
Error testing connection https://kubernetes.default:
java.io.FileNotFoundException: /var/jenkins_home/.kube/config (No such
file or directory)
The Jenkins Helm chart autowires the Jenkins serviceaccount, roles, rolebindings, and secret token, so there should be no need for a /var/jenkins_home/.kube/config file.
I'm at a bit of a loss as to what the precise issue is.
Here are some logs:
The k8s api error:
Autoconfiguring Kubernetes client
Nov 25, 2021 1:02:43 PM FINE org.csanchez.jenkins.plugins.kubernetes.KubernetesFactoryAdapter
Creating Kubernetes client: KubernetesFactoryAdapter [serviceAddress=, namespace=kring-jenkins, caCertData=null, credentials=null, skipTlsVerify=false, connectTimeout=0, readTimeout=0]
Nov 25, 2021 1:02:43 PM FINE org.csanchez.jenkins.plugins.kubernetes.KubernetesFactoryAdapter
Proxy Settings for Cloud: false
Nov 25, 2021 1:02:43 PM FINE org.csanchez.jenkins.plugins.kubernetes.KubernetesCloud
Error testing connection
java.io.FileNotFoundException: /var/jenkins_home/.kube/config (No such file or directory)
at java.base/java.io.FileInputStream.open0(Native Method)
at java.base/java.io.FileInputStream.open(Unknown Source)
at java.base/java.io.FileInputStream.<init>(Unknown Source)
at com.fasterxml.jackson.dataformat.yaml.YAMLFactory.createParser(YAMLFactory.java:354)
at com.fasterxml.jackson.dataformat.yaml.YAMLFactory.createParser(YAMLFactory.java:15)
at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:3495)
at io.fabric8.kubernetes.client.internal.KubeConfigUtils.parseConfig(KubeConfigUtils.java:42)
at io.fabric8.kubernetes.client.utils.TokenRefreshInterceptor.intercept(TokenRefreshInterceptor.java:44)
at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:147)
at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:121)
at io.fabric8.kubernetes.client.utils.ImpersonatorInterceptor.intercept(ImpersonatorInterceptor.java:68)
at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:147)
at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:121)
at io.fabric8.kubernetes.client.utils.HttpClientUtils.lambda$createApplicableInterceptors$6(HttpClientUtils.java:284)
at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:147)
at okhttp3.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:121)
at okhttp3.RealCall.getResponseWithInterceptorChain(RealCall.java:257)
at okhttp3.RealCall.execute(RealCall.java:93)
at io.fabric8.kubernetes.client.dsl.base.OperationSupport.handleResponse(OperationSupport.java:541)
at io.fabric8.kubernetes.client.dsl.base.OperationSupport.handleResponse(OperationSupport.java:504)
at io.fabric8.kubernetes.client.dsl.base.OperationSupport.handleResponse(OperationSupport.java:487)
at io.fabric8.kubernetes.client.dsl.base.BaseOperation.listRequestHelper(BaseOperation.java:163)
Caused: io.fabric8.kubernetes.client.KubernetesClientException: Operation: [list] for kind: [Pod] with name: [null] in namespace: [kring-jenkins] failed.
at io.fabric8.kubernetes.client.KubernetesClientException.launderThrowable(KubernetesClientException.java:64)
at io.fabric8.kubernetes.client.KubernetesClientException.launderThrowable(KubernetesClientException.java:72)
at io.fabric8.kubernetes.client.dsl.base.BaseOperation.listRequestHelper(BaseOperation.java:170)
at io.fabric8.kubernetes.client.dsl.base.BaseOperation.list(BaseOperation.java:672)
at io.fabric8.kubernetes.client.dsl.base.BaseOperation.list(BaseOperation.java:86)
at org.csanchez.jenkins.plugins.kubernetes.KubernetesCloud$DescriptorImpl.doTestConnection(KubernetesCloud.java:730)
at java.base/java.lang.invoke.MethodHandle.invokeWithArguments(Unknown Source)
at org.kohsuke.stapler.Function$MethodFunction.invoke(Function.java:393)
at org.kohsuke.stapler.Function$InstanceFunction.invoke(Function.java:405)
at org.kohsuke.stapler.interceptor.RequirePOST$Processor.invoke(RequirePOST.java:77)
at org.kohsuke.stapler.PreInvokeInterceptedFunction.invoke(PreInvokeInterceptedFunction.java:26)
at org.kohsuke.stapler.Function.bindAndInvoke(Function.java:208)
at org.kohsuke.stapler.Function.bindAndInvokeAndServeResponse(Function.java:141)
at org.kohsuke.stapler.MetaClass$11.doDispatch(MetaClass.java:536)
at org.kohsuke.stapler.NameBasedDispatcher.dispatch(NameBasedDispatcher.java:58)
at org.kohsuke.stapler.Stapler.tryInvoke(Stapler.java:766)
at org.kohsuke.stapler.Stapler.invoke(Stapler.java:898)
at org.kohsuke.stapler.MetaClass$4.doDispatch(MetaClass.java:281)
at org.kohsuke.stapler.NameBasedDispatcher.dispatch(NameBasedDispatcher.java:58)
at org.kohsuke.stapler.Stapler.tryInvoke(Stapler.java:766)
at org.kohsuke.stapler.Stapler.invoke(Stapler.java:898)
at org.kohsuke.stapler.Stapler.invoke(Stapler.java:694)
at org.kohsuke.stapler.Stapler.service(Stapler.java:240)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:790)
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:799)
at org.eclipse.jetty.servlet.ServletHandler$ChainEnd.doFilter(ServletHandler.java:1626)
at hudson.util.PluginServletFilter$1.doFilter(PluginServletFilter.java:156)
at jenkins.security.ResourceDomainFilter.doFilter(ResourceDomainFilter.java:80)
at hudson.util.PluginServletFilter$1.doFilter(PluginServletFilter.java:153)
at jenkins.telemetry.impl.UserLanguages$AcceptLanguageFilter.doFilter(UserLanguages.java:128)
at hudson.util.PluginServletFilter$1.doFilter(PluginServletFilter.java:153)
at jenkins.metrics.impl.MetricsFilter.doFilter(MetricsFilter.java:125)
at hudson.util.PluginServletFilter$1.doFilter(PluginServletFilter.java:153)
at hudson.util.PluginServletFilter.doFilter(PluginServletFilter.java:159)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1601)
at hudson.security.csrf.CrumbFilter.doFilter(CrumbFilter.java:153)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1601)
at hudson.security.ChainedServletFilter$1.doFilter(ChainedServletFilter.java:92)
at jenkins.security.AcegiSecurityExceptionFilter.doFilter(AcegiSecurityExceptionFilter.java:52)
at hudson.security.ChainedServletFilter$1.doFilter(ChainedServletFilter.java:97)
at hudson.security.UnwrapSecurityExceptionFilter.doFilter(UnwrapSecurityExceptionFilter.java:53)
at hudson.security.ChainedServletFilter$1.doFilter(ChainedServletFilter.java:97)
at org.springframework.security.web.access.ExceptionTranslationFilter.doFilter(ExceptionTranslationFilter.java:121)
at org.springframework.security.web.access.ExceptionTranslationFilter.doFilter(ExceptionTranslationFilter.java:115)
at hudson.security.ChainedServletFilter$1.doFilter(ChainedServletFilter.java:97)
at org.springframework.security.web.authentication.AnonymousAuthenticationFilter.doFilter(AnonymousAuthenticationFilter.java:105)
at hudson.security.ChainedServletFilter$1.doFilter(ChainedServletFilter.java:97)
at org.springframework.security.web.authentication.rememberme.RememberMeAuthenticationFilter.doFilter(RememberMeAuthenticationFilter.java:101)
at org.springframework.security.web.authentication.rememberme.RememberMeAuthenticationFilter.doFilter(RememberMeAuthenticationFilter.java:92)
at hudson.security.ChainedServletFilter$1.doFilter(ChainedServletFilter.java:97)
at org.springframework.security.web.authentication.AbstractAuthenticationProcessingFilter.doFilter(AbstractAuthenticationProcessingFilter.java:218)
at org.springframework.security.web.authentication.AbstractAuthenticationProcessingFilter.doFilter(AbstractAuthenticationProcessingFilter.java:212)
at hudson.security.ChainedServletFilter$1.doFilter(ChainedServletFilter.java:97)
at jenkins.security.BasicHeaderProcessor.doFilter(BasicHeaderProcessor.java:97)
at hudson.security.ChainedServletFilter$1.doFilter(ChainedServletFilter.java:97)
at org.springframework.security.web.context.SecurityContextPersistenceFilter.doFilter(SecurityContextPersistenceFilter.java:110)
at org.springframework.security.web.context.SecurityContextPersistenceFilter.doFilter(SecurityContextPersistenceFilter.java:80)
at hudson.security.HttpSessionContextIntegrationFilter2.doFilter(HttpSessionContextIntegrationFilter2.java:62)
at hudson.security.ChainedServletFilter$1.doFilter(ChainedServletFilter.java:97)
at hudson.security.ChainedServletFilter.doFilter(ChainedServletFilter.java:109)
at hudson.security.HudsonFilter.doFilter(HudsonFilter.java:171)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1601)
at org.kohsuke.stapler.compression.CompressionFilter.doFilter(CompressionFilter.java:51)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1601)
at hudson.util.CharacterEncodingFilter.doFilter(CharacterEncodingFilter.java:85)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1601)
at org.kohsuke.stapler.DiagnosticThreadNameFilter.doFilter(DiagnosticThreadNameFilter.java:30)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1601)
at jenkins.security.SuspiciousRequestFilter.doFilter(SuspiciousRequestFilter.java:35)
at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193)
at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1601)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:548)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:578)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:235)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1624)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:233)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1434)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:188)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:501)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1594)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:186)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1349)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.Server.handle(Server.java:516)
at org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:388)
at org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:633)
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:380)
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:277)
at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105)
at org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:338)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:315)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:131)
at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:386)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
at java.base/java.lang.Thread.run(Unknown Source)
Deployed helm chart
USER-SUPPLIED VALUES:
agent:
alwaysPullImage: false
componentName: jenkins-agent
containerCap: 5
customJenkinsLabels: []
defaultsProviderTemplate: ""
enabled: true
envVars:
- name: JENKINS_URL
value: http://jenkins.kring-jenkins.svc.cluster.local:8080/
- name: DIND
value: "false"
image: jenkins/inbound-agent
imagePullSecretName: null
jenkinsTunnel: jenkins-agent.kring-jenkins.svc.cluster.local:50000
jenkinsUrl: http://jenkins.kring-jenkins.svc.cluster.local:8080
kubernetesConnectTimeout: 90
kubernetesReadTimeout: 90
maxRequestsPerHostStr: "5"
namespace: kring-jenkins
nodeSelector: {}
nodeUsageMode: NORMAL
podRetention: Never
privileged: true
resources:
limits:
cpu: 512m
memory: 1024Mi
requests:
cpu: 256m
memory: 256Mi
runAsGroup: null
runAsUser: 1000
showRawYaml: true
tag: 4.11-1
volumes:
- hostPath: /var/run/docker.sock
mountPath: /var/run/docker.sock
type: HostPath
websocket: false
workingDir: /home/jenkins/agent
checkDeprecation: true
clusterZone: cluster.local
controller:
JCasC:
configScripts:
global-security: |
jenkins:
securityRealm:
local:
allowsSignup: false
enableCaptcha: false
users:
- id: ${ADMIN_USER}
password: ${ADMIN_PASSWORD}
- id: jenkins-job-builder
password: jenkins-job-builder
- id: operator
password: operator
authorizationStrategy:
loggedInUsersCanDoAnything:
allowAnonymousRead: false
jobs: |
jobs:
- script: >
folder('devops');
pipelineJob('devops/jenkins-job-creator') {
definition {
triggers {
scm('* * * * *')
}
cpsScm {
scm {
git {
remote {
url('git#bitbucket.org:kg_kring/jenkins-jobs.git')
credentials('k8s-secret-ssh-git')
}
branches('master')
scriptPath('Jenkinsfile')
}
}
}
}
}
unclassified: |
unclassified:
globalLibraries:
libraries:
- name: "jenkins-utils"
defaultVersion: "master"
includeInChangeSets: false
retriever:
modernSCM:
scm:
git:
id: "jenkins-utils"
remote: "git#bitbucket.org:kg_kring/jenkins-utils.git"
credentialsId: "k8s-secret-ssh-git"
bitbucketBuildStatusNotifier:
globalCredentialsId: "k8s-secret-bitbucket-status-notifier"
slackNotifier:
teamDomain: groeipakketteam # i.e. <worskpace-name>.slack.com
tokenCredentialId: k8s-secret-slack-token
welcome-message: |
jenkins:
systemMessage: Welkom op de Jenkins server van het KRING team!
enabled: true
adminSecret: true
adminUser: admin
agentListenerEnabled: true
agentListenerHostPort: null
agentListenerPort: 50000
agentListenerServiceType: ClusterIP
authorizationStrategy: |-
<authorizationStrategy class="hudson.security.FullControlOnceLoggedInAuthorizationStrategy">
<denyAnonymousReadAccess>true</denyAnonymousReadAccess>
</authorizationStrategy>
componentName: jenkins-master
csrf:
defaultCrumbIssuer:
enabled: true
proxyCompatability: true
enableXmlConfig: true
image: jenkins/jenkins
imagePullPolicy: Always
ingress:
annotations:
ingress.kubernetes.io/custom-request-headers: 'X-Forwarded-Proto: https||X-Forwarded-Port
443'
apiVersion: networking.k8s.io/v1
enabled: true
hostName: kring-jenkins.groeipakketapplicatie.be
tls: null
installPlugins:
- configuration-as-code:1.54
- workflow-durable-task-step:2.40
- command-launcher:1.6
- credentials-binding:1.27
- git:4.10.0
- jdk-tool:1.5
- job-dsl:1.77
- kubernetes:1.30.11
- mailer:1.34
- matrix-auth:2.6.8
- ssh-agent:1.23
- warnings-ng:9.8.0
- workflow-aggregator:2.6
- workflow-job:2.42
- bitbucket-build-status-notifier:1.4.2
- kubernetes-credentials-provider:0.20
- authorize-project:1.4.0
- slack:2.48
- parameterized-scheduler:1.0
- uno-choice:2.5.7
- scriptler:3.4
javaOpts: -Dorg.apache.commons.jelly.tags.fmt.timeZone=Europe/Brussels -Dorg.csanchez.jenkins.plugins.kubernetes.PodTemplate.connectionTimeout=60
-Dorg.csanchez.jenkins.plugins.kubernetes.pipeline.ContainerExecDecorator.websocketConnectionTimeout=60
jenkinsUrl: https://kring-jenkins.groeipakketapplicatie.be
jenkinsUrlProtocol: https
nodePort: 8080
overwritePlugins: true
resources:
limits:
cpu: 4000m
memory: 8192Mi
requests:
cpu: 200m
memory: 256Mi
scriptApproval:
- method com.cloudbees.jenkins.plugins.sshcredentials.SSHUserPrivateKey getPrivateKey
- method com.cloudbees.plugins.credentials.common.IdCredentials getId
- method java.lang.Process waitFor
- staticMethod com.cloudbees.plugins.credentials.CredentialsProvider lookupCredentials
java.lang.Class hudson.model.ItemGroup
- staticMethod jenkins.model.Jenkins getInstance
- staticMethod org.codehaus.groovy.runtime.DefaultGroovyMethods execute java.util.List
- staticMethod org.codehaus.groovy.runtime.DefaultGroovyMethods size java.lang.CharSequence
- staticMethod org.codehaus.groovy.runtime.ProcessGroovyMethods consumeProcessOutput
java.lang.Process java.lang.Appendable java.lang.Appendable
securityRealm: <securityRealm class="hudson.security.LegacySecurityRealm"/>
servicePort: 8080
serviceType: ClusterIP
sideCars:
configAutoReload:
enabled: true
folder: /var/jenkins_home/casc_configs
image: kiwigrid/k8s-sidecar:1.14.2
imagePullPolicy: Always
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
sshTcpPort: 1044
tag: 2.303.3-alpine
targetPort: 8080
persistence:
accessMode: ReadWriteOnce
enabled: true
existingClaim: jenkins
size: 100Gi
storageClass: managed-nfs-storage-default
rbac:
create: true
readSecrets: true
serviceAccount:
create: true

Set position of columns in Beamer (R markdown)

I am creating an R Markdown presentation using Szeged as the Beamer theme. Does anyone know how to set the position of the columns so that there is less space between them? I have the following output:
The code:
---
title: |
| Text
author: |
| Name
| email
|
date: date
output:
beamer_presentation:
theme: Szeged
slide_level: 2
includes:
in_header: header.tex
keep_tex: true
linkcolor: false
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = FALSE)
```
# Workshop
## section
\footnotesize
:::::::::::::: {.columns}
::: {.column}
Text 1
- option 1
- option 1
- option 1
- option 1
:::
::: {.column}
Text 2
- option 1
- option 1
- option 1
- option 1
:::
::: {.column}
Text 3
- option 1
- option 1
- option 1
- option 1
:::
::::::::::::::
But I would like the columns to have less space between them so that they are more central; otherwise the text does not fit. Any ideas?
You can set the width of the columns:
---
title: |
| Text
author: |
| Name
| email
|
date: date
output:
beamer_presentation:
theme: Szeged
slide_level: 2
includes:
in_header: header.tex
keep_tex: true
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = FALSE)
```
# Workshop
## section
\footnotesize
:::::::::::::: {.columns totalwidth=\textwidth}
::: {.column width="30%"}
Text 1
- option 1
- option 1
- option 1
- option 1
:::
::: {.column width="30%"}
Text 2
- option 1
- option 1
- option 1
- option 1
:::
::: {.column width="30%"}
Text 3
- option 1
- option 1
- option 1
- option 1
:::
::::::::::::::

I am losing my mind with py2neo: node in the graph but not in the graph

So this is a graph that contains network devices.
Pre-existing nodes were added, and now I am trying to add more nodes and relationships to the graph. What am I doing wrong? At the bottom of the second block of code, the error message says the node is not in this graph, but as you can see the node is listed as present.
matcher = NodeMatcher(db)
nodes = matcher.match()
for node in nodes:
    print(node)
node1 = matcher.match(name="mxxx103")
print(node1)
node2 = matcher.match(name='mxxxcvss01')
print(node2)
for rel in db.relationships.match((node1, node2)):
    print(rel)
And the output when running the above code:
(_9787:Device {model: 'ASR1000', name: 'mxxx103', scanned: 'Yes'})
(_9788:Device {model: 'ASR1000', name: 'lxxx100', scanned: 'Yes'})
(_9789:Device {model: 'ASR1000', name: 'mxxx100', scanned: 'Yes'})
(_9790:Device {model: 'ASR1000', name: 'txxx100', scanned: 'Yes'})
(_9791:Device {model: 'ASR1000', name: 'mxxx101', scanned: 'Yes'})
(_9792:Device {model: 'ASR1000', name: 'mxxx102', scanned: 'Yes'})
(_9793:Device {model: 'ASR1000', name: 'txxx101', scanned: 'Yes'})
(_9794:Device {model: 'ASR1000', name: 'lxxx101', scanned: 'Yes'})
(_9795:Device {model: 'ASR1000', name: 'cxxx100', scanned: 'Yes'})
(_9796:Device {model: 'ASR1000', name: 'cxxx101', scanned: 'Yes'})
(_9797:Device {capabilities: 'R S I', model: 'WS-C4500X', name: 'mxxxcvss01'})
<py2neo.matching.NodeMatch object at 0x02CCB870>
<py2neo.matching.NodeMatch object at 0x02CCBCD0>
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-135-653e282922e7> in <module>
7 node2=matcher.match(name='mxxxcvss01')
8 print(node2)
----> 9 for rel in db.relationships.match((node1,node2)):
10 print (rel)
C:\Utils\WPy3.6 -32-Qt5\python-3.6.7\lib\site-packages\py2neo\matching.py in __iter__(self)
266 """ Iterate through all matching relationships.
267 """
--> 268 query, parameters = self._query_and_parameters()
269 for record in self.graph.run(query, parameters):
270 yield record[0]
C:\Utils\WPy3.6 -32-Qt5\python-3.6.7\lib\site-packages\py2neo\matching.py in _query_and_parameters(self, count)
311 if len(self._nodes) >= 1 and self._nodes[0] is not None:
312 start_node = Node.cast(self._nodes[0])
--> 313 verify_node(start_node)
314 clauses.append("MATCH (a) WHERE id(a) = {x}")
315 parameters["x"] = start_node.identity
C:\Utils\WPy3.6 -32-Qt5\python-3.6.7\lib\site-packages\py2neo\matching.py in verify_node(n)
288 def verify_node(n):
289 if n.graph != self.graph:
--> 290 raise ValueError("Node %r does not belong to this graph" % n)
291 if n.identity is None:
292 raise ValueError("Node %r is not bound to a graph" % n)
ValueError: Node ({model: 'ASR1000', name: 'mxxx103', scanned: 'Yes'}) does not belong to this graph
OK, I managed to find the mistake; it seems that I need to look again and again at the return value of each method and the data types py2neo uses.
My mistake was to believe that matcher.match() returns a node. That is not the case: it returns a NodeMatch. The code below worked.
matcher = NodeMatcher(db)
nodes = matcher.match()
for node in nodes:
    print(node)
node1 = matcher.match(name="mdc103")
list(node1)
node2 = matcher.match(name='mdccvss01')
list(node2)
type(node1)
node1 = db.evaluate('MATCH (x) WHERE x.name="mxxx103" RETURN(x)')
print(node1)
node2 = db.evaluate('MATCH (x) WHERE x.name="mxxxcvss01" RETURN(x)')
print(node2)
for rel in db.relationships.match((node1, node2)):
    print(rel)
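For what it's worth, an equivalent way to get single nodes out of the matcher (a sketch, assuming a py2neo version where NodeMatch.first() is available) is:

matcher = NodeMatcher(db)

# first() returns an actual Node (or None), not a NodeMatch,
# so the result can be passed to db.relationships.match() directly.
node1 = matcher.match(name="mxxx103").first()
node2 = matcher.match(name="mxxxcvss01").first()
for rel in db.relationships.match((node1, node2)):
    print(rel)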

Why do I get the error on_load_function_failed,re2?

When I try to start the deployed app, I get this error:
initial_call: {supervisor,kernel,['Argument__1']}
pid: <0.1762.0>
registered_name: []
error_info: {exit,{on_load_function_failed,re2},[{gen_server,init_it,6,[{file,"gen_server.erl"},{line,352}]},{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,247}]}]}
ancestors: [kernel_sup,<0.1738.0>]
messages: []
links: [<0.1739.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 376
stack_size: 27
reductions: 117
2017-06-19 11:51:18 supervisor_report
supervisor: {local,kernel_sup}
errorContext: start_error
reason: {on_load_function_failed,re2}
offender: [{pid,undefined},{id,kernel_safe_sup},{mfargs,{supervisor,start_link,[{local,kernel_safe_sup},kernel,safe]}},{restart_type,permanent},{shutdown,infinity},{child_type,supervisor}]
2017-06-19 11:51:19 crash_report
initial_call: {application_master,init,['Argument__1','Argument__2','Argument__3','Argument__4']}
pid: <0.1737.0>
registered_name: []
error_info: {exit,{{shutdown,{failed_to_start_child,kernel_safe_sup,{on_load_function_failed,re2}}},{kernel,start,[normal,[]]}},[{application_master,init,4,[{file,"application_master.erl"},{line,134}]},{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,247}]}]}
ancestors: [<0.1736.0>]
messages: [{'EXIT',<0.1738.0>,normal}]
links: [<0.1736.0>,<0.1735.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 376
stack_size: 27
reductions: 152
2017-06-19 11:51:19 std_info
application: kernel
exited: {{shutdown,{failed_to_start_child,kernel_safe_sup,{on_load_function_failed,re2}}},{kernel,start,[normal,[]]}}
type: permanent
{"Kernel pid terminated",application_controller,"{application_start_failure,kernel,{{shutdown,{failed_to_start_child,kernel_safe_sup,{on_load_function_failed,re2}}},{kernel,start,[normal,[]]}}}"}
Kernel pid terminated (application_controller) ({application_start_failure,kernel,{{shutdown,{failed_to_start_child,kernel_safe_sup,{on_load_function_failed,re2}}},{kernel,start,[normal,[]]}}})
But I have no idea how to solve this error. Any ideas?
Phoenix app start:
def start(_type, _args) do
  import Supervisor.Spec, warn: false

  children = [
    # Start the endpoint when the application starts
    supervisor(MyApp.Endpoint, []),
    # Start the endpoint for schools
    supervisor(MyApp.Schools.Endpoint, []),
    # Start the Ecto repository
    supervisor(MyApp.Schools.Repo, []),
    supervisor(MyApp.Repo, []),
  ]

  opts = [strategy: :one_for_one, name: MyApp.Supervisor]
  Supervisor.start_link(children, opts)
end
