You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
My deployment has six replicas, each scheduled on a different node. Sometimes one of the pods fails to start, with the state CreateContainerConfigError.
After checking the kubelet log, I found the errors below; I don't know what the problem is.
Nov 29 16:48:51 ip-10-215-52-128.us-west-2.compute.internal kubelet[4382]: E1129 08:48:51.392779 4382 kuberuntime_manager.go:1268] container &Container{Name:ws-40081,Image:hz-repo-registry.yqn.com/yqn-pr/ws-40081:20241121_1831_2cf92a7d,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:SKIP_CONSUMER_TAG,Value:1,ValueFrom:nil,},EnvVar{Name:YQN_RUN_TIME_ENV,Value:k8s,ValueFrom:nil,},EnvVar{Name:YQN_EUREKA_SERVER,Value:http://8999.pro.yqn.corp:8999,ValueFrom:nil,},EnvVar{Name:YQN_APOLLO_CLUSTER,Value:pro,ValueFrom:nil,},EnvVar{Name:YQN_APOLLO_ENV,Value:PRO,ValueFrom:nil,},EnvVar{Name:YQN_APOLLO_METADATA,Value:http://apollo-config.yqn.corp:8080,ValueFrom:nil,},EnvVar{Name:APP_NAME,Value:yqn-wms,ValueFrom:nil,},EnvVar{Name:APP_NAME_LOWER_CASE,Value:yqn-wms,ValueFrom:nil,},EnvVar{Name:APP_PORT,Value:40081,ValueFrom:nil,},EnvVar{Name:YQN_JVM_MEMORY,Value:5120m,ValueFrom:nil,},EnvVar{Name:APOLLO_ENCRYPT_JVM,Value:-Djasypt.encryptor.password=9i2x7DBpdWSe3XaJ -Djasypt.encryptor.algorithm=PBEWITHHMACSHA512ANDAES_256 -Djasypt.encryptor.key-obtention-iterations=1000 -Djasypt.encryptor.provider-name=SunJCE -Djasypt.encryptor.salt-generator-classname=org.jasypt.salt.RandomSaltGenerator -Djasypt.encryptor.iv-generator-classname=org.jasypt.iv.RandomIvGenerator -Djasypt.encryptor.string-output-type=base64,ValueFrom:nil,},EnvVar{Name:ARMS_JVM,Value:,ValueFrom:nil,},EnvVar{Name:JACOCO_JVM,Value:,ValueFrom:nil,},EnvVar{Name:SKYWALKING_JVM,Value:,ValueFrom:nil,},EnvVar{Name:SANDBOX_JVM,Value:,ValueFrom:nil,},EnvVar{Name:APOLLO_JVM,Value:-Dfile.encoding=utf-8 -Denv=PRO -Dapollo.cluster=pro 
-Dapollo.meta=http://apollo-config.yqn.corp:8080,ValueFrom:nil,},EnvVar{Name:YQN_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:YQN_NODE_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.hostIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:YQN_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:YQN_POD_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:YQN_POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:YQN_POD_UID,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.uid,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:YQN_POD_SERVICE_ACCOUNT,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.serviceAccountName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{4 0} {<nil>} 4 DecimalSI},memory: {{10737418240 0} {<nil>} 10Gi BinarySI},},Requests:ResourceList{cpu: {{100 -3} {<nil>} 100m DecimalSI},memory: {{536870912 0} {<nil>} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:pvc-public-internal-oss-k8s,ReadOnly:false,MountPath:/oss/,SubPath:pro/ws-40081,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:soft-share,ReadOnly:false,MountPath:/opt/share,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mzdtc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/actuator/info,Port:{0 40081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:5,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/actuator/info,Port:{0 40081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:2,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:2,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sh -c curl -s -X DELETE ${YQN_EUREKA_SERVER}/eureka/apps/${APP_NAME}/${YQN_POD_NAME}:${APP_NAME_LOWER_CASE}:${APP_PORT} && mkdir -p /opt/web/ws-${APP_PORT}/logs],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sh -c curl -s -X PUT ${YQN_EUREKA_SERVER}/eureka/apps/${APP_NAME}/${YQN_POD_NAME}:${APP_NAME_LOWER_CASE}:${APP_PORT}/status?value=OUT_OF_SERVICE && sleep 
60],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/actuator/info,Port:{0 40081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:50,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ws-40081-deployment-pro-5fdf4d9465-c67fh_pro(7d645628-997e-4858-b4f5-04b011f856fd): CreateContainerConfigError: failed to prepare subPath for volumeMount "pvc-public-internal-oss-k8s" of container "ws-40081"
Nov 29 16:48:51 ip-10-215-52-128.us-west-2.compute.internal kubelet[4382]: E1129 08:48:51.392803 4382 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ws-40081\" with CreateContainerConfigError: \"failed to prepare subPath for volumeMount \\\"pvc-public-internal-oss-k8s\\\" of container \\\"ws-40081\\\"\"" pod="pro/ws-40081-deployment-pro-5fdf4d9465-c67fh" podUID="7d645628-997e-4858-b4f5-04b011f856fd"
The text was updated successfully, but these errors were encountered:
Hello, thanks for creating this issue. We're going to need some more information to help debug the problem you're having.
Could you please attach the YAML you're using for your PV and PVC, and include logs from the CSI driver by following the logging runbook? In this case, we need logs from the CSI driver on your node.
My deployment has six replicas, each scheduled on a different node. Sometimes one of the pods fails to start, with the state CreateContainerConfigError.
After checking the kubelet log, I found the errors above; I don't know what the problem is.
The text was updated successfully, but these errors were encountered: