Skip to content

Commit

Permalink
adding chart evi-ai-inference-2.1.0
Browse files Browse the repository at this point in the history
  • Loading branch information
softwarerecipes committed Mar 5, 2024
1 parent 58e409d commit bddda08
Show file tree
Hide file tree
Showing 9 changed files with 367 additions and 0 deletions.
6 changes: 6 additions & 0 deletions charts/evi-ai-inference-2.1.0/Chart.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Helm chart metadata for the EVI AI inference service.
apiVersion: v2
# Version of the packaged application; quoted so YAML parsers always treat it
# as a string (Helm docs recommend quoting appVersion).
appVersion: "1.16.0"
description: A Helm chart for Kubernetes
name: evi-ai-inference
type: application
# Chart version (SemVer) -- matches the charts/evi-ai-inference-2.1.0 directory.
version: 2.1.0
13 changes: 13 additions & 0 deletions charts/evi-ai-inference-2.1.0/configs/AiInference.config
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# AI inference service configuration TEMPLATE.
# The literal tokens restServicePort, grpcServicePort and
# maxConcurrentWorkloadPerReplica are substituted with chart values when this
# file is rendered into a ConfigMap by templates/configmap.yaml.
# NOTE(review): assumes the service's config reader tolerates '#' comment
# lines -- confirm before relying on these comments in a rendered ConfigMap.
[Service]
logDir=.
logMaxFileSize=16777216
logMaxFileCount=8
logSeverity=0
[HTTP]
address=0.0.0.0
RESTfulPort=restServicePort
gRPCPort=grpcServicePort
[Pipeline]
maxConcurrentWorkload=maxConcurrentWorkloadPerReplica
pipelineManagerPoolSize=1
maxPipelineLifetime=30
24 changes: 24 additions & 0 deletions charts/evi-ai-inference-2.1.0/configs/media_storage_configmap.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
{
"version": 1,
"name": "row data source",
"video_image": {
"address": "minIOAddress_placeholder",
"port": "minIOPort_placeholder",
"rootUser": "rootUserPath_placeholder",
"rootPassword": "rootPasswordPath_placeholder"
},
"video_image_attributes": {
"flask_server_address": "storageRestAddress_placeholder",
"flask_server_port": "storageRestPort_placeholder",
"prefix": "v1",
"media": "/media"
},
"mediatype": [
"image",
"video"
],
"datasource": [
"person",
"vehicle"
]
}
31 changes: 31 additions & 0 deletions charts/evi-ai-inference-2.1.0/templates/configmap.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# INTEL CONFIDENTIAL
#
# Copyright (C) 2022 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and your use of
# them is governed by the express license under which they were provided to you (License).
# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute,
# disclose or transmit this software or the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express or implied warranties,
# other than those that are expressly stated in the License.

---
# ConfigMap embedding configs/AiInference.config. The placeholder tokens
# inside that file (restServicePort, grpcServicePort,
# maxConcurrentWorkloadPerReplica) are substituted from chart values at render
# time. "{{-" trims are used so the variable-assignment action does not emit a
# stray whitespace line under "data:".
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ .Values.global.namespace.value }}
  name: {{ .Values.configMap.AIInference.name }}
data:
  {{- $v := .Values.configMap.AIInference.value }}
  {{- (.Files.Glob $v).AsConfig
        | replace "restServicePort" (toString .Values.image.restfulPort)
        | replace "grpcServicePort" (toString .Values.image.grpcPort)
        | replace "maxConcurrentWorkloadPerReplica" (toString .Values.configMap.AIInference.maxConcurrentWorkloadPerReplica)
        | nindent 2 }}

---
# ConfigMap embedding configs/media_storage_configmap.json with the
# *_placeholder tokens substituted from chart values. toString guards each
# replacement so an integer override (e.g. minIOPort: 9000) does not fail the
# string-typed "replace" call.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ .Values.global.namespace.value }}
  name: {{ .Values.configMap.mediaStorage.name }}
data:
  {{- $v := .Values.configMap.mediaStorage.value }}
  {{- (.Files.Glob $v).AsConfig
        | replace "minIOPort_placeholder" (toString .Values.configMap.mediaStorage.minIOPort)
        | replace "minIOAddress_placeholder" (toString .Values.configMap.mediaStorage.minIOAddress)
        | replace "storageRestPort_placeholder" (toString .Values.configMap.hbaseStorage.storageRestPort)
        | replace "storageRestAddress_placeholder" (toString .Values.configMap.hbaseStorage.storageRestAddress)
        | replace "rootUserPath_placeholder" (toString .Values.configMap.mediaStorage.rootUserMountPath)
        | replace "rootPasswordPath_placeholder" (toString .Values.configMap.mediaStorage.rootPasswordMountPath)
        | nindent 2 }}
142 changes: 142 additions & 0 deletions charts/evi-ai-inference-2.1.0/templates/deployment.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
# INTEL CONFIDENTIAL
#
# Copyright (C) 2022-2023 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and your use of
# them is governed by the express license under which they were provided to you (License).
# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute,
# disclose or transmit this software or the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express or implied warranties,
# other than those that are expressly stated in the License.

---
# Optional Namespace, created only when global.namespace.create is true.
{{- if .Values.global.namespace.create -}}
apiVersion: v1
kind: Namespace
metadata:
  name: {{ .Values.global.namespace.value }}
  {{- if .Values.global.namespace.istioInjection.create }}
  labels:
    istio-injection: enabled
  {{- end }}
{{- end -}}

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Values.global.deployment.name }}
  namespace: {{ .Values.global.namespace.value }}
  annotations:
    container.apparmor.security.beta.kubernetes.io/ai-inference: runtime/default
spec:
  selector:
    matchLabels:
      app: ai-inference
      version: v1
  replicas: {{ .Values.replicaCount }}
  template:
    metadata:
      labels:
        app: ai-inference
        # Per-pod sidecar toggle; top-level .Values.istioInjection (distinct
        # from global.namespace.istioInjection, which labels the Namespace).
        sidecar.istio.io/inject: "{{ .Values.istioInjection }}"
        version: v1
    spec:
      # Schedule only on nodes advertising the instruction sets the inference
      # runtime requires (labels published by node-feature-discovery).
      nodeSelector:
        feature.node.kubernetes.io/cpu-cpuid.AVX512VNNI: 'true'
        feature.node.kubernetes.io/cpu-cpuid.AVX2: 'true'
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      serviceAccountName: {{ .Values.global.service.name }}
      # tolerations:
      # - key: "node"
      #   operator: "Equal"
      #   value: "hddl"
      #   effect: "NoSchedule"
      # Soft anti-affinity: prefer spreading replicas across nodes.
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - ai-inference
                topologyKey: kubernetes.io/hostname
      containers:
        - name: {{ .Values.global.deployment.name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.Version }}"
          command: ["/opt/run_service.sh"]
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          # NOTE(review): this container originally declared "securityContext"
          # and "volumeMounts" TWICE each; YAML last-key-wins silently dropped
          # the first occurrence of both (losing allowPrivilegeEscalation:
          # false and the /dev/dri/card0 mount). Both pairs are merged here.
          # "privileged: true" implies privilege escalation, so the dropped
          # "allowPrivilegeEscalation: false" cannot coexist with it -- confirm
          # whether privileged access to /dev/dri is actually required.
          securityContext:
            # readOnlyRootFilesystem: true
            privileged: true
          resources:
            limits:
              cpu: {{ .Values.resources.limits.cpu }}
              memory: {{ .Values.resources.limits.memory }}
            requests:
              cpu: {{ .Values.resources.requests.cpu }}
              memory: {{ .Values.resources.requests.memory }}
          env:
            - name: FeatureStorage_HBaseVehicleFeatureServerAddress
              value: "{{ .Values.configMap.hbaseStorage.hbaseAddr }}"
            - name: FeatureStorage_HBaseVehicleFeatureServerPort
              value: "{{ .Values.configMap.hbaseStorage.hbasePort }}"
            - name: FeatureStorage_RestControllerBaseUrl
              value: "{{ .Values.configMap.hbaseStorage.storageRestAddress }}:{{ .Values.configMap.hbaseStorage.storageRestPort }}"
          livenessProbe:
            httpGet:
              path: /healthz
              port: {{ .Values.image.restfulPort }}
            initialDelaySeconds: 120
            periodSeconds: 60
          startupProbe:
            httpGet:
              path: /healthz
              port: {{ .Values.image.restfulPort }}
            failureThreshold: 5
            periodSeconds: 60
          ports:
            - containerPort: {{ .Values.image.restfulPort }}
              name: restful
            - containerPort: {{ .Values.image.grpcPort }}
              name: grpc
          volumeMounts:
            # GPU render device (hostPath) -- was silently discarded by the
            # duplicate-key bug noted above.
            - mountPath: /dev/dri/card0
              name: dri
            - mountPath: /opt/hce-core/middleware/ai/ai_inference/source/low_latency_server/AiInference.config
              subPath: AiInference.config
              name: config-volume
            - mountPath: /opt/hce-configs/media_storage_configmap.json
              subPath: media_storage_configmap.json
              name: config-volume-ms
            - name: media-storage-secret
              mountPath: {{ .Values.configMap.mediaStorage.secretMountPath }}
              readOnly: true
      volumes:
        - name: config-volume
          configMap:
            name: {{ .Values.configMap.AIInference.name }}
        - name: dri
          hostPath:
            path: /dev/dri/card0
        - name: config-volume-ms
          configMap:
            name: {{ .Values.configMap.mediaStorage.name }}
        # MinIO credentials; optional so the pod still starts when the secret
        # is absent.
        - name: media-storage-secret
          secret:
            secretName: {{ .Values.global.secret.minio.name }}
            optional: true
            items:
              - key: rootUser
                path: rootUser
              - key: rootPassword
                path: rootPassword
24 changes: 24 additions & 0 deletions charts/evi-ai-inference-2.1.0/templates/secret.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# INTEL CONFIDENTIAL
#
# Copyright (C) 2022 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and your use of
# them is governed by the express license under which they were provided to you (License).
# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute,
# disclose or transmit this software or the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express or implied warranties,
# other than those that are expressly stated in the License.

---
# MinIO credentials consumed by the deployment's media-storage-secret volume.
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Values.global.secret.minio.name }}
  namespace: {{ .Values.global.namespace.value }}
  labels:
    app: ai-inference
type: Opaque
data:
  # NOTE(review): entries under "data" must be base64-encoded by the value
  # supplier; if plain-text values are intended, switch to "stringData".
  # "| quote" keeps the default empty strings rendering as "" rather than as
  # a bare (null) value, which the API server rejects for Secret data.
  rootUser: {{ .Values.configMap.mediaStorage.rootUser | quote }}
  rootPassword: {{ .Values.configMap.mediaStorage.rootPassword | quote }}
34 changes: 34 additions & 0 deletions charts/evi-ai-inference-2.1.0/templates/service.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# INTEL CONFIDENTIAL
#
# Copyright (C) 2022-2023 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and your use of
# them is governed by the express license under which they were provided to you (License).
# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute,
# disclose or transmit this software or the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express or implied warranties,
# other than those that are expressly stated in the License.

---
# ClusterIP Service fronting the ai-inference pods; exposes the REST and gRPC
# ports defined in values.yaml (image.restfulPort / image.grpcPort).
apiVersion: v1
kind: Service
metadata:
  name: {{ .Values.global.service.name }}
  namespace: {{ .Values.global.namespace.value }}
  labels:
    app: ai-inference
spec:
  type: ClusterIP
  selector:
    app: ai-inference
    version: v1
  ports:
    - name: restful
      protocol: TCP
      port: {{ .Values.image.restfulPort }}
      targetPort: {{ .Values.image.restfulPort }}
    - name: grpc
      protocol: TCP
      port: {{ .Values.image.grpcPort }}
      targetPort: {{ .Values.image.grpcPort }}
20 changes: 20 additions & 0 deletions charts/evi-ai-inference-2.1.0/templates/serviceaccount.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# INTEL CONFIDENTIAL
#
# Copyright (C) 2022 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and your use of
# them is governed by the express license under which they were provided to you (License).
# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute,
# disclose or transmit this software or the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express or implied warranties,
# other than those that are expressly stated in the License.

---
# ServiceAccount referenced by the deployment's serviceAccountName; shares its
# name with the Service (global.service.name).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .Values.global.service.name }}
  namespace: {{ .Values.global.namespace.value }}
  labels:
    account: ai-inference
73 changes: 73 additions & 0 deletions charts/evi-ai-inference-2.1.0/values.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
# INTEL CONFIDENTIAL
#
# Copyright (C) 2022-2023 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and your use of
# them is governed by the express license under which they were provided to you (License).
# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute,
# disclose or transmit this software or the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express or implied warranties,
# other than those that are expressly stated in the License.

---
# Default values for ai-inference.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1

global:
  namespace:
    create: false
    value: hce-ai
    istioInjection:
      # When true (and namespace.create is true) the Namespace gets the
      # istio-injection: enabled label.
      create: false
  service:
    name: evi-ai-inference
  deployment:
    name: evi-ai-inference
  secret:
    minio:
      name: evi-minio-secret

# Per-pod sidecar.istio.io/inject label value (templated as a string).
istioInjection: true

configMap:
  AIInference:
    name: ai-inference
    # Chart-relative path globbed by templates/configmap.yaml.
    value: configs/AiInference.config
    maxConcurrentWorkloadPerReplica: 8
  hbaseStorage:
    name: hbase-storage
    value: configs/hbase_storage_configmap.json
    hbaseAddr: "my-hbase-hbase-master.dev"
    hbasePort: 9090
    storageRestAddress: "storage-rest.storage-rest"
    storageRestPort: "9900"
  mediaStorage:
    name: media-storage
    value: configs/media_storage_configmap.json
    # MinIO credentials rendered into templates/secret.yaml (Secret "data",
    # so values are expected base64-encoded -- NOTE(review): confirm).
    rootUser: ""
    rootPassword: ""
    secretMountPath: /opt/hce-configs/credentials/minio
    rootUserMountPath: /opt/hce-configs/credentials/minio/rootUser
    rootPasswordMountPath: /opt/hce-configs/credentials/minio/rootPassword
    minIOAddress: "minio-service.minio"
    minIOPort: "9000"

image:
  repository: ai-inference-cpu
  # Image tag. Key is capitalized "Version" and is referenced as
  # .Values.image.Version by templates/deployment.yaml -- do not rename.
  Version: master-96802044e3c560ce54cdfb2c9d69e25819ef112a
  pullPolicy: IfNotPresent
  restfulPort: 50051
  grpcPort: 50052

resources:
  requests:
    cpu: 10m
    memory: 300Mi
  limits:
    # NOTE(review): 120 CPUs / 128Gi is an unusually high per-pod ceiling --
    # confirm these limits are intentional.
    cpu: 120
    memory: 128Gi

0 comments on commit bddda08

Please sign in to comment.