# Globals
VERSION="1.1.0"
CEPH_VERSION="18.2.2"
SPDK_VERSION="23.01.1"
CONTAINER_REGISTRY="quay.io/ceph"
QUAY_SPDK="${CONTAINER_REGISTRY}/spdk"
QUAY_CEPH="${CONTAINER_REGISTRY}/vstart-cluster"
QUAY_NVMEOF="${CONTAINER_REGISTRY}/nvmeof"
QUAY_NVMEOFCLI="${CONTAINER_REGISTRY}/nvmeof-cli"
MAINTAINER="Ceph Developers <[email protected]>"
COMPOSE_PROJECT_NAME="ceph-nvmeof"
NVMEOF_CONTAINER_NAME="${COMPOSE_PROJECT_NAME}-nvmeof-1"
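# Note: Docker Compose v2 names containers "<project>-<service>-<index>",
# which is where the "-nvmeof-1" suffix above comes from (legacy Compose v1
# used underscores instead; see the demo container names at the bottom).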
# Performance
NVMEOF_NOFILE=20480 # Max number of open files (depends on number of hosts connected)
HUGEPAGES=2048 # 2048 pages x 2 MiB = 4 GiB
HUGEPAGES_DIR="/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages"
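# The pages can be reserved at runtime by writing the count to the sysfs
# knob above, e.g. (sketch; requires root):
#   echo "${HUGEPAGES}" > "${HUGEPAGES_DIR}"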
# NVMe-oF
NVMEOF_VERSION="${VERSION}"
NVMEOF_CONFIG="./ceph-nvmeof.conf"
NVMEOF_SPDK_VERSION="${SPDK_VERSION}"
NVMEOF_CEPH_VERSION="${CEPH_VERSION}"
NVMEOF_NAME="ceph-nvmeof"
NVMEOF_SUMMARY="Ceph NVMe over Fabrics Gateway"
NVMEOF_DESCRIPTION="Service to provide block storage on top of Ceph for platforms (e.g. VMware) without native Ceph support (RBD), replacing existing approaches (iSCSI) with a newer and more versatile standard (NVMe-oF)."
NVMEOF_URL="https://github.com/ceph/ceph-nvmeof"
NVMEOF_TAGS="ceph,nvme-of,nvme-of gateway,rbd,block storage"
NVMEOF_WANTS="ceph,rbd"
NVMEOF_IP_ADDRESS=192.168.13.3
NVMEOF_IPV6_ADDRESS=2001:db8::3
NVMEOF_IO_PORT=4420
NVMEOF_GW_PORT=5500
NVMEOF_DISC_PORT=8009
NVMEOF_PROMETHEUS_PORT=10008
NVMEOF_EXPOSE_SERVICES="${NVMEOF_IO_PORT}/tcp:nvme,${NVMEOF_GW_PORT}/tcp:grpc,${NVMEOF_DISC_PORT}/tcp:nvme-disc,${NVMEOF_PROMETHEUS_PORT}/tcp:prom"
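# Sketch of how an initiator would reach these ports with stock nvme-cli
# (host-side commands, not consumed by this file; NQN is defined in the demo
# settings below):
#   nvme discover -t tcp -a "${NVMEOF_IP_ADDRESS}" -s "${NVMEOF_DISC_PORT}"
#   nvme connect  -t tcp -a "${NVMEOF_IP_ADDRESS}" -s "${NVMEOF_IO_PORT}" -n "${NQN}"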
# NVMe-oF CLI
NVMEOF_CLI_VERSION="${VERSION}"
NVMEOF_CLI_NAME="ceph-nvmeof-cli"
NVMEOF_CLI_SUMMARY="Ceph NVMe over Fabrics CLI"
NVMEOF_CLI_DESCRIPTION="Command line interface for Ceph NVMe over Fabrics Gateway"
# SPDK
SPDK_CEPH_VERSION="${CEPH_VERSION}"
SPDK_NAME="SPDK"
SPDK_SUMMARY="Build Ultra High-Performance Storage Applications with the Storage Performance Development Kit"
SPDK_DESCRIPTION="The Storage Performance Development Kit (SPDK) provides a set of tools and libraries for writing high performance, scalable, user-mode storage applications"
SPDK_URL="https://spdk.io"
SPDK_PKGDEP_ARGS="--rbd"
# check spdk/configure --help
SPDK_CONFIGURE_ARGS="--with-rbd --disable-tests --disable-unit-tests --disable-examples --enable-debug"
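# Roughly how these feed the SPDK build (sketch; spdk/configure --help is
# authoritative):
#   ./scripts/pkgdep.sh ${SPDK_PKGDEP_ARGS}
#   ./configure ${SPDK_CONFIGURE_ARGS}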
SPDK_TARGET_ARCH="x86-64-v2"
SPDK_MAKEFLAGS=
SPDK_CENTOS_BASE="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/Packages/"
SPDK_CENTOS_REPO_VER="9.0-21.el9"
# Ceph Cluster
CEPH_CLUSTER_VERSION="${CEPH_VERSION}"
CEPH_BRANCH=ceph-nvmeof-mon
CEPH_SHA=b1eac7a453942845208a803ce5af51183f69d68d
CEPH_VSTART_ARGS="--memstore"
CEPH_DEVEL_MGR_PATH=../ceph
# Demo settings
RBD_POOL=rbd
RBD_IMAGE_NAME=demo_image
RBD_IMAGE_SIZE=10MiB
BDEV_NAME=demo_bdev
NQN="nqn.2016-06.io.spdk:cnode1"
SERIAL="SPDK00000000000001"
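# The demo image above could be created along these lines (sketch, assuming
# a running cluster and the rbd CLI; exact flags may differ):
#   rbd create "${RBD_POOL}/${RBD_IMAGE_NAME}" --size "${RBD_IMAGE_SIZE}"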
# Container names in docker compose environment
DISC1="ceph-nvmeof_discovery_1"
GW1="ceph-nvmeof_nvmeof_1"
GW2="ceph-nvmeof_nvmeof_2"
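# Handy for addressing a single container directly, e.g. (sketch):
#   docker logs "${GW1}"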