diff --git a/CHANGES.txt b/CHANGES.txt
index 761b2ae..7ec9ccc 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,3 +1,12 @@
+v1.1.5
+- Added a single-line progress bar for the resync progress.
+- Changed the latest version check to download to /tmp and extract the files to the script's location, replacing the existing .sh and readme.txt files.
+- Added options:
+  -a, --all      List all M.2 drives even if detected as active
+  -s, --steps    Show the steps to do after running this script
+  -h, --help     Show this help message
+  -v, --version  Show the script version
+
v1.1.4
- Now only creates the RAID array and storage pool. Volume creation can now be done in DSM.
- Because you now create the volume in DSM you can set the volume size.
diff --git a/README.md b/README.md
index 1aaa4be..039db8a 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,14 @@ sudo -i /volume1/scripts/create_m2_volume.sh
**Note:** Replace /volume1/scripts/ with the path to where the script is located.
+**Options:**
+```YAML
+  -a, --all      List all M.2 drives even if detected as active
+  -s, --steps    Show the steps to do after running this script
+  -h, --help     Show this help message
+  -v, --version  Show the script version
+```
+
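+For example, using the same example path as above, you can list every M.2 drive, including ones DSM has already detected as active:
+```bash
+sudo -i /volume1/scripts/create_m2_volume.sh --all
+```
+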
It also has a dry run mode so you can see what it would have done had you run it for real.
diff --git a/syno_create_m2_volume.sh b/syno_create_m2_volume.sh
index 25e270a..2102525 100644
--- a/syno_create_m2_volume.sh
+++ b/syno_create_m2_volume.sh
@@ -18,18 +18,41 @@
#-----------------------------------------------------------------------------------
-# TODO
-# Better detection if DSM is uisng the drive.
-# Show drive names the same as DSM does
-# Support SATA M.2 drives
-# Maybe add logging
-# Add option to repair damaged array
+# TODO
+# Change to not include the 1st selected drive in the choices for the 2nd drive.
+# Better detection if DSM is using the drive.
+# Show drive names the same as DSM does.
+# Support SATA M.2 drives.
+# Maybe add logging.
+# Add option to repair damaged array.
# DONE
+# Changed latest version check to download to /tmp and extract files to the script's location,
+# replacing the existing .sh and readme.txt files.
+#
+# Added a single-line progress bar for the resync progress.
+#
+# Added options:
+#   -a, --all      List all M.2 drives even if detected as active
+#   -s, --steps    Show the steps to do after running this script
+#   -h, --help     Show this help message
+#   -v, --version  Show the script version
+#
+# Added -s, --steps option to show required steps after running script.
+#
+# Show DSM version and NAS model (to make it easier to debug).
+# Changed for DSM 7.2 and older DSM versions:
+#   - For DSM 7.x:
+#     - Ensures m2 volume support is enabled.
+#     - Creates the RAID and storage pool only.
+#   - For DSM 6.2.4 and earlier:
+#     - Creates the RAID, storage pool and volume.
+#
# Allow specifying the size of the volume to leave unused space for drive wear management.
#
# Instead of creating the filesystem directly on the mdraid device, you can use LVM to create a PV on it,
-# and a VG, and then use the UI to create volume(s), making it more "standard" to what DSM would do
+# and a VG, and then use the UI to create volume(s), making it more "standard" to what DSM would do.
# https://systemadmintutorial.com/how-to-configure-lvm-in-linuxpvvglv/
#
# Physical Volume (PV): Consists of Raw disks or RAID arrays or other storage devices.
@@ -37,7 +60,7 @@
# Logical Volume (LV): VG's are divided into LV's and are mounted as partitions.
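+# As a rough sketch only (not something this script currently runs), that LVM layering
+# would look something like the following, assuming the new array ends up as /dev/md3:
+#   pvcreate /dev/md3       # make the mdraid device an LVM physical volume
+#   vgcreate vg1 /dev/md3   # create a volume group on it (vg1 is just an example name)
+# and the volume(s) would then be created from DSM's Storage Manager UI.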
-scriptver="v1.1.4"
+scriptver="v1.1.5"
script=Synology_M2_volume
repo="007revad/Synology_M2_volume"
@@ -55,6 +78,98 @@ Cyan='\e[0;36m' # ${Cyan}
Error='\e[41m' # ${Error}
Off='\e[0m' # ${Off}
+
+usage(){
+    cat <<EOF
+  -a, --all      List all M.2 drives even if detected as active
+  -s, --steps    Show the steps to do after running this script
+  -h, --help     Show this help message
+  -v, --version  Show the script version
+EOF
+}
+
+
+scriptversion(){
+    echo -e "$script $scriptver\ngithub.com/$repo"
+}
+
+
+showsteps(){
+    # DSM 7 also needs the volume created in DSM; DSM 6 gets its volume from this script
+    major=$(get_key_value /etc.defaults/VERSION majorversion)
+    if [[ $major -gt "6" ]]; then
+        cat <<EOF
+  1. After the restart go to Storage Manager and select Online Assemble:
+    Storage Pool > Available Pool > Online Assemble
+  2. Create the volume as you normally would:
+    Select the new Storage Pool > Create > Create Volume
+  3. Optionally enable TRIM:
+    Storage Pool > ... > Settings > SSD TRIM
+EOF
+    else
+        cat <<EOF
+  1. After the restart go to Storage Manager and select Online Assemble:
+    Storage Pool > Available Pool > Online Assemble
+  2. Optionally enable TRIM:
+    Storage Pool > ... > Settings > SSD TRIM
+EOF
+    fi
+    #return
+}
+
+
+# Check for flags with getopt
+if options="$(getopt -o abcdefghijklmnopqrstuvwxyz0123456789 -a \
+ -l all,steps,help,version,log,debug -- "$@")"; then
+ eval set -- "$options"
+ while true; do
+ case "${1,,}" in
+ -a|--all) # List all M.2 drives even if detected as active
+ all=yes
+ ;;
+ -s|--steps) # Show steps remaining after running script
+ showsteps
+ exit
+ ;;
+ -h|--help) # Show usage options
+ usage
+ exit
+ ;;
+ -v|--version) # Show script version
+ scriptversion
+ exit
+ ;;
+ -l|--log) # Log
+ log=yes
+ ;;
+ --debug) # Show and log debug info
+ debug=yes
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *) # Show usage options
+ echo "Invalid option '$1'"
+ usage "$1"
+ ;;
+ esac
+ shift
+ done
+fi
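+# For example, "sudo -i ./syno_create_m2_volume.sh --steps" just prints the post-run DSM
+# steps and exits, while "--all" also lists M.2 drives that DSM has already marked as active.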
+
+
# Check script is running as root
if [[ $( whoami ) != "root" ]]; then
echo -e "${Error}ERROR${Off} This script must be run as sudo or root!"
@@ -62,17 +177,34 @@ if [[ $( whoami ) != "root" ]]; then
fi
# Show script version
-echo -e "$script $scriptver\ngithub.com/$repo\n"
+#echo -e "$script $scriptver\ngithub.com/$repo\n"
+echo "$script $scriptver"
-# Get DSM major version
+# Get DSM major and minor versions
dsm=$(get_key_value /etc.defaults/VERSION majorversion)
+dsminor=$(get_key_value /etc.defaults/VERSION minorversion)
+if [[ $dsm -gt "6" ]] && [[ $dsminor -gt "1" ]]; then
+ dsm72="yes"
+fi
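+# e.g. DSM 7.2 has majorversion=7 and minorversion=2 in /etc.defaults/VERSION,
+# so dsm72 is only set for DSM 7.2 and later 7.x releases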
+
+# Get NAS model
+model=$(cat /proc/sys/kernel/syno_hw_version)
+
+# Get DSM full version
+productversion=$(get_key_value /etc.defaults/VERSION productversion)
+buildphase=$(get_key_value /etc.defaults/VERSION buildphase)
+buildnumber=$(get_key_value /etc.defaults/VERSION buildnumber)
+
+# Show DSM full version and model
+if [[ $buildphase == GM ]]; then buildphase=""; fi
+echo -e "$model DSM $productversion-$buildnumber $buildphase\n"
echo -e "Type ${Cyan}yes${Off} to continue."\
"Type anything else to do a ${Cyan}dry run test${Off}."
read -r answer
if [[ ${answer,,} != "yes" ]]; then dryrun="yes";\
- echo -e "Doing a dry run test\n"; fi
+ echo -e "Doing a dry run test\n"; else echo; fi
#------------------------------------------------------------------------------
@@ -104,7 +236,7 @@ if ! printf "%s\n%s\n" "$tag" "$scriptver" |
echo "https://github.com/$repo/releases/latest"
sleep 10
else
- echo -e "${Cyan}Do you want to download $tag now?${Off} {y/n]"
+ echo -e "${Cyan}Do you want to download $tag now?${Off} [y/n]"
read -r -t 30 reply
if [[ ${reply,,} == "y" ]]; then
if cd /tmp; then
@@ -116,19 +248,32 @@ if ! printf "%s\n%s\n" "$tag" "$scriptver" |
else
if [[ -f /tmp/$script-$shorttag.tar.gz ]]; then
# Extract tar file to script location
- if ! tar -xf "/tmp/$script-$shorttag.tar.gz" -C "$scriptpath";
+ if ! tar -xf "/tmp/$script-$shorttag.tar.gz" -C "/tmp";
then
echo -e "${Error}ERROR ${Off} Failed to"\
"extract $script-$shorttag.tar.gz!"
else
+ # Copy new files to script location
+ cp "/tmp/$script-$shorttag/CHANGES.txt" "$scriptpath"
+ cp "/tmp/$script-$shorttag/"*.sh "$scriptpath"
+
+ # Delete downloaded .tar.gz file
if ! rm "/tmp/$script-$shorttag.tar.gz"; then
+ delerr=1
+ echo -e "${Error}ERROR ${Off} Failed to delete"\
+ "downloaded /tmp/$script-$shorttag.tar.gz!"
+ fi
+ # Delete extracted tmp files
+ if ! rm -r "/tmp/$script-$shorttag"; then
+ delerr=1
echo -e "${Error}ERROR ${Off} Failed to delete"\
- "downloaded $script-$shorttag.tar.gz!"
- else
+ "downloaded /tmp/$script-$shorttag!"
+ fi
+ if [[ $delerr != 1 ]]; then
echo -e "\n$tag and changes.txt are in "\
"${Cyan}$scriptpath/$script-$shorttag${Off}"
echo -e "${Cyan}Do you want to stop this script"\
- "so you can run the new one?${Off} {y/n]"
+ "so you can run the new one?${Off} [y/n]"
read -r -t 30 reply
if [[ ${reply,,} == "y" ]]; then exit; fi
fi
@@ -150,9 +295,8 @@ fi
#--------------------------------------------------------------------
# Check there's no active resync
-#if cat /proc/mdstat | grep resync >/dev/null ; then # useless cat
if grep resync /proc/mdstat >/dev/null ; then
- echo "The Synology is currently doing a RAID resync or data scrub!" >&2
+ echo "The Synology is currently doing a RAID resync or data scrub!"
exit
fi
@@ -168,24 +312,29 @@ getm2info() {
#echo "/dev/${dev}" >&2 # debug
- if grep -E "active.*${dev}" /proc/mdstat >/dev/null ; then
- echo -e "${Cyan}Skipping drive as it is being used by DSM${Off}" >&2
- #active="yes"
- else
- if [[ -e /dev/${dev}p1 ]] && [[ -e /dev/${dev}p2 ]] &&\
- [[ -e /dev/${dev}p3 ]]; then
- echo -e "${Cyan}WARNING Drive has a volume partition${Off}" >&2
- haspartitons="yes"
- elif [[ ! -e /dev/${dev}p3 ]] && [[ ! -e /dev/${dev}p2 ]] &&\
- [[ -e /dev/${dev}p1 ]]; then
- echo -e "${Cyan}WARNING Drive has a cache partition${Off}" >&2
- haspartitons="yes"
- elif [[ ! -e /dev/${dev}p3 ]] && [[ ! -e /dev/${dev}p2 ]] &&\
- [[ ! -e /dev/${dev}p1 ]]; then
- echo "No existing partitions on drive" >&2
+ if [[ $all != "yes" ]]; then
+ # Skip listing M.2 drives detected as active
+ if grep -E "active.*${dev}" /proc/mdstat >/dev/null ; then
+ echo -e "${Cyan}Skipping drive as it is being used by DSM${Off}" >&2
+ echo "" >&2
+ #active="yes"
+ return
fi
- m2list+=("${dev}")
fi
+
+ if [[ -e /dev/${dev}p1 ]] && [[ -e /dev/${dev}p2 ]] &&\
+ [[ -e /dev/${dev}p3 ]]; then
+ echo -e "${Cyan}WARNING Drive has a volume partition${Off}" >&2
+ haspartitons="yes"
+ elif [[ ! -e /dev/${dev}p3 ]] && [[ ! -e /dev/${dev}p2 ]] &&\
+ [[ -e /dev/${dev}p1 ]]; then
+ echo -e "${Cyan}WARNING Drive has a cache partition${Off}" >&2
+ haspartitons="yes"
+ elif [[ ! -e /dev/${dev}p3 ]] && [[ ! -e /dev/${dev}p2 ]] &&\
+ [[ ! -e /dev/${dev}p1 ]]; then
+ echo "No existing partitions on drive" >&2
+ fi
+ m2list+=("${dev}")
echo "" >&2
}
@@ -398,7 +547,7 @@ fi
# Let user confirm their choices
if [[ $m22 ]]; then
- echo -e "Ready to create ${Cyan}RAID $raidtype ${Off} volume"\
+ echo -e "Ready to create ${Cyan}RAID $raidtype${Off} volume"\
"group using ${Cyan}$m21${Off} and ${Cyan}$m22${Off}"
else
echo -e "Ready to create volume group on ${Cyan}$m21${Off}"
@@ -408,6 +557,10 @@ if [[ $haspartitons == "yes" ]]; then
echo -e "\n${Red}WARNING${Off} Everything on the selected"\
"M.2 drive(s) will be deleted."
fi
+if [[ $dryrun == "yes" ]]; then
+ echo -e " *** Not really because we're doing"\
+ "a ${Cyan}dry run${Off} ***"
+fi
echo -e "Type ${Cyan}yes${Off} to continue. Type anything else to quit."
read -r answer
if [[ ${answer,,} != "yes" ]]; then exit; fi
@@ -474,7 +627,7 @@ fi
#if [[ $raidtype ]]; then
if [[ $m21 ]] && [[ $m22 ]]; then
- echo -e "\nCreating the RAID array. This can take 10 minutes or more..."
+ echo -e "\nCreating the RAID array. This can take an hour..."
if [[ $dryrun == "yes" ]]; then
echo "mdadm --create /dev/md${nextmd} --level=${raidtype} --raid-devices=2"\
"--force /dev/${m21}p3 /dev/${m22}p3" # dryrun
@@ -482,10 +635,8 @@ if [[ $m21 ]] && [[ $m22 ]]; then
mdadm --create /dev/md"${nextmd}" --level="${raidtype}" --raid-devices=2\
--force /dev/"${m21}"p3 /dev/"${m22}"p3
fi
- resyncsleep=5
else
- # I assume single drive is --level=1 --raid-devices=1 ?
- echo -e "\nCreating single drive device."
+ echo -e "\nCreating single drive RAID."
if [[ $dryrun == "yes" ]]; then
echo "mdadm --create /dev/md${nextmd} --level=1 --raid-devices=1"\
"--force /dev/${m21}p3" # dryrun
@@ -493,14 +644,19 @@ else
mdadm --create /dev/md${nextmd} --level=1 --raid-devices=1\
--force /dev/"${m21}"p3
fi
- resyncsleep=30
fi
-# Show resync progress every 30 seconds
+# Show resync progress every 5 seconds
while grep resync /proc/mdstat >/dev/null; do
- grep -E -A 2 active.*nvme /proc/mdstat | grep resync | cut -d"(" -f1
- sleep "$resyncsleep"
+ # Only multi-drive RAID gets re-synced
+ progress="$(grep -E -A 2 active.*nvme /proc/mdstat | grep resync | cut -d\( -f1 )"
+ echo -ne "$progress\r"
+ sleep 5
done
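+# (the line pulled from /proc/mdstat looks roughly like
+#  "[=====>...............]  resync = 27.3%" once everything after the "(" is cut off)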
+# Show 100% progress
+if [[ $m21 ]] && [[ $m22 ]]; then
+ echo -ne " [=====================] resync = 100%\r"
+fi
#--------------------------------------------------------------------
@@ -523,23 +679,56 @@ else
fi
+#--------------------------------------------------------------------
+# Enable m2 volume support - DSM 7.2 and later only
+
+# Backup synoinfo.conf if needed
+if [[ $dsm72 == "yes" ]]; then
+ synoinfo="/etc.defaults/synoinfo.conf"
+ if [[ ! -f ${synoinfo}.bak ]]; then
+ if cp "$synoinfo" "$synoinfo.bak"; then
+ echo -e "\nBacked up $(basename -- "$synoinfo")" >&2
+ else
+ echo -e "\n${Error}ERROR 5${Off} Failed to backup $(basename -- "$synoinfo")!"
+ exit 1
+ fi
+ fi
+fi
+
+# Check if m2 volume support is enabled
+if [[ $dsm72 == "yes" ]]; then
+ smp=support_m2_pool
+ setting="$(get_key_value "$synoinfo" "$smp")"
+ enabled=""
+ if [[ ! $setting ]]; then
+        # Add support_m2_pool="yes"
+ echo 'support_m2_pool="yes"' >> "$synoinfo"
+ enabled="yes"
+ elif [[ $setting == "no" ]]; then
+        # Change support_m2_pool="no" to "yes"
+ sed -i "s/${smp}=\"no\"/${smp}=\"yes\"/" "$synoinfo"
+ enabled="yes"
+ elif [[ $setting == "yes" ]]; then
+ echo -e "\nM.2 volume support already enabled."
+ fi
+
+ # Check if we enabled m2 volume support
+ setting="$(get_key_value "$synoinfo" "$smp")"
+ if [[ $enabled == "yes" ]]; then
+ if [[ $setting == "yes" ]]; then
+ echo -e "\nEnabled M.2 volume support."
+ else
+            echo -e "\n${Error}ERROR${Off} Failed to enable M.2 volume support!"
+ fi
+ fi
+fi
+
+
#--------------------------------------------------------------------
# Notify of remaining steps
-echo -e "\n${Cyan}Remaining steps you need to do${Off}"
-cat <<EOF
-  1. After the restart go to Storage Manager and select Online Assemble:
-    Storage Pool > Available Pool > Online Assemble
- 2. Create the volume:
- Select the new Storage Pool > Create > Create Volume
- 3. Set the allocated size to max, or 7% less for overprovisioning.
- 4. Optionally enter a volume description. Be creative :)
- Click Next
- 5. Select the file system: Btrfs or ext4.
- Click Next and you've finished creating your volume.
- 6. Then, optionally, enable TRIM:
- Storage Pool > ... > Settings > SSD TRIM
-EOF
+echo
+showsteps # Show the final steps to do in DSM
#--------------------------------------------------------------------
@@ -553,9 +742,8 @@ if [[ ${answer,,} != "yes" ]]; then exit; fi
if [[ $dryrun == "yes" ]]; then
echo "reboot" # dryrun
else
- # Reboot in the background so user can DSM's "shutting down" message
- #reboot & # not working
- reboot
+ # Reboot in the background so user can see DSM's "going down" message
+ reboot &
fi