From a88d372afbe17ec7b140bb9904e11f80c06fbbeb Mon Sep 17 00:00:00 2001 From: Rodrigo Campos Date: Wed, 25 Sep 2019 19:00:39 +0200 Subject: [PATCH] packet: Always mount node local storage on /mnt/ The rkt pod is exposed with /mnt/ volume[1] and in this PR: https://github.com/kinvolk/lokomotive-kubernetes/pull/48/files#diff-b6c7caf796cd86bdcdd936319b1793a1R152 the location where the RAID 0 array is mounted was changed from `/mnt` to `/mnt/`. This breaks using local volumes in pods, as it seems the mount is not visible to the rkt container or the kubelet running in rkt. This patch just changes the mount location back to `/mnt` for all the cases (setup_raid, setup_raid_hdd) so it works in all cases, and as those options are exclusive (only one can be set) this causes no problem at all. Other options were considered, like changing the path that is exposed to the rkt container to be /mnt/node-local-storage or /mnt/node-hdd-local-storage, according to which option was used (setup_raid, setup_raid_hdd), but that was messy without any good reason. So, we decided on this simpler approach. The investigation of the root cause of this issue, to understand why mounts inside /mnt (like `/mnt/node-local-storage`) can't be used by pods and what needs to be changed for that, is left as a future patch (an issue will be created). This patch is just a minimal fix, a revert, to make this work again on master. 
[1]: https://github.com/kinvolk/lokomotive-kubernetes/blob/d59d071a451f45ac61c2524b94a146a6cde60401/packet/flatcar-linux/kubernetes/workers/cl/worker.yaml.tmpl#L65-L66 --- .../kubernetes/workers/cl/worker.yaml.tmpl | 24 ++++++++++--------- .../kubernetes/workers/variables.tf | 6 ++--- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/packet/flatcar-linux/kubernetes/workers/cl/worker.yaml.tmpl b/packet/flatcar-linux/kubernetes/workers/cl/worker.yaml.tmpl index c6889b2b..4b6350bf 100644 --- a/packet/flatcar-linux/kubernetes/workers/cl/worker.yaml.tmpl +++ b/packet/flatcar-linux/kubernetes/workers/cl/worker.yaml.tmpl @@ -195,10 +195,9 @@ storage: if [ "$${setup_fs_on_raid}" = true ]; then mkfs.ext4 "$${device_path}" - mkdir "/mnt/$${array_name}" - mount "$${device_path}" "/mnt/$${array_name}" + mount "$${device_path}" "/mnt/" # Make mount persistent across reboots - echo "$${device_path} /mnt/$${array_name} ext4 defaults,nofail,discard 0 0" | tee -a /etc/fstab + echo "$${device_path} /mnt/ ext4 defaults,nofail,discard 0 0" | tee -a /etc/fstab fi } @@ -211,17 +210,20 @@ storage: # https://www.kernel.org/doc/Documentation/admin-guide/devices.txt major_numbers="8,259" + # XXX: These options are exclusive, as only one fs can be mounted + # to /mnt/ + # This is, partly, because when creating dirs inside /mnt to mount + # several paths (like /mnt/node-local-storage), those are not visible + # to the pods. 
See this issue for more info: + # https://github.com/kinvolk/lokomotive-kubernetes/issues/73 + # # Variables replaced by Terraform if [ ${setup_raid} = true ]; then create_data_raid "$${major_numbers}" -1 /dev/md/node-local-storage true - else - # Both can be set independently - if [ ${setup_raid_hdd} = true ]; then - create_data_raid "$${major_numbers}" 1 /dev/md/node-local-hdd-storage true - fi - if [ ${setup_raid_ssd} = true ]; then - create_data_raid "$${major_numbers}" 0 /dev/md/node-local-ssd-storage ${setup_raid_ssd_fs} - fi + elif [ ${setup_raid_hdd} = true ]; then + create_data_raid "$${major_numbers}" 1 /dev/md/node-local-hdd-storage true + elif [ ${setup_raid_ssd} = true ]; then + create_data_raid "$${major_numbers}" 0 /dev/md/node-local-ssd-storage ${setup_raid_ssd_fs} fi - path: /etc/kubernetes/kubeconfig filesystem: root diff --git a/packet/flatcar-linux/kubernetes/workers/variables.tf b/packet/flatcar-linux/kubernetes/workers/variables.tf index 6dd55a6a..da270320 100644 --- a/packet/flatcar-linux/kubernetes/workers/variables.tf +++ b/packet/flatcar-linux/kubernetes/workers/variables.tf @@ -80,19 +80,19 @@ EOD } variable "setup_raid" { - description = "Attempt to create a RAID 0 from extra disks to be used for persistent container storage. Valid values: \"true\", \"false\"" + description = "Attempt to create a RAID 0 from extra disks to be used for persistent container storage. Can't be used with setup_raid_hdd nor setup_raid_sdd. Valid values: \"true\", \"false\"" type = "string" default = "false" } variable "setup_raid_hdd" { - description = "Attempt to create a RAID 0 from extra Hard Disk drives only, to be used for persistent container storage. Valid values: \"true\", \"false\"" + description = "Attempt to create a RAID 0 from extra Hard Disk drives only, to be used for persistent container storage. Can't be used with setup_raid nor setup_raid_sdd. 
Valid values: \"true\", \"false\"" type = "string" default = "false" } variable "setup_raid_ssd" { - description = "Attempt to create a RAID 0 from extra Solid State Drives only, to be used for persistent container storage. Valid values: \"true\", \"false\"" + description = "Attempt to create a RAID 0 from extra Solid State Drives only, to be used for persistent container storage. Can't be used with setup_raid nor setup_raid_hdd. Valid values: \"true\", \"false\"" type = "string" default = "false" }