mirror of https://github.com/tteck/Proxmox.git
Update esphome_container.sh
parent 0e08cc81aa
commit edf9f9b2ae

@@ -9,13 +9,13 @@ while true; do
   esac
 done
 
-# Setup script environment
-set -o errexit #Exit immediately if a pipeline returns a non-zero status
-set -o errtrace #Trap ERR from shell functions, command substitutions, and commands from subshells
-set -o nounset #Treat unset variables as an error
-set -o pipefail #Pipe will exit with last non-zero status if applicable
+set -o errexit
+set -o errtrace
+set -o nounset
+set -o pipefail
+
 shopt -s expand_aliases
 alias die='EXIT=$? LINE=$LINENO error_exit'
-
+CHECKMARK='\033[0;32m\xE2\x9C\x94\033[0m'
 trap die ERR
 trap cleanup EXIT
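Taken together, the four `set` options and the `die` alias give the script its fail-fast behavior: any command that returns non-zero fires the ERR trap, and because `expand_aliases` is on, the trap string `die` expands at fire time into `EXIT=$? LINE=$LINENO error_exit`. A minimal standalone sketch of the pattern, with a hypothetical `error_exit` body (the real function is defined outside this hunk):

    #!/usr/bin/env bash
    set -o errexit -o errtrace -o nounset -o pipefail
    shopt -s expand_aliases
    alias die='EXIT=$? LINE=$LINENO error_exit'

    # Hypothetical error_exit; the script's real handler lives outside this hunk.
    function error_exit() {
      echo "Error near line ${LINE}: exit code ${EXIT}" >&2
      exit "${EXIT}"
    }

    trap die ERR
    false   # any failing command now reports its line number and aborts
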
@@ -73,13 +73,10 @@ function load_module() {
 TEMP_DIR=$(mktemp -d)
 pushd $TEMP_DIR >/dev/null
 
-# Download setup script
 wget -qL https://raw.githubusercontent.com/tteck/Proxmox/main/setup/esphome_setup.sh
 
-# Detect modules and automatically load at boot
 load_module overlay
 
-# Select storage location
 while read -r line; do
   TAG=$(echo $line | awk '{print $1}')
   TYPE=$(echo $line | awk '{printf "%-10s", $2}')
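The `while read` loop that opens here turns each storage entry into a tag/type pair for a menu. The loop's input sits outside the hunk; a plausible self-contained sketch, assuming it is fed from `pvesm status` restricted to storages that can hold container root disks (the `-content rootdir` filter is an assumption):

    # Assumed feed: storages offering 'rootdir' content, header row skipped.
    while read -r line; do
      TAG=$(echo $line | awk '{print $1}')
      TYPE=$(echo $line | awk '{printf "%-10s", $2}')
      echo "storage='${TAG}' type='${TYPE}'"
    done < <(pvesm status -content rootdir | awk 'NR>1')
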
@@ -106,14 +103,13 @@ else
 fi
 info "Using '$STORAGE' for storage location."
 
-# Get the next guest VM/LXC ID
 CTID=$(pvesh get /cluster/nextid)
 info "Container ID is $CTID."
 
-# Download latest Debian 11 LXC template
-msg "Updating LXC template list..."
+echo -e "${CHECKMARK} \e[1;92m Updating LXC Template List... \e[0m"
 pveam update >/dev/null
-msg "Downloading LXC template..."
+echo -e "${CHECKMARK} \e[1;92m Downloading LXC Template... \e[0m"
+
 OSTYPE=debian
 OSVERSION=${OSTYPE}-11
 mapfile -t TEMPLATES < <(pveam available -section system | sed -n "s/.*\($OSVERSION.*\)/\1/p" | sort -t - -k 2 -V)
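Two details in this hunk are easy to miss: `pvesh get /cluster/nextid` returns the lowest guest ID unused anywhere in the cluster, so the script stays safe on multi-node installs, and the `mapfile` pipeline version-sorts (`sort -V`) every matching Debian 11 template so the newest lands last in the array. The context line of the next hunk shows the selection that follows:

    # Newest matching template is the last element after the version sort.
    TEMPLATE="${TEMPLATES[-1]}"
    echo "Selected template: ${TEMPLATE}"
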
@@ -121,7 +117,6 @@ TEMPLATE="${TEMPLATES[-1]}"
 pveam download local $TEMPLATE >/dev/null ||
   die "A problem occurred while downloading the LXC template."
 
-# Create variables for container disk
 STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
 case $STORAGE_TYPE in
   dir|nfs)
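The `case` body is cut off by the diff context. A hedged sketch of what the branches presumably assign, inferred from the `DISK`/`ROOTFS` defaults in the next hunk and Proxmox volume-naming conventions (every assignment here is an assumption):

    case $STORAGE_TYPE in
      dir|nfs)
        # Assumption: file-backed volumes are raw files referenced as <CTID>/vm-<CTID>-disk-0.raw.
        DISK_EXT=".raw"
        DISK_REF="$CTID/"
        ;;
      zfspool)
        # Assumption: ZFS volumes are subvol datasets, no extension, 'subvol' format.
        DISK_PREFIX="subvol"
        DISK_FORMAT="subvol"
        ;;
    esac

These names line up with `DISK=${DISK_PREFIX:-vm}-${CTID}-disk-0${DISK_EXT-}` below, where unset variables simply fall back to the raw-file defaults.
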
@@ -136,8 +131,7 @@ esac
 DISK=${DISK_PREFIX:-vm}-${CTID}-disk-0${DISK_EXT-}
 ROOTFS=${STORAGE}:${DISK_REF-}${DISK}
 
-# Create LXC
-msg "Creating LXC container..."
+echo -e "${CHECKMARK} \e[1;92m Creating LXC Container... \e[0m"
 DISK_SIZE=4G
 pvesm alloc $STORAGE $CTID $DISK $DISK_SIZE --format ${DISK_FORMAT:-raw} >/dev/null
 if [ "$STORAGE_TYPE" == "zfspool" ]; then
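`pvesm alloc` reserves the root volume up front so the script, not `pct create`, controls the disk name and format. The truncated `if` suggests ZFS gets special handling, since a `subvol` dataset needs no filesystem while a raw file does; a sketch of the likely branch bodies (the `mkfs.ext4` step is an assumption):

    if [ "$STORAGE_TYPE" == "zfspool" ]; then
      echo "ZFS subvol allocated; no mkfs needed."
    else
      # Assumption: raw file-backed disks get ext4 before first boot.
      mkfs.ext4 $(pvesm path $ROOTFS) &>/dev/null
    fi
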
@@ -152,17 +146,17 @@ pct create $CTID $TEMPLATE_STRING -arch $ARCH -features nesting=1 \
   -hostname $HOSTNAME -net0 name=eth0,bridge=vmbr0,ip=dhcp -onboot 1 -cores 2 -memory 1024 \
   -ostype $OSTYPE -rootfs $ROOTFS,size=$DISK_SIZE -storage $STORAGE >/dev/null
 
-# Set container timezone to match host
 MOUNT=$(pct mount $CTID | cut -d"'" -f 2)
 ln -fs $(readlink /etc/localtime) ${MOUNT}/etc/localtime
 pct unmount $CTID && unset MOUNT
 
-# Setup container
-msg "Starting LXC container..."
+echo -e "${CHECKMARK} \e[1;92m Starting LXC Container... \e[0m"
 pct start $CTID
 pct push $CTID esphome_setup.sh /esphome_setup.sh -perms 755
 pct exec $CTID /esphome_setup.sh
 
-# Get network details and show completion message
 IP=$(pct exec $CTID ip a s dev eth0 | sed -n '/inet / s/\// /p' | awk '{print $2}')
-info "Successfully created ESPHome LXC Container to $CTID at IP Address ${IP}:6052"
+info "Successfully created ESPHome LXC Container $CTID."
+echo -e "\e[1;92m ESPHome should be reachable by going to the following URL.
+         http://${IP}:6052
+\e[0m"
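The IP pipeline deserves a note: `sed -n '/inet / s/\// /p'` prints only lines containing `inet ` (skipping `inet6`) after replacing the first `/` with a space, so the CIDR suffix becomes its own field and `awk '{print $2}'` yields the bare address. A self-contained demo of the same pipeline against a sample `ip a` line:

    line="    inet 192.168.1.50/24 brd 192.168.1.255 scope global eth0"
    echo "$line" | sed -n '/inet / s/\// /p' | awk '{print $2}'   # prints 192.168.1.50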