Compare commits

...

28 commits

Author SHA1 Message Date
Roman 4258cc17b7 Merge 65fc4de5d4 into fb56c19e15 2024-12-23 10:10:26 +00:00
sturman 65fc4de5d4 Merge remote-tracking branch 'origin/main' into replace-placeholers-in-proeprties (conflicts: readme-vars.yml) 2024-12-23 12:10:17 +02:00
LinuxServer-CI fb56c19e15 Bot Updating Templated Files 2024-12-17 17:33:14 +00:00
LinuxServer-CI 2fc98aa77e Bot Updating Templated Files 2024-12-17 17:31:51 +00:00
LinuxServer-CI 950358ef4e Bot Updating Package Versions 2024-12-10 17:38:57 +00:00
LinuxServer-CI a554c80ade Bot Updating Templated Files 2024-12-10 17:35:26 +00:00
LinuxServer-CI fa90814925 Bot Updating Templated Files 2024-12-10 17:34:10 +00:00
LinuxServer-CI d030680133 Bot Updating Templated Files 2024-12-03 17:37:10 +00:00
LinuxServer-CI 199a596d83 Bot Updating Templated Files 2024-12-03 17:35:49 +00:00
LinuxServer-CI 683806afc1 Bot Updating Templated Files 2024-12-03 17:34:17 +00:00
sturman 0aa4543e91 Merge remote-tracking branch 'origin/main' into replace-placeholers-in-proeprties 2024-11-10 12:21:29 +02:00
sturman 3c43dd6cd8 Merge remote-tracking branch 'origin/main' into replace-placeholers-in-proeprties 2024-10-10 12:33:26 +03:00
Roman 7654109ffa Merge branch 'linuxserver:main' into replace-placeholers-in-proeprties 2024-09-27 09:42:48 +03:00
sturman a60a8bd34f Merge remote-tracking branch 'origin/main' into replace-placeholers-in-proeprties 2024-08-27 09:24:48 +03:00
sturman 61c4b501a0 Merge remote-tracking branch 'origin/main' into replace-placeholers-in-proeprties (conflicts: README.md, readme-vars.yml) 2024-08-17 10:39:00 +03:00
sturman 76bcaf322d Merge remote-tracking branch 'origin/main' into replace-placeholers-in-proeprties 2024-07-17 22:15:18 +03:00
sturman 4ea124f871 Merge remote-tracking branch 'origin/main' into replace-placeholers-in-proeprties 2024-06-04 15:27:21 +03:00
sturman 96d3003d43 Merge remote-tracking branch 'origin/main' into replace-placeholers-in-proeprties (conflicts: readme-vars.yml) 2024-05-21 16:10:51 +03:00
sturman 9f8030f07a Merge remote-tracking branch 'origin/main' into replace-placeholers-in-proeprties 2024-04-29 15:19:50 +03:00
sturman 1a538030be Add new environment variables to README.md 2024-03-20 12:27:16 +02:00
sturman 41b367182a Merge remote-tracking branch 'origin/main' into replace-placeholers-in-proeprties 2024-03-20 12:20:41 +02:00
sturman 27594b95a8 Sort dependencies 2024-02-24 21:51:16 +02:00
sturman 36abd4e762 Revert "Install envsubst alternative (Go version)" (reverts commit 62dab7b4) 2024-02-24 21:50:33 +02:00
sturman 3be04fb262 Fix log message 2024-02-24 20:55:31 +02:00
sturman a1b59c28ec Set DEBIAN_FRONTEND environment variable 2024-02-24 20:38:41 +02:00
sturman 62dab7b4c2 Install envsubst alternative (Go version) 2024-02-24 20:35:15 +02:00
sturman 1146ec48fd Install envsubst 2024-02-24 20:20:02 +02:00
sturman fb1bd160e3 Use envsubst to replace placeholders in system.properties 2024-02-24 20:19:53 +02:00
10 changed files with 327 additions and 195 deletions


@@ -43,16 +43,35 @@ jobs:
token=$(curl -sX GET \
"https://ghcr.io/token?scope=repository%3Alinuxserver%2Funifi-network-application%3Apull" \
| jq -r '.token')
multidigest=$(curl -s \
multidigest=$(curl -s \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Accept: application/vnd.oci.image.index.v1+json" \
--header "Authorization: Bearer ${token}" \
"https://ghcr.io/v2/${image}/manifests/${tag}")
if jq -e '.layers // empty' <<< "${multidigest}" >/dev/null 2>&1; then
# If there's a layer element it's a single-arch manifest so just get that digest
digest=$(jq -r '.config.digest' <<< "${multidigest}")
else
# Otherwise it's multi-arch or has manifest annotations
if jq -e '.manifests[]?.annotations // empty' <<< "${multidigest}" >/dev/null 2>&1; then
# Check for manifest annotations and delete if found
multidigest=$(jq 'del(.manifests[] | select(.annotations))' <<< "${multidigest}")
fi
if [[ $(jq '.manifests | length' <<< "${multidigest}") -gt 1 ]]; then
# If there's still more than one digest, it's multi-arch
multidigest=$(jq -r ".manifests[] | select(.platform.architecture == \"amd64\").digest?" <<< "${multidigest}")
else
# Otherwise it's single arch
multidigest=$(jq -r ".manifests[].digest?" <<< "${multidigest}")
fi
if digest=$(curl -s \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Accept: application/vnd.oci.image.manifest.v1+json" \
--header "Authorization: Bearer ${token}" \
"https://ghcr.io/v2/${image}/manifests/${tag}" \
| jq -r 'first(.manifests[].digest)')
digest=$(curl -s \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Authorization: Bearer ${token}" \
"https://ghcr.io/v2/${image}/manifests/${multidigest}" \
| jq -r '.config.digest')
"https://ghcr.io/v2/${image}/manifests/${multidigest}"); then
digest=$(jq -r '.config.digest' <<< "${digest}");
fi
fi
image_info=$(curl -sL \
--header "Authorization: Bearer ${token}" \
"https://ghcr.io/v2/${image}/blobs/${digest}")
@@ -90,7 +109,7 @@ jobs:
else
printf "\n## Trigger new build\n\n" >> $GITHUB_STEP_SUMMARY
echo "New version \`${EXT_RELEASE}\` found; old version was \`${IMAGE_VERSION}\`. Triggering new build" >> $GITHUB_STEP_SUMMARY
if "${artifacts_found}" == "true" ]]; then
if [[ "${artifacts_found}" == "true" ]]; then
echo "All artifacts seem to be uploaded." >> $GITHUB_STEP_SUMMARY
fi
response=$(curl -iX POST \
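
For reference, the updated check step now has to cope with three shapes of response from GHCR: a plain single-arch manifest, a multi-arch index, and an index that also carries attestation entries (the manifests with annotations). A condensed, standalone sketch of that resolution flow, with the image and tag hard-coded purely for illustration:

```bash
#!/bin/bash
# Sketch of the digest-resolution logic from the workflow step above (not the exact step).
image="linuxserver/unifi-network-application"
tag="latest"

token=$(curl -sX GET \
    "https://ghcr.io/token?scope=repository%3Alinuxserver%2Funifi-network-application%3Apull" \
    | jq -r '.token')

manifest=$(curl -s \
    --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
    --header "Accept: application/vnd.oci.image.index.v1+json" \
    --header "Authorization: Bearer ${token}" \
    "https://ghcr.io/v2/${image}/manifests/${tag}")

if jq -e '.layers // empty' <<< "${manifest}" >/dev/null 2>&1; then
    # Single-arch manifest: the config digest is directly available.
    digest=$(jq -r '.config.digest' <<< "${manifest}")
else
    # Multi-arch index: drop attestation entries (they carry annotations),
    # then pick the amd64 child, or the only one that is left.
    manifest=$(jq 'del(.manifests[] | select(.annotations))' <<< "${manifest}")
    if [[ $(jq '.manifests | length' <<< "${manifest}") -gt 1 ]]; then
        child=$(jq -r '.manifests[] | select(.platform.architecture == "amd64").digest' <<< "${manifest}")
    else
        child=$(jq -r '.manifests[0].digest' <<< "${manifest}")
    fi
    # Fetch the per-arch manifest and read its config digest.
    digest=$(curl -s \
        --header "Accept: application/vnd.oci.image.manifest.v1+json" \
        --header "Authorization: Bearer ${token}" \
        "https://ghcr.io/v2/${image}/manifests/${child}" \
        | jq -r '.config.digest')
fi
echo "config digest: ${digest}"
```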


@@ -27,9 +27,18 @@ jobs:
fi
printf "\n## Evaluating \`%s\`\n\n" ${br} >> $GITHUB_STEP_SUMMARY
JENKINS_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-unifi-network-application/${br}/jenkins-vars.yml)
if [[ "${br}" == $(yq -r '.ls_branch' <<< "${JENKINS_VARS}") ]]; then
if ! curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-unifi-network-application/${br}/Jenkinsfile >/dev/null 2>&1; then
echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
echo "> No Jenkinsfile found. Branch is either deprecated or is an early dev branch." >> $GITHUB_STEP_SUMMARY
skipped_branches="${skipped_branches}${br} "
elif [[ "${br}" == $(yq -r '.ls_branch' <<< "${JENKINS_VARS}") ]]; then
echo "Branch appears to be live; checking workflow." >> $GITHUB_STEP_SUMMARY
if [[ $(yq -r '.skip_package_check' <<< "${JENKINS_VARS}") == "true" ]]; then
README_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-unifi-network-application/${br}/readme-vars.yml)
if [[ $(yq -r '.project_deprecation_status' <<< "${README_VARS}") == "true" ]]; then
echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
echo "> Branch appears to be deprecated; skipping trigger." >> $GITHUB_STEP_SUMMARY
skipped_branches="${skipped_branches}${br} "
elif [[ $(yq -r '.skip_package_check' <<< "${JENKINS_VARS}") == "true" ]]; then
echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
echo "> Skipping branch ${br} due to \`skip_package_check\` being set in \`jenkins-vars.yml\`." >> $GITHUB_STEP_SUMMARY
skipped_branches="${skipped_branches}${br} "
@@ -37,7 +46,7 @@ jobs:
echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
echo "> Github organizational variable \`SKIP_PACKAGE_TRIGGER\` contains \`unifi-network-application_${br}\`; skipping trigger." >> $GITHUB_STEP_SUMMARY
skipped_branches="${skipped_branches}${br} "
elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-unifi-network-application/job/${br}/lastBuild/api/json | jq -r '.building') == "true" ]; then
elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-unifi-network-application/job/${br}/lastBuild/api/json | jq -r '.building' 2>/dev/null) == "true" ]; then
echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
echo "> There already seems to be an active build on Jenkins; skipping package trigger for ${br}" >> $GITHUB_STEP_SUMMARY
skipped_branches="${skipped_branches}${br} "
@@ -49,6 +58,11 @@ jobs:
response=$(curl -iX POST \
https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-unifi-network-application/job/${br}/buildWithParameters?PACKAGE_CHECK=true \
--user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|")
if [[ -z "${response}" ]]; then
echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
echo "> Jenkins build could not be triggered. Skipping branch."
continue
fi
echo "Jenkins [job queue url](${response%$'\r'})" >> $GITHUB_STEP_SUMMARY
echo "Sleeping 10 seconds until job starts" >> $GITHUB_STEP_SUMMARY
sleep 10
@@ -56,11 +70,14 @@ jobs:
buildurl="${buildurl%$'\r'}"
echo "Jenkins job [build url](${buildurl})" >> $GITHUB_STEP_SUMMARY
echo "Attempting to change the Jenkins job description" >> $GITHUB_STEP_SUMMARY
curl -iX POST \
if ! curl -ifX POST \
"${buildurl}submitDescription" \
--user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \
--data-urlencode "description=GHA package trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \
--data-urlencode "Submit=Submit"
--data-urlencode "Submit=Submit"; then
echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
echo "> Unable to change the Jenkins job description."
fi
sleep 20
fi
else
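
The scheduler changes above are mostly about failing soft: a branch is skipped when it no longer ships a Jenkinsfile or when its readme-vars.yml marks the project as deprecated, and a missing Jenkins queue URL or a failed description update no longer aborts the whole run. A minimal sketch of the new per-branch gating, with the repository and branch names assumed:

```bash
#!/bin/bash
# Sketch of the branch gating added to the package-trigger scheduler above.
repo="linuxserver/docker-unifi-network-application"
br="main"

# Skip branches that no longer ship a Jenkinsfile (deprecated or early dev branches).
if ! curl -sfX GET "https://raw.githubusercontent.com/${repo}/${br}/Jenkinsfile" >/dev/null 2>&1; then
    echo "No Jenkinsfile found on ${br}; skipping"
    exit 0
fi

# Skip branches whose readme-vars.yml flags the project as deprecated.
README_VARS=$(curl -sX GET "https://raw.githubusercontent.com/${repo}/${br}/readme-vars.yml")
if [[ $(yq -r '.project_deprecation_status' <<< "${README_VARS}") == "true" ]]; then
    echo "Branch ${br} is deprecated; skipping"
    exit 0
fi

echo "Branch ${br} is eligible for a package check trigger"
```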


@@ -17,6 +17,7 @@ RUN \
echo "**** install packages ****" && \
apt-get update && \
apt-get install --no-install-recommends -y \
gettext \
jsvc \
logrotate \
openjdk-17-jre-headless \


@@ -17,6 +17,7 @@ RUN \
echo "**** install packages ****" && \
apt-get update && \
apt-get install --no-install-recommends -y \
gettext \
jsvc \
logrotate \
openjdk-17-jre-headless \
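
The only change in the two identical Dockerfile hunks above is the new `gettext` package, which ships the `envsubst` binary that the init script now uses to render `system.properties`. A quick illustration of what `envsubst` does, using a made-up variable:

```bash
# envsubst expands $VAR / ${VAR} references in its input from the environment.
export EXAMPLE_URI="mongodb://unifi-db:27017/unifi"   # made-up value
echo 'db.mongo.uri=$EXAMPLE_URI' | envsubst
# -> db.mongo.uri=mongodb://unifi-db:27017/unifi
```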

Jenkinsfile

@@ -8,7 +8,7 @@ pipeline {
}
// Input to determine if this is a package check
parameters {
string(defaultValue: 'false', description: 'package check run', name: 'PACKAGE_CHECK')
string(defaultValue: 'false', description: 'package check run', name: 'PACKAGE_CHECK')
}
// Configuration for the variables used for this specific repo
environment {
@@ -191,6 +191,7 @@ pipeline {
env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
env.META_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN
env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache'
}
}
}
@@ -215,6 +216,7 @@ pipeline {
env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN
env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DEV_DOCKERHUB_IMAGE + '/tags/'
env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache'
}
}
}
@@ -239,6 +241,7 @@ pipeline {
env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN
env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/pull/' + env.PULL_REQUEST
env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.PR_DOCKERHUB_IMAGE + '/tags/'
env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache'
}
}
}
@@ -335,6 +338,35 @@ pipeline {
else
echo "No templates to delete"
fi
echo "Starting Stage 2.5 - Update init diagram"
if ! grep -q 'init_diagram:' readme-vars.yml; then
echo "Adding the key 'init_diagram' to readme-vars.yml"
sed -i '\\|^#.*changelog.*$|d' readme-vars.yml
sed -i 's|^changelogs:|# init diagram\\ninit_diagram:\\n\\n# changelog\\nchangelogs:|' readme-vars.yml
fi
mkdir -p ${TEMPDIR}/d2
docker run --rm -v ${TEMPDIR}/d2:/output -e PUID=$(id -u) -e PGID=$(id -g) -e RAW="true" ghcr.io/linuxserver/d2-builder:latest ${CONTAINER_NAME}:latest
ls -al ${TEMPDIR}/d2
yq -ei ".init_diagram |= load_str(\\"${TEMPDIR}/d2/${CONTAINER_NAME}-latest.d2\\")" readme-vars.yml
if [[ $(md5sum readme-vars.yml | cut -c1-8) != $(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/readme-vars.yml | cut -c1-8) ]]; then
echo "'init_diagram' has been updated. Updating repo and exiting build, new one will trigger based on commit."
mkdir -p ${TEMPDIR}/repo
git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO}
cd ${TEMPDIR}/repo/${LS_REPO}
git checkout -f main
cp ${WORKSPACE}/readme-vars.yml ${TEMPDIR}/repo/${LS_REPO}/readme-vars.yml
git add readme-vars.yml
git commit -m 'Bot Updating Templated Files'
git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git main
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git main
echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER}
echo "Updating templates and exiting build, new one will trigger based on commit"
rm -Rf ${TEMPDIR}
exit 0
else
echo "false" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER}
echo "Init diagram is unchanged"
fi
echo "Starting Stage 3 - Update templates"
CURRENTHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8)
cd ${TEMPDIR}/docker-${CONTAINER_NAME}
@@ -543,8 +575,42 @@ pipeline {
--label \"org.opencontainers.image.title=Unifi-network-application\" \
--label \"org.opencontainers.image.description=The [Unifi-network-application](https://ui.com/) software is a powerful, enterprise wireless software engine ideal for high-density client deployments requiring low latency and high uptime performance.\" \
--no-cache --pull -t ${IMAGE}:${META_TAG} --platform=linux/amd64 \
--provenance=false --sbom=false \
--provenance=true --sbom=true --builder=container --load \
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
sh '''#! /bin/bash
set -e
IFS=',' read -ra CACHE <<< "$BUILDCACHE"
for i in "${CACHE[@]}"; do
docker tag ${IMAGE}:${META_TAG} ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}
done
'''
withCredentials([
[
$class: 'UsernamePasswordMultiBinding',
credentialsId: 'Quay.io-Robot',
usernameVariable: 'QUAYUSER',
passwordVariable: 'QUAYPASS'
]
]) {
retry_backoff(5,5) {
sh '''#! /bin/bash
set -e
echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin
echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin
if [[ "${PACKAGE_CHECK}" != "true" ]]; then
IFS=',' read -ra CACHE <<< "$BUILDCACHE"
for i in "${CACHE[@]}"; do
docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} &
done
for p in $(jobs -p); do
wait "$p" || { echo "job $p failed" >&2; exit 1; }
done
fi
'''
}
}
}
}
// Build MultiArch Docker containers for push to LS Repo
@@ -575,8 +641,42 @@ pipeline {
--label \"org.opencontainers.image.title=Unifi-network-application\" \
--label \"org.opencontainers.image.description=The [Unifi-network-application](https://ui.com/) software is a powerful, enterprise wireless software engine ideal for high-density client deployments requiring low latency and high uptime performance.\" \
--no-cache --pull -t ${IMAGE}:amd64-${META_TAG} --platform=linux/amd64 \
--provenance=false --sbom=false \
--provenance=true --sbom=true --builder=container --load \
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
sh '''#! /bin/bash
set -e
IFS=',' read -ra CACHE <<< "$BUILDCACHE"
for i in "${CACHE[@]}"; do
docker tag ${IMAGE}:amd64-${META_TAG} ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}
done
'''
withCredentials([
[
$class: 'UsernamePasswordMultiBinding',
credentialsId: 'Quay.io-Robot',
usernameVariable: 'QUAYUSER',
passwordVariable: 'QUAYPASS'
]
]) {
retry_backoff(5,5) {
sh '''#! /bin/bash
set -e
echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin
echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin
if [[ "${PACKAGE_CHECK}" != "true" ]]; then
IFS=',' read -ra CACHE <<< "$BUILDCACHE"
for i in "${CACHE[@]}"; do
docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} &
done
for p in $(jobs -p); do
wait "$p" || { echo "job $p failed" >&2; exit 1; }
done
fi
'''
}
}
}
}
stage('Build ARM64') {
@@ -585,10 +685,6 @@ pipeline {
}
steps {
echo "Running on node: ${NODE_NAME}"
echo 'Logging into Github'
sh '''#! /bin/bash
echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
'''
sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile.aarch64"
sh "docker buildx build \
--label \"org.opencontainers.image.created=${GITHUB_DATE}\" \
@@ -604,18 +700,49 @@ pipeline {
--label \"org.opencontainers.image.title=Unifi-network-application\" \
--label \"org.opencontainers.image.description=The [Unifi-network-application](https://ui.com/) software is a powerful, enterprise wireless software engine ideal for high-density client deployments requiring low latency and high uptime performance.\" \
--no-cache --pull -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} --platform=linux/arm64 \
--provenance=false --sbom=false \
--provenance=true --sbom=true --builder=container --load \
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
sh "docker tag ${IMAGE}:arm64v8-${META_TAG} ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
retry_backoff(5,5) {
sh "docker push ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
sh '''#! /bin/bash
set -e
IFS=',' read -ra CACHE <<< "$BUILDCACHE"
for i in "${CACHE[@]}"; do
docker tag ${IMAGE}:arm64v8-${META_TAG} ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
done
'''
withCredentials([
[
$class: 'UsernamePasswordMultiBinding',
credentialsId: 'Quay.io-Robot',
usernameVariable: 'QUAYUSER',
passwordVariable: 'QUAYPASS'
]
]) {
retry_backoff(5,5) {
sh '''#! /bin/bash
set -e
echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin
echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin
if [[ "${PACKAGE_CHECK}" != "true" ]]; then
IFS=',' read -ra CACHE <<< "$BUILDCACHE"
for i in "${CACHE[@]}"; do
docker push ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} &
done
for p in $(jobs -p); do
wait "$p" || { echo "job $p failed" >&2; exit 1; }
done
fi
'''
}
}
sh '''#! /bin/bash
containers=$(docker ps -aq)
if [[ -n "${containers}" ]]; then
docker stop ${containers}
fi
docker system prune -af --volumes || : '''
docker system prune -af --volumes || :
'''
}
}
}
@@ -765,37 +892,23 @@ pipeline {
environment name: 'EXIT_STATUS', value: ''
}
steps {
withCredentials([
[
$class: 'UsernamePasswordMultiBinding',
credentialsId: 'Quay.io-Robot',
usernameVariable: 'QUAYUSER',
passwordVariable: 'QUAYPASS'
]
]) {
retry_backoff(5,5) {
sh '''#! /bin/bash
set -e
echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin
echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin
for PUSHIMAGE in "${GITHUBIMAGE}" "${GITLABIMAGE}" "${QUAYIMAGE}" "${IMAGE}"; do
docker tag ${IMAGE}:${META_TAG} ${PUSHIMAGE}:${META_TAG}
docker tag ${PUSHIMAGE}:${META_TAG} ${PUSHIMAGE}:latest
docker tag ${PUSHIMAGE}:${META_TAG} ${PUSHIMAGE}:${EXT_RELEASE_TAG}
if [ -n "${SEMVER}" ]; then
docker tag ${PUSHIMAGE}:${META_TAG} ${PUSHIMAGE}:${SEMVER}
fi
docker push ${PUSHIMAGE}:latest
docker push ${PUSHIMAGE}:${META_TAG}
docker push ${PUSHIMAGE}:${EXT_RELEASE_TAG}
if [ -n "${SEMVER}" ]; then
docker push ${PUSHIMAGE}:${SEMVER}
fi
retry_backoff(5,5) {
sh '''#! /bin/bash
set -e
for PUSHIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do
[[ ${PUSHIMAGE%%/*} =~ \\. ]] && PUSHIMAGEPLUS="${PUSHIMAGE}" || PUSHIMAGEPLUS="docker.io/${PUSHIMAGE}"
IFS=',' read -ra CACHE <<< "$BUILDCACHE"
for i in "${CACHE[@]}"; do
if [[ "${PUSHIMAGEPLUS}" == "$(cut -d "/" -f1 <<< ${i})"* ]]; then
CACHEIMAGE=${i}
fi
done
'''
}
docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${META_TAG} -t ${PUSHIMAGE}:latest -t ${PUSHIMAGE}:${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}
if [ -n "${SEMVER}" ]; then
docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}
fi
done
'''
}
}
}
@@ -806,57 +919,34 @@ pipeline {
environment name: 'EXIT_STATUS', value: ''
}
steps {
withCredentials([
[
$class: 'UsernamePasswordMultiBinding',
credentialsId: 'Quay.io-Robot',
usernameVariable: 'QUAYUSER',
passwordVariable: 'QUAYPASS'
]
]) {
retry_backoff(5,5) {
sh '''#! /bin/bash
set -e
echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin
echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin
if [ "${CI}" == "false" ]; then
docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} --platform=arm64
docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG}
retry_backoff(5,5) {
sh '''#! /bin/bash
set -e
for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do
[[ ${MANIFESTIMAGE%%/*} =~ \\. ]] && MANIFESTIMAGEPLUS="${MANIFESTIMAGE}" || MANIFESTIMAGEPLUS="docker.io/${MANIFESTIMAGE}"
IFS=',' read -ra CACHE <<< "$BUILDCACHE"
for i in "${CACHE[@]}"; do
if [[ "${MANIFESTIMAGEPLUS}" == "$(cut -d "/" -f1 <<< ${i})"* ]]; then
CACHEIMAGE=${i}
fi
done
docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${META_TAG} -t ${MANIFESTIMAGE}:amd64-latest -t ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}
docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${META_TAG} -t ${MANIFESTIMAGE}:arm64v8-latest -t ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
if [ -n "${SEMVER}" ]; then
docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}
docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${SEMVER} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
fi
for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do
docker tag ${IMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG}
docker tag ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-latest
docker tag ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG}
docker tag ${IMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG}
docker tag ${MANIFESTIMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-latest
docker tag ${MANIFESTIMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG}
if [ -n "${SEMVER}" ]; then
docker tag ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:amd64-${SEMVER}
docker tag ${MANIFESTIMAGE}:arm64v8-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${SEMVER}
fi
docker push ${MANIFESTIMAGE}:amd64-${META_TAG}
docker push ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG}
docker push ${MANIFESTIMAGE}:amd64-latest
docker push ${MANIFESTIMAGE}:arm64v8-${META_TAG}
docker push ${MANIFESTIMAGE}:arm64v8-latest
docker push ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG}
if [ -n "${SEMVER}" ]; then
docker push ${MANIFESTIMAGE}:amd64-${SEMVER}
docker push ${MANIFESTIMAGE}:arm64v8-${SEMVER}
fi
done
for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do
docker buildx imagetools create -t ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:amd64-latest ${MANIFESTIMAGE}:arm64v8-latest
docker buildx imagetools create -t ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG}
docker buildx imagetools create -t ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG}
if [ -n "${SEMVER}" ]; then
docker buildx imagetools create -t ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:amd64-${SEMVER} ${MANIFESTIMAGE}:arm64v8-${SEMVER}
fi
done
'''
}
done
for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do
docker buildx imagetools create -t ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:amd64-latest ${MANIFESTIMAGE}:arm64v8-latest
docker buildx imagetools create -t ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG}
docker buildx imagetools create -t ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG}
if [ -n "${SEMVER}" ]; then
docker buildx imagetools create -t ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:amd64-${SEMVER} ${MANIFESTIMAGE}:arm64v8-${SEMVER}
fi
done
'''
}
}
}
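
The recurring Jenkinsfile pattern above (tag the per-arch image for every registry listed in `BUILDCACHE`, push those cache tags, then assemble the public tags later with `docker buildx imagetools create` instead of pulling and re-pushing local images) reduces to roughly the following sketch. All values are placeholders; the real pipeline injects them from its environment:

```bash
#!/bin/bash
set -e
# Placeholder values; the pipeline derives these from its environment.
BUILDCACHE="docker.io/lsiodev/buildcache,ghcr.io/linuxserver/lsiodev-buildcache"
IMAGE="linuxserver/unifi-network-application"
META_TAG="example-ls1"   # hypothetical tag
COMMIT_SHA="abc1234"
BUILD_NUMBER="42"

# 1. Tag and push the locally built amd64 image to every buildcache registry.
IFS=',' read -ra CACHE <<< "$BUILDCACHE"
for i in "${CACHE[@]}"; do
    docker tag "${IMAGE}:amd64-${META_TAG}" "${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}"
    docker push "${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}"
done

# 2. Later stages stitch the published multi-arch tag together directly from the
#    per-arch cache images, so nothing needs to be pulled back and re-pushed.
docker buildx imagetools create -t "${IMAGE}:${META_TAG}" \
    "${CACHE[0]}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}" \
    "${CACHE[0]}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
```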


@@ -148,7 +148,6 @@ The simplest migration approach is to take a full backup of your existing instal
You can then start up the new container with a clean `/config` mount (and a database container configured), and perform a restore using the setup wizard.
### Strict reverse proxies
This image uses a self-signed certificate by default. This naturally means the scheme is `https`.
@@ -158,6 +157,9 @@ If you are using a reverse proxy which validates certificates, you need to [disa
To help you get started creating a container from this image you can either use docker-compose or the docker cli.
> [!NOTE]
> Unless a parameter is flagged as 'optional', it is *mandatory* and a value must be provided.
### docker-compose (recommended, [click here for more info](https://docs.linuxserver.io/general/docker-compose))
```yaml
@@ -202,10 +204,8 @@ docker run -d \
-e PUID=1000 \
-e PGID=1000 \
-e TZ=Etc/UTC \
-e MONGO_USER=unifi \
-e MONGO_PASS= \
-e MONGO_HOST=unifi-db \
-e MONGO_PORT=27017 \
-e MONGO_URI=mongodb+srv://unifi:password@unifi.mongodb.net/?retryWrites=true&w=majority&appName=unifi \
-e STAT_MONGO_URI=mongodb+srv://unifi:password@unifi.mongodb.net/?retryWrites=true&w=majority&appName=unifi \
-e MONGO_DBNAME=unifi \
-e MONGO_AUTHSOURCE=admin \
-e MEM_LIMIT=1024 `#optional` \
@@ -231,10 +231,10 @@ Containers are configured using parameters passed at runtime (such as those abov
| Parameter | Function |
| :----: | --- |
| `-p 8443` | Unifi web admin port |
| `-p 3478/udp` | Unifi STUN port |
| `-p 10001/udp` | Required for AP discovery |
| `-p 8080` | Required for device communication |
| `-p 8443:8443` | Unifi web admin port |
| `-p 3478:3478/udp` | Unifi STUN port |
| `-p 10001:10001/udp` | Required for AP discovery |
| `-p 8080:8080` | Required for device communication |
| `-p 1900/udp` | Required for `Make controller discoverable on L2 network` option |
| `-p 8843` | Unifi guest portal HTTPS redirect port |
| `-p 8880` | Unifi guest portal HTTP redirect port |
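
One practical note on the switch to `MONGO_URI`/`STAT_MONGO_URI` shown above: the credentials are now embedded in the connection string, so special characters in the username or password presumably still need to be percent-encoded, just as they did for the old `MONGO_USER`/`MONGO_PASS` variables. One way to encode a value with the `jq` already used in the workflows above:

```bash
# Percent-encode a password (example value) before embedding it in MONGO_URI.
printf %s 'p@ss w0rd!' | jq -sRr @uri
# -> p%40ss%20w0rd%21
```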


@@ -48,7 +48,7 @@ coreutils 9.4-3ubuntu6 deb
cron 3.0pl1-184ubuntu2 deb
cron-daemon-common 3.0pl1-184ubuntu2 deb
cron4j 2.2.5 java-archive
curl 8.5.0-2ubuntu10.4 deb
curl 8.5.0-2ubuntu10.5 deb
dash 0.5.12-6ubuntu5 deb
debconf 1.5.86ubuntu1 deb
debianutils 5.17build1 deb
@@ -150,14 +150,14 @@ jstun 0.7.4 jav
jsvc 1.0.15-11build1 deb
jul-to-slf4j 2.0.13 java-archive
keyboxd 2.4.4-2ubuntu17 deb
krb5-locales 1.20.1-6ubuntu2.1 deb
krb5-locales 1.20.1-6ubuntu2.2 deb
lazysodium-java 5.1.4 java-archive
libacl1 2.3.2-1build1 deb
libacl1 2.3.2-1build1.1 deb
libapt-pkg6.0t64 2.7.14build2 deb
libassuan0 2.5.6-1build1 deb
libattr1 1:2.5.2-1build1 deb
libaudit-common 1:3.1.2-2.1build1 deb
libaudit1 1:3.1.2-2.1build1 deb
libaudit-common 1:3.1.2-2.1build1.1 deb
libaudit1 1:3.1.2-2.1build1.1 deb
libblkid1 2.39.3-9ubuntu6.1 deb
libbrotli1 1.1.0-2build2 deb
libbsd0 0.12.1-1build1 deb
@@ -169,7 +169,7 @@ libcap2 1:2.66-5ubuntu2 deb
libcom-err2 1.47.0-2.4~exp1ubuntu4.1 deb
libcommons-daemon-java 1.0.15-11build1 deb
libcrypt1 1:4.4.36-4build1 deb
libcurl4t64 8.5.0-2ubuntu10.4 deb
libcurl4t64 8.5.0-2ubuntu10.5 deb
libdb5.3t64 5.3.28+dfsg2-7 deb
libdebconfclient0 0.271ubuntu3 deb
libext2fs2t64 1.47.0-2.4~exp1ubuntu4.1 deb
@@ -179,20 +179,20 @@ libgcrypt20 1.10.3-2build1 deb
libgmp10 2:6.3.0+dfsg-2ubuntu6 deb
libgnutls30t64 3.8.3-1.1ubuntu3.2 deb
libgpg-error0 1.47-3build2 deb
libgssapi-krb5-2 1.20.1-6ubuntu2.1 deb
libgssapi-krb5-2 1.20.1-6ubuntu2.2 deb
libhogweed6t64 3.9.1-2.2build1.1 deb
libidn2-0 2.3.7-2build1 deb
libjpeg-turbo8 2.1.5-2ubuntu2 deb
libjpeg8 8c-2ubuntu11 deb
libjq1 1.7.1-3build1 deb
libk5crypto3 1.20.1-6ubuntu2.1 deb
libk5crypto3 1.20.1-6ubuntu2.2 deb
libkeyutils1 1.6.3-3build1 deb
libkrb5-3 1.20.1-6ubuntu2.1 deb
libkrb5support0 1.20.1-6ubuntu2.1 deb
libkrb5-3 1.20.1-6ubuntu2.2 deb
libkrb5support0 1.20.1-6ubuntu2.2 deb
libksba8 1.6.6-1build1 deb
liblcms2-2 2.14-2build1 deb
libldap-common 2.6.7+dfsg-1~exp1ubuntu8 deb
libldap2 2.6.7+dfsg-1~exp1ubuntu8 deb
libldap-common 2.6.7+dfsg-1~exp1ubuntu8.1 deb
libldap2 2.6.7+dfsg-1~exp1ubuntu8.1 deb
liblz4-1 1.9.4-1build1.1 deb
liblzma5 5.6.1+really5.4.5-1build0.1 deb
libmd0 1.1.0-2build1 deb


@@ -6,49 +6,42 @@ project_url: "https://ui.com/"
project_logo: "https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/unifi-banner.png"
project_blurb: "The [{{ project_name|capitalize }}]({{ project_url }}) software is a powerful, enterprise wireless software engine ideal for high-density client deployments requiring low latency and high uptime performance."
project_lsio_github_repo_url: "https://github.com/linuxserver/docker-{{ project_name }}"
# supported architectures
available_architectures:
- { arch: "{{ arch_x86_64 }}", tag: "amd64-latest"}
- { arch: "{{ arch_arm64 }}", tag: "arm64v8-latest"}
- {arch: "{{ arch_x86_64 }}", tag: "amd64-latest"}
- {arch: "{{ arch_arm64 }}", tag: "arm64v8-latest"}
# container parameters
param_container_name: "{{ project_name }}"
param_usage_include_vols: true
param_volumes:
- { vol_path: "/config", vol_host_path: "/path/to/{{ project_name }}/data", desc: "Persistent config files" }
- {vol_path: "/config", vol_host_path: "/path/to/{{ project_name }}/data", desc: "Persistent config files"}
param_usage_include_ports: true
param_ports:
- { external_port: "8443", internal_port: "8443", port_desc: "Unifi web admin port" }
- { external_port: "3478", internal_port: "3478/udp", port_desc: "Unifi STUN port" }
- { external_port: "10001", internal_port: "10001/udp", port_desc: "Required for AP discovery" }
- { external_port: "8080", internal_port: "8080", port_desc: "Required for device communication" }
- {external_port: "8443", internal_port: "8443", port_desc: "Unifi web admin port"}
- {external_port: "3478", internal_port: "3478/udp", port_desc: "Unifi STUN port"}
- {external_port: "10001", internal_port: "10001/udp", port_desc: "Required for AP discovery"}
- {external_port: "8080", internal_port: "8080", port_desc: "Required for device communication"}
param_usage_include_env: true
param_env_vars:
- { env_var: "MONGO_USER", env_value: "unifi", desc: "Mongodb Username. Only evaluated on first run. **Special characters must be [url encoded](https://en.wikipedia.org/wiki/Percent-encoding)**." }
- { env_var: "MONGO_PASS", env_value: "", desc: "Mongodb Password. Only evaluated on first run. **Special characters must be [url encoded](https://en.wikipedia.org/wiki/Percent-encoding)**." }
- { env_var: "MONGO_HOST", env_value: "unifi-db", desc: "Mongodb Hostname. Only evaluated on first run." }
- { env_var: "MONGO_PORT", env_value: "27017", desc: "Mongodb Port. Only evaluated on first run." }
- { env_var: "MONGO_DBNAME", env_value: "unifi", desc: "Mongodb Database Name (stats DB is automatically suffixed with `_stat`). Only evaluated on first run." }
- { env_var: "MONGO_AUTHSOURCE", env_value: "admin", desc: "Mongodb [authSource](https://www.mongodb.com/docs/manual/reference/connection-string/#mongodb-urioption-urioption.authSource). For Atlas set to `admin`. Only evaluated on first run." }
- {env_var: "MONGO_USER", env_value: "unifi", desc: "Mongodb Username. Only evaluated on first run. **Special characters must be [url encoded](https://en.wikipedia.org/wiki/Percent-encoding)**."}
- {env_var: "MONGO_PASS", env_value: "", desc: "Mongodb Password. Only evaluated on first run. **Special characters must be [url encoded](https://en.wikipedia.org/wiki/Percent-encoding)**."}
- {env_var: "MONGO_HOST", env_value: "unifi-db", desc: "Mongodb Hostname. Only evaluated on first run."}
- {env_var: "MONGO_PORT", env_value: "27017", desc: "Mongodb Port. Only evaluated on first run."}
- {env_var: "MONGO_DBNAME", env_value: "unifi", desc: "Mongodb Database Name (stats DB is automatically suffixed with `_stat`). Only evaluated on first run."}
- {env_var: "MONGO_AUTHSOURCE", env_value: "admin", desc: "Mongodb [authSource](https://www.mongodb.com/docs/manual/reference/connection-string/#mongodb-urioption-urioption.authSource). For Atlas set to `admin`. Only evaluated on first run."}
# optional container parameters
opt_param_usage_include_env: true
opt_param_env_vars:
- { env_var: "MEM_LIMIT", env_value: "1024", desc: "Optionally change the Java memory limit (in Megabytes). Set to `default` to reset to default" }
- { env_var: "MEM_STARTUP", env_value: "1024", desc: "Optionally change the Java initial/minimum memory (in Megabytes). Set to `default` to reset to default" }
- { env_var: "MONGO_TLS", env_value: "", desc: "Mongodb enable [TLS](https://www.mongodb.com/docs/manual/reference/connection-string/#mongodb-urioption-urioption.tls). Only evaluated on first run." }
- {env_var: "MEM_LIMIT", env_value: "1024", desc: "Optionally change the Java memory limit (in Megabytes). Set to `default` to reset to default"}
- {env_var: "MEM_STARTUP", env_value: "1024", desc: "Optionally change the Java initial/minimum memory (in Megabytes). Set to `default` to reset to default"}
- {env_var: "MONGO_TLS", env_value: "", desc: "Mongodb enable [TLS](https://www.mongodb.com/docs/manual/reference/connection-string/#mongodb-urioption-urioption.tls). Only evaluated on first run."}
opt_param_usage_include_ports: true
opt_param_ports:
- { external_port: "1900", internal_port: "1900/udp", port_desc: "Required for `Make controller discoverable on L2 network` option" }
- { external_port: "8843", internal_port: "8843", port_desc: "Unifi guest portal HTTPS redirect port" }
- { external_port: "8880", internal_port: "8880", port_desc: "Unifi guest portal HTTP redirect port" }
- { external_port: "6789", internal_port: "6789", port_desc: "For mobile throughput test" }
- { external_port: "5514", internal_port: "5514/udp", port_desc: "Remote syslog port" }
- {external_port: "1900", internal_port: "1900/udp", port_desc: "Required for `Make controller discoverable on L2 network` option"}
- {external_port: "8843", internal_port: "8843", port_desc: "Unifi guest portal HTTPS redirect port"}
- {external_port: "8880", internal_port: "8880", port_desc: "Unifi guest portal HTTP redirect port"}
- {external_port: "6789", internal_port: "6789", port_desc: "For mobile throughput test"}
- {external_port: "5514", internal_port: "5514/udp", port_desc: "Remote syslog port"}
# application setup block
app_setup_block_enabled: true
app_setup_block: |
@@ -141,11 +134,50 @@ app_setup_block: |
The simplest migration approach is to take a full backup of your existing install, including history, from the Unifi-Controller web UI, then shut down the old container.
You can then start up the new container with a clean `/config` mount (and a database container configured), and perform a restore using the setup wizard.
# init diagram
init_diagram: |
"unifi-network-application:latest": {
docker-mods
base {
fix-attr +\nlegacy cont-init
}
docker-mods -> base
legacy-services
custom services
init-services -> legacy-services
init-services -> custom services
custom services -> legacy-services
legacy-services -> ci-service-check
init-migrations -> init-adduser
init-os-end -> init-config
init-config -> init-config-end
init-crontab-config -> init-config-end
init-unifi-network-application-config -> init-config-end
init-config -> init-crontab-config
init-mods-end -> init-custom-files
base -> init-envfile
base -> init-migrations
init-config-end -> init-mods
init-mods-package-install -> init-mods-end
init-mods -> init-mods-package-install
init-adduser -> init-os-end
init-envfile -> init-os-end
init-custom-files -> init-services
init-config -> init-unifi-network-application-config
init-services -> svc-cron
svc-cron -> legacy-services
init-services -> svc-unifi-network-application
svc-unifi-network-application -> legacy-services
}
Base Images: {
"baseimage-ubuntu:noble"
}
"unifi-network-application:latest" <- Base Images
# changelog
changelogs:
- { date: "11.08.24:", desc: "**Important**: The mongodb init instructions have been updated to enable auth ([RBAC](https://www.mongodb.com/docs/manual/core/authorization/#role-based-access-control)). We have been notified that if RBAC is not enabled, the official mongodb container allows remote access to the db contents over port 27017 without credentials. If you set up the mongodb container with the old instructions we provided, you should not map or expose port 27017. If you would like to enable auth, the easiest way is to create new instances of both unifi and mongodb with the new instructions and restore unifi from a backup." }
- { date: "11.08.24:", desc: "Rebase to Ubuntu Noble." }
- { date: "04.03.24:", desc: "Install from zip package instead of deb." }
- { date: "17.10.23:", desc: "Add environment variables for TLS and authSource to support Atlas and new MongoDB versions." }
- { date: "05.09.23:", desc: "Initial release." }
- {date: "23.12.24:", desc: "Change environment variables" }
- {date: "11.08.24:", desc: "**Important**: The mongodb init instructions have been updated to enable auth ([RBAC](https://www.mongodb.com/docs/manual/core/authorization/#role-based-access-control)). We have been notified that if RBAC is not enabled, the official mongodb container allows remote access to the db contents over port 27017 without credentials. If you set up the mongodb container with the old instructions we provided, you should not map or expose port 27017. If you would like to enable auth, the easiest way is to create new instances of both unifi and mongodb with the new instructions and restore unifi from a backup."}
- {date: "11.08.24:", desc: "Rebase to Ubuntu Noble."}
- {date: "04.03.24:", desc: "Install from zip package instead of deb."}
- {date: "17.10.23:", desc: "Add environment variables for TLS and authSource to support Atlas and new MongoDB versions."}
- {date: "05.09.23:", desc: "Initial release."}


@@ -41,6 +41,6 @@
# unifi.throughput.port=6789
#
db.mongo.local=false
db.mongo.uri=mongodb://~MONGO_USER~:~MONGO_PASS~@~MONGO_HOST~:~MONGO_PORT~/~MONGO_DBNAME~?tls=~MONGO_TLS~~MONGO_AUTHSOURCE~
statdb.mongo.uri=mongodb://~MONGO_USER~:~MONGO_PASS~@~MONGO_HOST~:~MONGO_PORT~/~MONGO_DBNAME~_stat?tls=~MONGO_TLS~~MONGO_AUTHSOURCE~
unifi.db.name=~MONGO_DBNAME~
db.mongo.uri=$MONGO_URI
statdb.mongo.uri=$STAT_MONGO_URI
unifi.db.name=$MONGO_DBNAME


@@ -28,39 +28,11 @@ if [[ ! -L "/usr/lib/unifi/run" ]]; then
fi
if [[ ! -e /config/data/system.properties ]]; then
if [[ -z "${MONGO_HOST}" ]]; then
echo "*** No MONGO_HOST set, cannot configure database settings. ***"
if [[ -z "${MONGO_URI}" || -z "${STAT_MONGO_URI}" || -z "${MONGO_DBNAME}" ]]; then
echo "*** Required environment variables are not set, cannot configure database settings. ***"
sleep infinity
else
echo "*** Waiting for MONGO_HOST ${MONGO_HOST} to be reachable. ***"
DBCOUNT=0
while true; do
if nc -w1 "${MONGO_HOST}" "${MONGO_PORT}" >/dev/null 2>&1; then
break
fi
DBCOUNT=$((DBCOUNT+1))
if [[ ${DBCOUNT} -gt 6 ]]; then
echo "*** Defined MONGO_HOST ${MONGO_HOST} is not reachable, cannot proceed. ***"
sleep infinity
fi
sleep 5
done
sed -i "s/~MONGO_USER~/${MONGO_USER}/" /defaults/system.properties
sed -i "s/~MONGO_HOST~/${MONGO_HOST}/" /defaults/system.properties
sed -i "s/~MONGO_PORT~/${MONGO_PORT}/" /defaults/system.properties
sed -i "s/~MONGO_DBNAME~/${MONGO_DBNAME}/" /defaults/system.properties
sed -i "s/~MONGO_PASS~/${MONGO_PASS}/" /defaults/system.properties
if [[ "${MONGO_TLS,,}" = "true" ]]; then
sed -i "s/~MONGO_TLS~/true/" /defaults/system.properties
else
sed -i "s/~MONGO_TLS~/false/" /defaults/system.properties
fi
if [[ -z "${MONGO_AUTHSOURCE}" ]]; then
sed -i "s/~MONGO_AUTHSOURCE~//" /defaults/system.properties
else
sed -i "s/~MONGO_AUTHSOURCE~/\&authSource=${MONGO_AUTHSOURCE}/" /defaults/system.properties
fi
cp /defaults/system.properties /config/data
envsubst < /defaults/system.properties > /config/data/system.properties
fi
fi
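
Taken together with the `system.properties` template change, the rewritten init logic drops the per-placeholder `sed` edits and the MongoDB reachability loop: it only checks that the required variables exist and renders the template with `envsubst`. With made-up values, the tail of the rendered file would look roughly like this:

```bash
# Example values only; supply your own connection strings.
export MONGO_URI="mongodb://unifi:secret@unifi-db:27017/unifi?tls=false&authSource=admin"
export STAT_MONGO_URI="mongodb://unifi:secret@unifi-db:27017/unifi_stat?tls=false&authSource=admin"
export MONGO_DBNAME="unifi"

envsubst < /defaults/system.properties > /config/data/system.properties
tail -n 3 /config/data/system.properties
# db.mongo.uri=mongodb://unifi:secret@unifi-db:27017/unifi?tls=false&authSource=admin
# statdb.mongo.uri=mongodb://unifi:secret@unifi-db:27017/unifi_stat?tls=false&authSource=admin
# unifi.db.name=unifi
```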