Merge branch 'master' into feature/http-method-push-monitor

# Conflicts:
#	src/pages/EditMonitor.vue
Stefan Ottosson 2024-05-23 17:34:45 +02:00
commit 1ac5696463
549 changed files with 68374 additions and 39624 deletions

.devcontainer/README.md (new file)

@ -0,0 +1,28 @@
# Codespaces
You can modify Uptime Kuma in your browser without setting up a local development environment.
![image](https://github.com/louislam/uptime-kuma/assets/1336778/31d9f06d-dd0b-4405-8e0d-a96586ee4595)
1. Click `Code` -> `Create codespace on master`
2. Wait a few minutes until you see there are two exposed ports
3. Go to the `3000` URL and check that it is working
![image](https://github.com/louislam/uptime-kuma/assets/1336778/909b2eb4-4c5e-44e4-ac26-6d20ed856e7f)
## Frontend
Since the frontend is using [Vite.js](https://vitejs.dev/), all changes in this area will be hot-reloaded.
You don't need to restart the frontend, unless you try to add a new frontend dependency.
## Backend
The backend does not automatically hot-reload.
After changing something, you will need to restart the backend using these steps:
1. Click `Terminal`
2. Click `Codespaces: server-dev` in the right panel
3. Press `Ctrl + C` to stop the server
4. Press `Up` to run `npm run start-server-dev`
![image](https://github.com/louislam/uptime-kuma/assets/1336778/e0c0a350-fe46-4588-9f37-e053c85834d1)
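In other words, restarting the backend from the `server-dev` terminal comes down to the following (a minimal sketch of the steps above):

```bash
# In the "Codespaces: server-dev" terminal:
# press Ctrl + C to stop the running server, then start it again with
npm run start-server-dev
```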

.devcontainer/devcontainer.json (new file)

@ -0,0 +1,23 @@
{
"image": "mcr.microsoft.com/devcontainers/javascript-node:dev-18-bookworm",
"features": {
"ghcr.io/devcontainers/features/github-cli:1": {}
},
"updateContentCommand": "npm ci",
"postCreateCommand": "",
"postAttachCommand": {
"frontend-dev": "npm run start-frontend-devcontainer",
"server-dev": "npm run start-server-dev",
"open-port": "gh codespace ports visibility 3001:public -c $CODESPACE_NAME"
},
"customizations": {
"vscode": {
"extensions": [
"streetsidesoftware.code-spell-checker",
"dbaeumer.vscode-eslint",
"GitHub.copilot-chat"
]
}
},
"forwardPorts": [3000, 3001]
}

.dockerignore

@ -1,6 +1,7 @@
/.idea
/node_modules
/data
/data*
/cypress
/out
/test
/kubernetes
@ -17,6 +18,7 @@ README.md
.vscode
.eslint*
.stylelint*
/.devcontainer
/.github
yarn.lock
app.json
@ -28,8 +30,16 @@ SECURITY.md
tsconfig.json
.env
/tmp
/babel.config.js
/ecosystem.config.js
/extra/healthcheck.exe
/extra/healthcheck
/extra/exe-builder
/extra/push-examples
/extra/uptime-kuma-push
# Comment the following line if you want to rebuild the healthcheck binary
/extra/healthcheck-armv7
### .gitignore content (commented rules are duplicated)
@ -44,6 +54,4 @@ dist-ssr
#!/data/.gitkeep
#.vscode
### End of .gitignore content

.editorconfig

@ -19,3 +19,6 @@ indent_size = 2
[*.vue]
trim_trailing_whitespace = false
[*.go]
indent_style = tab

.eslintrc.js

@ -1,6 +1,7 @@
module.exports = {
ignorePatterns: [
"test/*",
"test/*.js",
"test/cypress",
"server/modules/apicache/*",
"src/util.js"
],
@ -14,13 +15,18 @@ module.exports = {
extends: [
"eslint:recommended",
"plugin:vue/vue3-recommended",
"plugin:jsdoc/recommended-error",
],
parser: "vue-eslint-parser",
parserOptions: {
parser: "@babel/eslint-parser",
parser: "@typescript-eslint/parser",
sourceType: "module",
requireConfigFile: false,
},
plugins: [
"jsdoc",
"@typescript-eslint",
],
rules: {
"yoda": "error",
eqeqeq: [ "warn", "smart" ],
@ -71,14 +77,14 @@ module.exports = {
"no-var": "error",
"key-spacing": "warn",
"keyword-spacing": "warn",
"space-infix-ops": "warn",
"space-infix-ops": "error",
"arrow-spacing": "warn",
"no-trailing-spaces": "error",
"no-constant-condition": [ "error", {
"checkLoops": false,
}],
"space-before-blocks": "warn",
//'no-console': 'warn',
//"no-console": "warn",
"no-extra-boolean-cast": "off",
"no-multiple-empty-lines": [ "warn", {
"max": 1,
@ -90,14 +96,51 @@ module.exports = {
"no-unneeded-ternary": "error",
"array-bracket-newline": [ "error", "consistent" ],
"eol-last": [ "error", "always" ],
//'prefer-template': 'error',
//"prefer-template": "error",
"template-curly-spacing": [ "warn", "never" ],
"comma-dangle": [ "warn", "only-multiline" ],
"no-empty": [ "error", {
"allowEmptyCatch": true
}],
"no-control-regex": "off",
"one-var": [ "error", "never" ],
"max-statements-per-line": [ "error", { "max": 1 }]
"max-statements-per-line": [ "error", { "max": 1 }],
"jsdoc/check-tag-names": [
"error",
{
"definedTags": [ "link" ]
}
],
"jsdoc/no-undefined-types": "off",
"jsdoc/no-defaults": [
"error",
{ "noOptionalParamNames": true }
],
"jsdoc/require-throws": "warn",
"jsdoc/require-jsdoc": [
"error",
{
"require": {
"FunctionDeclaration": true,
"MethodDefinition": true,
}
}
],
"jsdoc/no-blank-block-descriptions": "error",
"jsdoc/require-returns-description": "warn",
"jsdoc/require-returns-check": [
"error",
{ "reportMissingReturnForUndefinedTypes": false }
],
"jsdoc/require-returns": [
"warn",
{
"forceRequireReturn": true,
"forceReturnsWithAsync": true
}
],
"jsdoc/require-param-type": "warn",
"jsdoc/require-param-description": "warn"
},
"overrides": [
{
@ -107,21 +150,20 @@ module.exports = {
}
},
// Override for jest puppeteer
// Override for TypeScript
{
"files": [
"**/*.spec.js",
"**/*.spec.jsx"
"**/*.ts",
],
env: {
jest: true,
},
globals: {
page: true,
browser: true,
context: true,
jestPuppeteer: true,
},
extends: [
"plugin:@typescript-eslint/recommended",
],
"rules": {
"jsdoc/require-returns-type": "off",
"jsdoc/require-param-type": "off",
"@typescript-eslint/no-explicit-any": "off",
"prefer-const": "off",
}
}
]
};
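For illustration, a small function like the following (a made-up example, not taken from the codebase) satisfies the stricter `jsdoc/*` rules configured above, such as `jsdoc/require-jsdoc`, `jsdoc/require-param-type`, `jsdoc/require-param-description` and `jsdoc/require-returns`:

```js
/**
 * Clamp a requested check interval to the allowed minimum.
 * @param {number} interval Interval in seconds requested by the user
 * @param {number} min Minimum allowed interval in seconds
 * @returns {number} The interval, limited to at least `min`
 */
function clampInterval(interval, min) {
    return Math.max(interval, min);
}
```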


@ -6,7 +6,7 @@ body:
- type: checkboxes
id: no-duplicate-issues
attributes:
label: "⚠️ Please verify that this bug has NOT been raised before."
label: "⚠️ Please verify that this question has NOT been raised before."
description: "Search in the issues sections by clicking [HERE](https://github.com/louislam/uptime-kuma/issues?q=)"
options:
- label: "I checked and didn't find similar issue"
@ -24,8 +24,14 @@ body:
required: true
attributes:
label: "📝 Describe your problem"
description: "Please walk us through it step by step."
description: "Please walk us through it step by step. Include all important details and add screenshots where appropriate"
placeholder: "Describe what are you asking for..."
- type: textarea
id: error-msg
validations:
required: false
attributes:
label: "📝 Error Message(s) or Log"
- type: input
id: uptime-kuma-version
attributes:
@ -38,7 +44,7 @@ body:
id: operating-system
attributes:
label: "💻 Operating System and Arch"
description: "Which OS is your server/device running on?"
description: "Which OS is your server/device running on? (For Replit, please do not report this bug)"
placeholder: "Ex. Ubuntu 20.04 x86"
validations:
required: true
@ -46,23 +52,24 @@ body:
id: browser-vendor
attributes:
label: "🌐 Browser"
description: "Which browser are you running on?"
description: "Which browser are you running on? (For Replit, please do not report this bug)"
placeholder: "Ex. Google Chrome 95.0.4638.69"
validations:
required: true
- type: input
id: docker-version
- type: textarea
id: deployment-info
attributes:
label: "🐋 Docker Version"
description: "If running with Docker, which version are you running?"
placeholder: "Ex. Docker 20.10.9 / K8S / Podman"
label: "🖥️ Deployment Environment"
description: |
examples:
- **Runtime**: Docker 20.10.9 / nodejs 14.18.0 / K8S via ... v1.3.3 / ..
- **Database**: sqlite/embedded mariadb/external mariadb
- **Filesystem used to store the database on**: Windows/ZFS/btrfs/NFSv3 on a SSD/HDD/eMMC
- **number of monitors**: 42
value: |
- Runtime:
- Database:
- Filesystem used to store the database on:
- number of monitors:
validations:
required: false
- type: input
id: nodejs-version
attributes:
label: "🟩 NodeJS Version"
description: "If running with Node.js? which version are you running?"
placeholder: "Ex. 14.18.0"
validations:
required: false
required: true


@ -3,14 +3,14 @@ description: "Submit a bug report to help us improve"
#title: "[Bug] "
labels: [bug]
body:
- type: checkboxes
id: no-duplicate-issues
- type: textarea
id: related-issues
validations:
required: true
attributes:
label: "⚠️ Please verify that this bug has NOT been raised before."
description: "Search in the issues sections by clicking [HERE](https://github.com/louislam/uptime-kuma/issues?q=)"
options:
- label: "I checked and didn't find similar issue"
required: true
label: "📑 I have found these related issues/pull requests"
description: "Search related issues by clicking [HERE](https://github.com/louislam/uptime-kuma/issues?q=) and explain what the difference between them or explain that you are unable to find any related issues"
placeholder: "Related to #1 by also touching the ... system. They should not be merged because ..."
- type: checkboxes
attributes:
label: "🛡️ Security Policy"
@ -31,7 +31,7 @@ body:
required: true
attributes:
label: "👟 Reproduction steps"
description: "How do you trigger this bug? Please walk us through it step by step."
description: "How do you trigger this bug? Please walk us through it step by step. Include all important details and add screenshots where appropriate"
placeholder: "..."
- type: textarea
id: expected-behavior
@ -61,8 +61,8 @@ body:
id: operating-system
attributes:
label: "💻 Operating System and Arch"
description: "Which OS is your server/device running on?"
placeholder: "Ex. Ubuntu 20.04 x86"
description: "Which OS is your server/device running on? (For Replit, please do not report this bug)"
placeholder: "Ex. Ubuntu 20.04 x64 "
validations:
required: true
- type: input
@ -73,22 +73,23 @@ body:
placeholder: "Ex. Google Chrome 95.0.4638.69"
validations:
required: true
- type: input
id: docker-version
- type: textarea
id: deployment-info
attributes:
label: "🐋 Docker Version"
description: "If running with Docker, which version are you running?"
placeholder: "Ex. Docker 20.10.9 / K8S / Podman"
label: "🖥️ Deployment Environment"
description: |
examples:
- **Runtime**: Docker 20.10.9 / nodejs 18.17.1 / K8S via ... v1.3.3 / ..
- **Database**: sqlite/embedded mariadb/external mariadb
- **Filesystem used to store the database on**: Windows/ZFS/btrfs/NFSv3 on a SSD/HDD/eMMC
- **number of monitors**: 42
value: |
- Runtime:
- Database:
- Filesystem used to store the database on:
- number of monitors:
validations:
required: false
- type: input
id: nodejs-version
attributes:
label: "🟩 NodeJS Version"
description: "If running with Node.js? which version are you running?"
placeholder: "Ex. 14.18.0"
validations:
required: false
required: true
- type: textarea
id: logs
attributes:


@ -3,14 +3,14 @@ description: "Submit a proposal for a new feature"
#title: "[Feature] "
labels: [feature-request]
body:
- type: checkboxes
id: no-duplicate-issues
- type: textarea
id: related-issues
validations:
required: true
attributes:
label: "⚠️ Please verify that this feature request has NOT been suggested before."
description: "Search in the issues sections by clicking [HERE](https://github.com/louislam/uptime-kuma/issues?q=)"
options:
- label: "I checked and didn't find similar feature request"
required: true
label: "📑 I have found these related issues/pull requests"
description: "Search related issues by clicking [HERE](https://github.com/louislam/uptime-kuma/issues?q=) and explain what the difference between them or explain that you are unable to find any related issues"
placeholder: "Related to #1 by also touching the ... system. They should not be merged because ..."
- type: dropdown
id: feature-area
attributes:
@ -18,10 +18,17 @@ body:
description: "What kind of feature request is this?"
multiple: true
options:
- API
- New Notification
- New Monitor
- UI Feature
- API / automation options
- New notification-provider
- Change to existing notification-provider
- New monitor
- Change to existing monitor
- Dashboard
- Status-page
- Maintenance
- Deployment
- Certificate expiry
- Settings
- Other
validations:
required: true

.github/ISSUE_TEMPLATE/security.md (new file)

@ -0,0 +1,17 @@
---
name: "Security Issue"
about: "Just for alerting @louislam, do not provide any details here"
title: "Security Issue"
ref: "main"
labels:
- security
---
DO NOT PROVIDE ANY DETAILS HERE. Please privately report to https://github.com/louislam/uptime-kuma/security/advisories/new.
Why is this issue needed? Because GitHub Advisories do not send a notification to @louislam, this issue is a workaround to do so.
Your GitHub Advisory URL:


@ -1,4 +1,8 @@
👉 Delete this line if you have read and agree our pull request rules and guidelines: https://github.com/louislam/uptime-kuma/blob/master/CONTRIBUTING.md#can-i-create-a-pull-request-for-uptime-kuma
⚠️⚠️⚠️ Since we do not accept all types of pull requests and do not want to waste your time, please be sure that you have read the pull request rules:
https://github.com/louislam/uptime-kuma/blob/master/CONTRIBUTING.md#can-i-create-a-pull-request-for-uptime-kuma
Tick the checkbox if you understand [x]:
- [ ] I have read and understand the pull request rules.
# Description
@ -11,8 +15,7 @@ Please delete any options that are not relevant.
- Bug fix (non-breaking change which fixes an issue)
- User interface (UI)
- New feature (non-breaking change which adds functionality)
- Breaking change (fix or feature that would cause existing functionality to not work as expected)
- Translation update
- Breaking change (a fix or feature that would cause existing functionality to not work as expected)
- Other
- This change requires a documentation update
@ -21,9 +24,8 @@ Please delete any options that are not relevant.
- [ ] My code follows the style guidelines of this project
- [ ] I ran ESLint and other linters for modified files
- [ ] I have performed a self-review of my own code and tested it
- [ ] I have commented my code, particularly in hard-to-understand areas
(including JSDoc for methods)
- [ ] My changes generate no new warnings
- [ ] I have commented my code, particularly in hard-to-understand areas (including JSDoc for methods)
- [ ] My changes generate no new warnings
- [ ] My code needed automated testing. I have added them (this is an optional task)
## Screenshots (if any)

.github/config/exclude.txt (new file)

@ -0,0 +1 @@
# This is a .gitignore style file for 'GrantBirki/json-yaml-validate' Action workflow


@ -1,52 +1,93 @@
# This workflow will do a clean install of node dependencies, cache/restore them, build the source code and run tests across different versions of node
# This workflow will do a clean install of node dependencies, build the source code and run tests across different versions of node
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-nodejs-with-github-actions
name: Auto Test
on:
push:
branches: [ master ]
branches: [ master, 1.23.X ]
paths-ignore:
- '*.md'
pull_request:
branches: [ master ]
branches: [ master, 1.23.X ]
paths-ignore:
- '*.md'
jobs:
auto-test:
needs: [ check-linters, e2e-test ]
runs-on: ${{ matrix.os }}
timeout-minutes: 15
strategy:
matrix:
os: [macos-latest, ubuntu-latest, windows-latest, ARM64]
node: [ 18, 20.5 ]
# See supported Node.js release schedule at https://nodejs.org/en/about/releases/
steps:
- run: git config --global core.autocrlf false # Mainly for Windows
- uses: actions/checkout@v4
- name: Use Node.js ${{ matrix.node }}
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node }}
- run: npm install
- run: npm run build
- run: npm run test-backend
env:
HEADLESS_TEST: 1
JUST_FOR_TEST: ${{ secrets.JUST_FOR_TEST }}
# As a lot of dev dependencies are not supported on ARMv7, we have to test it separately and just test if `npm ci --production` works
armv7-simple-test:
needs: [ check-linters ]
runs-on: ${{ matrix.os }}
timeout-minutes: 15
strategy:
matrix:
os: [macos-latest, ubuntu-latest, windows-latest]
node: [ 14, 16, 17, 18 ]
os: [ ARMv7 ]
node: [ 18, 20 ]
# See supported Node.js release schedule at https://nodejs.org/en/about/releases/
steps:
- run: git config --global core.autocrlf false # Mainly for Windows
- uses: actions/checkout@v3
- run: git config --global core.autocrlf false # Mainly for Windows
- uses: actions/checkout@v4
- name: Use Node.js ${{ matrix.node }}
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node }}
- run: npm ci --production
- name: Use Node.js ${{ matrix.node }}
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node }}
cache: 'npm'
- run: npm install
- run: npm run build
- run: npm test
env:
HEADLESS_TEST: 1
JUST_FOR_TEST: ${{ secrets.JUST_FOR_TEST }}
check-linters:
runs-on: ubuntu-latest
steps:
- run: git config --global core.autocrlf false # Mainly for Windows
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Use Node.js 14
uses: actions/setup-node@v3
- name: Use Node.js 20
uses: actions/setup-node@v4
with:
node-version: 14
cache: 'npm'
node-version: 20
- run: npm install
- run: npm run lint
- run: npm run lint:prod
e2e-test:
needs: [ check-linters ]
runs-on: ARM64
steps:
- run: git config --global core.autocrlf false # Mainly for Windows
- uses: actions/checkout@v4
- name: Use Node.js 20
uses: actions/setup-node@v4
with:
node-version: 20
- run: npm install
- run: npx playwright install
- run: npm run build
- run: npm run test-e2e


@ -1,4 +1,3 @@
name: Close Incorrect Issue
on:
@ -12,13 +11,13 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest]
node-version: [16.x]
node-version: [18]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v2
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node-version }}
cache: 'npm'

.github/workflows/codeql-analysis.yml (new file)

@ -0,0 +1,43 @@
name: "CodeQL"
on:
push:
branches: [ "master", "1.23.X"]
pull_request:
branches: [ "master", "1.23.X"]
schedule:
- cron: '16 22 * * 0'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
timeout-minutes: 360
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go', 'javascript-typescript' ]
steps:
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
with:
category: "/language:${{matrix.language}}"

.github/workflows/conflict_labeler.yml (new file)

@ -0,0 +1,26 @@
name: Merge Conflict Labeler
on:
push:
branches:
- master
pull_request_target:
branches:
- master
types: [synchronize]
jobs:
label:
name: Labeling
runs-on: ubuntu-latest
if: ${{ github.repository == 'louislam/uptime-kuma' }}
permissions:
contents: read
pull-requests: write
steps:
- name: Apply label
uses: eps1lon/actions-label-merge-conflict@v3
with:
dirtyLabel: 'needs:resolve-merge-conflict'
removeOnDirtyLabel: 'needs:resolve-merge-conflict'
repoToken: '${{ secrets.GITHUB_TOKEN }}'


@ -0,0 +1,27 @@
name: json-yaml-validate
on:
push:
branches:
- master
pull_request:
branches:
- master
- 1.23.X
workflow_dispatch:
permissions:
contents: read
pull-requests: write # enable write permissions for pull request comments
jobs:
json-yaml-validate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: json-yaml-validate
id: json-yaml-validate
uses: GrantBirki/json-yaml-validate@v2.4.0
with:
comment: "true" # enable comment mode
exclude_file: ".github/config/exclude.txt" # gitignore style file for exclusions


@ -0,0 +1,17 @@
name: prevent-file-change
on:
pull_request:
jobs:
check-file-changes:
runs-on: ubuntu-latest
steps:
- name: Prevent file change
uses: xalvarez/prevent-file-change-action@v1
with:
githubToken: ${{ secrets.GITHUB_TOKEN }}
# Regex, /src/lang/*.json is not allowed to be changed, except for /src/lang/en.json
pattern: '^(?!src/lang/en\.json$)src/lang/.*\.json$'
trustedAuthors: UptimeKumaBot
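To illustrate how the pattern above behaves (written here as a JavaScript regex literal; the file paths are just examples):

```js
const pattern = /^(?!src\/lang\/en\.json$)src\/lang\/.*\.json$/;

console.log(pattern.test("src/lang/de.json")); // true  -> change is flagged
console.log(pattern.test("src/lang/en.json")); // false -> change is allowed
console.log(pattern.test("src/util.js"));      // false -> not covered by this rule
```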

.github/workflows/stale-bot.yml (new file)

@ -0,0 +1,42 @@
name: 'Automatically close stale issues'
on:
workflow_dispatch:
schedule:
- cron: '0 */6 * * *'
#Run every 6 hours
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v8
with:
stale-issue-message: |-
We are clearing up our old `help`-issues and your issue has been open for 60 days with no activity.
If no comment is made and the stale label is not removed, this issue will be closed in 7 days.
days-before-stale: 60
days-before-close: 7
days-before-pr-stale: -1
days-before-pr-close: -1
exempt-issue-labels: 'News,Medium,High,discussion,bug,doc,feature-request'
exempt-issue-assignees: 'louislam'
operations-per-run: 200
- uses: actions/stale@v8
with:
stale-issue-message: |-
This issue was marked as `cannot-reproduce` by a maintainer.
If an issue is non-reproducible, we cannot fix it, as we do not know what the underlying issue is.
If you have any ideas how we can reproduce this issue, we would love to hear them.
We don't have a good way to deal with truly unreproducible issues and are going to close this issue in a month.
If you think there might be other differences in our environment or in how we tried to reproduce this, we would appreciate any ideas.
close-issue-message: |-
This issue will be closed as no way to reproduce it has been found.
If you/somebody finds a way how to (semi-reliably) reproduce this, we can reopen this issue. ^^
days-before-stale: 180
days-before-close: 30
days-before-pr-stale: -1
days-before-pr-close: -1
any-of-issue-labels: 'cannot-reproduce'
operations-per-run: 200

.gitignore

@ -7,9 +7,20 @@ dist-ssr
/data
!/data/.gitkeep
/data*
.vscode
/private
/out
/tmp
.env
/extra/healthcheck.exe
/extra/healthcheck
/extra/healthcheck-armv7
extra/exe-builder/bin
extra/exe-builder/obj
.vs
.vscode


@ -10,5 +10,7 @@
"color-function-notation": "legacy",
"shorthand-property-no-redundant-values": null,
"color-hex-length": null,
"declaration-block-no-redundant-longhand-properties": null,
"at-rule-no-unknown": null
}
}

CONTRIBUTING.md

@ -1,80 +1,219 @@
# Project Info
First of all, thank you everyone who made pull requests for Uptime Kuma, I never thought GitHub Community can be that nice! And also because of this, I also never thought other people actually read my code and edit my code. It is not structured and commented so well, lol. Sorry about that.
First of all, I want to thank everyone who has written issues or shared pull requests for Uptime Kuma.
I never thought the GitHub community would be so nice!
Because of this, I also never thought that other people would actually read and edit my code.
Parts of the code are not very well-structured or commented, sorry about that.
The project was created with vite.js (vue3). Then I created a subdirectory called "server" for server part. Both frontend and backend share the same package.json.
The project was created with `vite.js` and is written in `vue3`.
Our backend lives in the `server`-directory and mostly communicates via websockets.
Both frontend and backend share the same `package.json`.
The frontend code build into "dist" directory. The server (express.js) exposes the "dist" directory as root of the endpoint. This is how production is working.
## Key Technical Skills
- Node.js (You should know what are promise, async/await and arrow function etc.)
- Socket.io
- SCSS
- Vue.js
- Bootstrap
- SQLite
For production, the frontend is built into the `dist` directory and the server (`express.js`) exposes the `dist` directory as the root of the endpoint.
For development, we run vite in development mode on another port.
## Directories
- data (App data)
- dist (Frontend build)
- extra (Extra useful scripts)
- public (Frontend resources for dev only)
- server (Server source code)
- src (Frontend source code)
- test (unit test)
- `config` (dev config files)
- `data` (App data)
- `db` (Base database and migration scripts)
- `dist` (Frontend build)
- `docker` (Dockerfiles)
- `extra` (Extra useful scripts)
- `public` (Frontend resources for dev only)
- `server` (Server source code)
- `src` (Frontend source code)
- `test` (unit test)
## Can I create a pull request for Uptime Kuma?
Yes, you can. However, since I don't want to waste your time, be sure to **create empty draft pull request, so we can discuss first** if it is a large pull request or you don't know it will be merged or not.
Yes or no, it depends on what you are trying to do.
Both your time and our maintainers' time is precious, and we don't want to waste either.
Also, please don't rush or ask for ETA, because I have to understand the pull request, make sure it is no breaking changes and stick to my vision of this project, especially for large pull requests.
If you have any questions, or any process/.. is not clear, you are likely not alone => please ask them ^^
I will mark your pull request in the [milestones](https://github.com/louislam/uptime-kuma/milestones), if I am plan to review and merge it.
Different guidelines exist for different types of pull requests (PRs):
- <details><summary><b>security fixes</b></summary>
<p>
Submitting security fixes is something that may put the community at risk.
Please read through our [security policy](SECURITY.md) and submit vulnerabilities via an [advisory](https://github.com/louislam/uptime-kuma/security/advisories/new) + [issue](https://github.com/louislam/uptime-kuma/issues/new?assignees=&labels=help&template=security.md) instead.
We encourage you to include how to fix the vulnerability if you know how to, but this is not required.
Following the security policy allows us to properly test and fix bugs.
This review allows us to notice if there are any changes necessary to unrelated parts like the documentation.
[**PLEASE SEE OUR SECURITY POLICY.**](SECURITY.md)
</p>
</details>
- <details><summary><b>small, non-breaking bug fixes</b></summary>
<p>
If you come across a bug and think you can solve it, we appreciate your work.
Please make sure that you follow these rules:
- keep the PR as small as possible, fix only one thing at a time => keeping it reviewable
- test that your code does what you claim it does.
<sub>Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.</sub>
</p>
</details>
- <details><summary><b>translations / internationalisation (i18n)</b></summary>
<p>
We use weblate to localise this project into many languages.
If you are unhappy with a translation, this is the best place to start.
On how to translate using weblate, please see [these instructions](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md).
There are two cases in which a change cannot be done in weblate and requires a PR:
- A text may not be currently localisable. In this case, **adding a new language key** via `$t("languageKey")` might be necessary
- language keys need to be **added to `en.json`** to be visible in weblate. If this has not happened, a PR is appreciated.
- **Adding a new language** requires a new file see [these instructions](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md)
<sub>Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.</sub>
</p>
</details>
- <details><summary><b>new notification providers</b></summary>
<p>
To set up a new notification provider, these files need to be modified/created (a rough sketch of such a provider is shown after this list):
- `server/notification-providers/PROVIDER_NAME.js` is where the heart of the notification provider lives.
- Both `monitorJSON` and `heartbeatJSON` can be `null` for some events.
If both are `null`, this is a general testing message, but if just `heartbeatJSON` is `null` this is a certificate expiry.
- Please wrap the axios call into a
```js
try {
let result = await axios.post(...);
if (result.status === ...) ...
} catch (error) {
this.throwGeneralAxiosError(error);
}
```
- `server/notification.js` is where the backend of the notification provider needs to be registered.
*If you have an idea how we can skip this step, we would love to hear about it ^^*
- `src/components/NotificationDialog.vue` is where you need to decide if the provider is a regional or a global one and add it with a name to the respective list
- `src/components/notifications/PROVIDER_NAME.vue` is where the frontend of each provider lives.
Please make sure that you have:
- used `HiddenInput` for secret credentials
- included all the necessary help texts/placeholders/.. to make sure the notification provider is simple to set up for new users.
- included all translations (`{{ $t("Translation key") }}`, [`i18n-t keypath="Translation key">`](https://vue-i18n.intlify.dev/guide/advanced/component.html)) in `src/lang/en.json` to enable our translators to translate this
- `src/components/notifications/index.js` is where the frontend of the provider needs to be registered.
*If you have an idea how we can skip this step, we would love to hear about it ^^*
✅ Accept:
- Bug/Security fix
- Translations
- Adding notification providers
Offering notifications is close to the core of what we are as an uptime monitor.
Therefore, making sure that they work is also really important.
Because testing notification providers is quite time intensive, we mostly offload this onto the person contributing a notification provider.
To make sure you have tested the notification provider, please include screenshots of the following events in the pull-request description:
- `UP`/`DOWN`
- Certificate Expiry via https://expired.badssl.com/
- Testing (the test button on the notification provider setup page)
Using the following way to format this is encouraged:
```md
| Event | Before | After |
|--------------------|------------------|------------------|
| `UP` | paste-image-here | paste-image-here |
| `DOWN` | paste-image-here | paste-image-here |
| Certificate-expiry | paste-image-here | paste-image-here |
| Testing | paste-image-here | paste-image-here |
```
⚠️ Discussion First
- Large pull requests
- New features
<sub>Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.</sub>
</p>
</details>
- <details><summary><b>new monitoring types</b></summary>
<p>
❌ Won't Merge
- Do not pass auto test
- Any breaking changes
- Duplicated pull request
- Buggy
- Existing logic is completely modified or deleted for no reason
- A function that is completely out of scope
To set up a new monitoring type, these files need to be modified/created (a rough sketch of such a monitor type is shown after this list):
- `server/monitor-types/MONITORING_TYPE.js` is the core of each monitor.
the `async check(...)`-function should:
- throw an error for each fault that is detected with an actionable error message
- in the happy-path, you should set `heartbeat.msg` to a successful message and set `heartbeat.status = UP`
- `server/uptime-kuma-server.js` is where the monitoring backend needs to be registered.
*If you have an idea how we can skip this step, we would love to hear about it ^^*
- `src/pages/EditMonitor.vue` is the shared frontend users interact with.
Please make sure that you have:
- used `HiddenInput` for secret credentials
- included all the necessary help texts/placeholders/.. to make sure the monitor is simple to set up for new users.
- included all translations (`{{ $t("Translation key") }}`, [`i18n-t keypath="Translation key">`](https://vue-i18n.intlify.dev/guide/advanced/component.html)) in `src/lang/en.json` to enable our translators to translate this
-
<sub>Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.</sub>
</p>
</details>
- <details><summary><b>new features/ major changes / breaking bugfixes</b></summary>
<p>
be sure to **create an empty draft pull request or open an issue, so we can have a discussion first**.
This is especially important if it is a large pull request or you don't know whether it will be merged or not.
<sub>Because of the large impact of this work, only senior maintainers may merge PRs in this area.</sub>
</p>
</details>
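Two of the list items above describe concrete file layouts, so here are two minimal sketches. Both are made-up examples: the import paths, class names and config fields are assumptions based on the descriptions above, not actual Uptime Kuma code.

A notification provider (assuming the base class is exported from `./notification-provider` and provides the `throwGeneralAxiosError` helper used above; `myProviderURL` is a hypothetical setting):

```js
const NotificationProvider = require("./notification-provider");
const axios = require("axios");

class MyProvider extends NotificationProvider {
    name = "MyProvider"; // must match the name registered in server/notification.js

    /**
     * Send a notification via this provider.
     * @param {object} notification Provider settings configured by the user
     * @param {string} msg Message to send
     * @param {?object} monitorJSON Monitor details (can be null for general/test messages)
     * @param {?object} heartbeatJSON Heartbeat details (can be null, e.g. for certificate expiry)
     * @returns {Promise<string>} A success message
     */
    async send(notification, msg, monitorJSON = null, heartbeatJSON = null) {
        try {
            // "myProviderURL" would be defined in src/components/notifications/MyProvider.vue
            await axios.post(notification.myProviderURL, { text: msg });
            return "Sent Successfully.";
        } catch (error) {
            this.throwGeneralAxiosError(error);
        }
    }
}

module.exports = MyProvider;
```

A monitor type (the base-class and `UP` constant import paths are assumptions, and the check itself is deliberately trivial):

```js
const { MonitorType } = require("./monitor-type"); // assumed base class location
const { UP } = require("../../src/util"); // assumed location of the status constants

class ExampleMonitorType extends MonitorType {
    name = "example";

    /**
     * Run a single check for this monitor.
     * @param {object} monitor The monitor configuration
     * @param {object} heartbeat The heartbeat to write the result into
     * @param {object} _server The UptimeKumaServer instance (unused here)
     * @returns {Promise<void>}
     */
    async check(monitor, heartbeat, _server) {
        // Hypothetical fault: treat a missing hostname as an actionable error.
        if (!monitor.hostname) {
            throw new Error("No hostname is configured for this monitor");
        }
        heartbeat.msg = `Hostname ${monitor.hostname} looks fine`;
        heartbeat.status = UP;
    }
}

module.exports = { ExampleMonitorType };
```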
The following rules are essential for making your PR mergeable:
- Addressing multiple issues in one huge PR makes it more difficult to review and causes conflicts with other PRs. Please
- (if possible) **create one PR for one issue** or
- (if not possible) **explain which issues a PR addresses and why this PR should not be broken apart**
- Make sure your **PR passes our continuous integration**.
PRs will not be merged unless all CI-Checks are green.
- **Breaking changes** (unless for a good reason and discussed beforehand) will not get merged / not get merged quickly.
Such changes require a major version release.
- **Test your code** before submitting a PR.
Buggy PRs will not be merged.
- Make sure the **UI/UX is close to Uptime Kuma**.
- **Think about the maintainability**:
Don't add functionality that is completely **out of scope**.
Keep in mind that we need to be able to maintain the functionality.
- Don't modify or delete existing logic without a valid reason.
- Don't convert existing code into other programming languages for no reason.
I ([@louislam](https://github.com/louislam)) have the final say.
If your pull request does not meet my expectations, I will reject it, no matter how much time you spent on it.
Therefore, it is essential to have a discussion beforehand.
I will assign your pull request to a [milestone](https://github.com/louislam/uptime-kuma/milestones), if I plan to review and merge it.
Please don't rush or ask for an ETA.
We have to understand the pull request, make sure it has no breaking changes and stick to the vision of this project, especially for large pull requests.
## I'd like to work on an issue. How do I do that?
We have found that assigning people to issues is management-overhead that we don't need.
A short comment that you want to try your hand at this issue is appreciated to save other devs time.
If you come across any problem during development, feel free to leave a comment with what you are stuck on.
### Recommended Pull Request Guideline
Before deep into coding, discussion first is preferred. Creating an empty pull request for discussion would be recommended.
Before diving deep into coding, having a discussion first by creating an empty draft pull request is preferred.
The rationale behind this is that we can align the direction and scope of the feature to eliminate any conflicts with existing and planned work, and can help by pointing out any potential pitfalls.
1. Fork the project
1. Clone your fork repo to local
1. Create a new branch
1. Create an empty commit
`git commit -m "[empty commit] pull request for <YOUR TASK NAME>" --allow-empty`
1. Push to your fork repo
1. Create a pull request: https://github.com/louislam/uptime-kuma/compare
1. Write a proper description
1. Click "Change to draft"
1. Discussion
2. Clone your fork repo to local
3. Create a new branch
4. Create an empty commit: `git commit -m "<YOUR TASK NAME>" --allow-empty`
5. Push to your fork repo
6. Prepare a pull request: https://github.com/louislam/uptime-kuma/compare
7. Write a proper description. You can mention @louislam in it, so @louislam will get the notification.
8. Create your pull request as a Draft
9. Wait for the discussion
## Project Styles
I personally do not like something need to learn so much and need to config so much before you can finally start the app.
I personally do not like something that requires so many configurations before you can finally start the app.
The goal is to make the Uptime Kuma installation as easy as installing a mobile app.
- Easy to install for non-Docker users, no native build dependency is needed (at least for x86_64), no extra config, no extra effort to get it run
- Single container for Docker users, no very complex docker-compose file. Just map the volume and expose the port, then good to go
- Settings should be configurable in the frontend. Environment variable is not encouraged, unless it is related to startup such as `DATA_DIR`.
- Easy to install for non-Docker users
- no native build dependency is needed (for `x86_64`/`armv7`/`arm64`)
- no extra configuration and
- no extra effort required to get it running
- Single container for Docker users
- no complex docker-compose file
- mapping the volume and exposing the port should be the only requirements
- Settings should be configurable in the frontend. Environment variables are discouraged, unless it is related to startup such as `DATA_DIR`
- Easy to use
- The web UI styling should be consistent and nice.
- The web UI styling should be consistent and nice
## Coding Styles
@ -83,7 +222,7 @@ I personally do not like something need to learn so much and need to config so m
- Follow ESLint
- Methods and functions should be documented with JSDoc
## Name convention
## Name Conventions
- Javascript/Typescript: camelCaseType
- SQLite: snake_case (Underscore)
@ -91,13 +230,25 @@ I personally do not like something need to learn so much and need to config so m
## Tools
- Node.js >= 14
- NPM >= 8.5
- Git
- IDE that supports ESLint and EditorConfig (I am using IntelliJ IDEA)
- A SQLite GUI tool (SQLite Expert Personal is suggested)
- [`Node.js`](https://nodejs.org/) >= 18
- [`npm`](https://www.npmjs.com/) >= 9.3
- [`git`](https://git-scm.com/)
- IDE that supports [`ESLint`](https://eslint.org/) and EditorConfig (I am using [`IntelliJ IDEA`](https://www.jetbrains.com/idea/))
- A SQLite GUI tool (f.ex. [`SQLite Expert Personal`](https://www.sqliteexpert.com/download.html) or [`DBeaver Community`](https://dbeaver.io/download/))
## Install dependencies
### GitHub Codespaces
If you don't want to set up a local environment, you can now develop on GitHub Codespaces; read more:
https://github.com/louislam/uptime-kuma/tree/master/.devcontainer
## Git Branches
- `master`: 2.X.X development. If you want to add a new feature, your pull request should be based on this.
- `1.23.X`: 1.23.X development. If you want to fix a bug for v1 and v2, your pull request should be based on this.
- All other branches are unused, outdated or for dev.
## Install Dependencies for Development
```bash
npm ci
@ -115,33 +266,42 @@ Port `3000` and port `3001` will be used.
npm run dev
```
But sometimes you would like to restart the server but not the frontend; in that case, you can run these commands in two terminals:
```bash
npm run start-frontend-dev
npm run start-server-dev
```
## Backend Server
It binds to `0.0.0.0:3001` by default.
The backend is an `express.js` server with `socket.io` integrated.
It uses `socket.io` to communicate with clients, and most server logic is encapsulated in the `socket.io` handlers.
`express.js` is also used to serve:
It is mainly a socket.io app + express.js.
- as an entry point for redirecting to a status page or the dashboard
- the frontend built files (`index.html`, `*.js`, `*.css`, etc.)
- internal APIs of the status page
express.js is used for:
- entry point such as redirecting to a status page or the dashboard
- serving the frontend built files (index.html, .js and .css etc.)
- serving internal APIs of status page
### Structure in `/server/`
### Structure in /server/
- model/ (Object model, auto mapping to the database table name)
- modules/ (Modified 3rd-party modules)
- notification-providers/ (individual notification logic)
- routers/ (Express Routers)
- socket-handler (Socket.io Handlers)
- server.js (Server entry point and main logic)
- `jobs/` (Jobs that are running in another process)
- `model/` (Object model, auto-mapping to the database table name)
- `modules/` (Modified 3rd-party modules)
- `monitor_types/` (Monitor Types)
- `notification-providers/` (individual notification logic)
- `routers/` (Express Routers)
- `socket-handler/` (Socket.io Handlers)
- `server.js` (Server entry point)
- `uptime-kuma-server.js` (UptimeKumaServer class, main logic should be here, but some still in `server.js`)
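To give an idea of the socket-handler pattern mentioned above, a module in `socket-handler/` roughly follows this shape (the event name and payload are purely illustrative, not an actual Uptime Kuma event):

```js
module.exports.exampleSocketHandler = (socket) => {
    socket.on("exampleAction", async (monitorID, callback) => {
        try {
            // ...do some server-side work for the given monitor...
            callback({
                ok: true,
                msg: "Done",
            });
        } catch (e) {
            callback({
                ok: false,
                msg: e.message,
            });
        }
    });
};
```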
## Frontend Dev Server
It binds to `0.0.0.0:3000` by default. Frontend dev server is used for development only.
It binds to `0.0.0.0:3000` by default. The frontend dev server is used for development only.
For production, it is not used. It will be compiled to `dist` directory instead.
You can use Vue.js devtools Chrome extension for debugging.
@ -157,94 +317,201 @@ Uptime Kuma Frontend is a single page application (SPA). Most paths are handled
The router is in `src/router.js`
As you can see, most data in frontend is stored in root level, even though you changed the current router to any other pages.
As you can see, most data in the frontend is stored at the root level, even though you changed the current router to any other pages.
The data and socket logic are in `src/mixins/socket.js`.
## Database Migration
1. Create `patch-{name}.sql` in `./db/`
2. Add your patch filename in the `patchList` list in `./server/database.js`
See: https://github.com/louislam/uptime-kuma/tree/master/db/knex_migrations
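Migrations live in `db/knex_migrations` (see the link above). As a rough sketch, a migration file there follows the standard Knex shape — the table and column names below are made up for illustration:

```js
// db/knex_migrations/<timestamp>_add_example_column.js (hypothetical file name)
exports.up = function (knex) {
    return knex.schema.alterTable("monitor", (table) => {
        table.string("example_column"); // made-up column, for illustration only
    });
};

exports.down = function (knex) {
    return knex.schema.alterTable("monitor", (table) => {
        table.dropColumn("example_column");
    });
};
```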
## Unit Test
It is an end-to-end testing. It is using Jest and Puppeteer.
```bash
npm run build
npm test
```
By default, the Chromium window will be shown up during the test. Specifying `HEADLESS_TEST=1` for terminal environments.
## Dependencies
## Update Dependencies
Both frontend and backend share the same `package.json`.
However, the frontend dependencies are eventually not used in the production environment, because it is usually also baked into `dist` files. So:
Install `ncu`
https://github.com/raineorshine/npm-check-updates
- Frontend dependencies = "devDependencies"
- Examples: `vue`, `chart.js`
- Backend dependencies = "dependencies"
- Examples: `socket.io`, `sqlite3`
- Development dependencies = "devDependencies"
- Examples: `eslint`, `sass`
```bash
ncu -u -t patch
npm install
```
### Update Dependencies
Since previously updating Vite 2.5.10 to 2.6.0 broke the application completely, from now on, it should update patch release version only.
Since previously updating Vite 2.5.10 to 2.6.0 broke the application completely, from now on, it should update the patch release version only.
Patch release = the third digit ([Semantic Versioning](https://semver.org/))
If for security / bug / other reasons, a library must be updated, breaking changes need to be checked by the person proposing the change.
## Translations
Please read: https://github.com/louislam/uptime-kuma/tree/master/src/languages
Please add **all** the strings which are translatable to `src/lang/en.json` (if translation keys are omitted, they cannot be translated).
**Don't include any other languages in your initial pull request** (even if this is your mother tongue), to avoid merge-conflicts between weblate and `master`.
The translations can then (after merging a PR into `master`) be translated by awesome people donating their language skills.
If you want to help by translating Uptime Kuma into your language, please visit the [instructions on how to translate using weblate](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md).
## Spelling & Grammar
Feel free to correct the grammar in the documentation or code.
My mother language is not English and my grammar is not that great.
## Wiki
Since there is no way to make a pull request to wiki's repo, I have set up another repo to do that.
Since there is no way to make a pull request to the wiki, I have set up another repo to do that.
https://github.com/louislam/uptime-kuma-wiki
## Docker
### Arch
- amd64
- arm64
- armv7
### Docker Tags
#### v2
- `2`, `latest-2`: v2 with full features such as Chromium and bundled MariaDB
- `2.x.x`
- `2-slim`: v2 with basic features
- `2.x.x-slim`
- `beta2`: Latest beta build
- `2.x.x-beta.x`
- `nightly2`: Dev build
- `base2`: Basic Debian setup without Uptime Kuma source code (Full features)
- `base2-slim`: Basic Debian setup without Uptime Kuma source code
- `pr-test2`: For testing pull request without setting up a local environment
#### v1
- `1`, `latest`, `1-debian`, `debian`: Latest version of v1
- `1.x.x`, `1.x.x-debian`
- `1.x.x-beta.x`: Beta build
- `beta`: Latest beta build
- `nightly`: Dev build
- `base-debian`: Basic Debian setup without Uptime Kuma source code
- `pr-test`: For testing pull request without setting up a local environment
- `base-alpine`: (Deprecated) Basic Alpine setup without Uptime Kuma source code
- `1-alpine`, `alpine`: (Deprecated)
- `1.x.x-alpine`: (Deprecated)
## Maintainer
Check the latest issues and pull requests:
https://github.com/louislam/uptime-kuma/issues?q=sort%3Aupdated-desc
### Release Procedures
### What is a maintainer and what are their roles?
1. Draft a release note
2. Make sure the repo is cleared
3. `npm run release-final with env vars: `VERSION` and `GITHUB_TOKEN`
4. Wait until the `Press any key to continue`
5. `git push`
6. Publish the release note as 1.X.X
7. Press any key to continue
8. SSH to demo site server and update to 1.X.X
This project has multiple maintainers which specialise in different areas.
Currently, there are 3 maintainers:
Checking:
| Person | Role | Main Area |
|-------------------|-------------------|------------------|
| `@louislam` | senior maintainer | major features |
| `@chakflying` | junior maintainer | fixing bugs |
| `@commanderstorm` | junior maintainer | issue-management |
- Check all tags is fine on https://hub.docker.com/r/louislam/uptime-kuma/tags
- Try the Docker image with tag 1.X.X (Clean install / amd64 / arm64 / armv7)
- Try clean installation with Node.js
### Procedures
### Release Beta Procedures
We have a few procedures we follow. These are documented here:
1. Draft a release note, check "This is a pre-release"
2. Make sure the repo is cleared
3. `npm run release-beta` with env vars: `VERSION` and `GITHUB_TOKEN`
4. Wait until the `Press any key to continue`
5. Publish the release note as 1.X.X-beta.X
6. Press any key to continue
- <details><summary>Release</summary>
<p>
### Release Wiki
1. Draft a release note
2. Make sure the repo is cleared
3. If the healthcheck is updated, remember to re-compile it: `npm run build-docker-builder-go`
4. `npm run release-final` with env vars: `VERSION` and `GITHUB_TOKEN`
5. Wait until the `Press any key to continue`
6. `git push`
7. Publish the release note as `1.X.X`
8. Press any key to continue
9. Deploy to the demo server: `npm run deploy-demo-server`
#### Setup Repo
These Items need to be checked:
```bash
git clone https://github.com/louislam/uptime-kuma-wiki.git
cd uptime-kuma-wiki
git remote add production https://github.com/louislam/uptime-kuma.wiki.git
```
- [ ] Check that all tags are fine on https://hub.docker.com/r/louislam/uptime-kuma/tags
- [ ] Try the Docker image with tag 1.X.X (Clean install / amd64 / arm64 / armv7)
- [ ] Try clean installation with Node.js
</p>
</details>
- <details><summary>Release Beta</summary>
<p>
#### Push to Production Wiki
1. Draft a release note, check `This is a pre-release`
2. Make sure the repo is cleared
3. `npm run release-beta` with env vars: `VERSION` and `GITHUB_TOKEN`
4. Wait until the `Press any key to continue`
5. Publish the release note as `1.X.X-beta.X`
6. Press any key to continue
</p>
</details>
- <details><summary>Release Wiki</summary>
<p>
```bash
git pull
git push production master
```
**Setup Repo**
```bash
git clone https://github.com/louislam/uptime-kuma-wiki.git
cd uptime-kuma-wiki
git remote add production https://github.com/louislam/uptime-kuma.wiki.git
```
**Push to Production Wiki**
```bash
git pull
git push production master
```
</p>
</details>
- <details><summary>Change the base of a pull request such as <code>master</code> to <code>1.23.X</code></summary>
<p>
```bash
git rebase --onto <new parent> <old parent>
```
</p>
</details>
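For example, to move a feature branch that was based on `master` onto `1.23.X` (the branch name is illustrative):

```bash
git rebase --onto 1.23.X master my-feature-branch
```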
### Set up a Docker Builder
- amd64 and armv7 are built locally.
- arm64 is built using a remote arm64 CPU, as the emulator is too slow and can no longer get through the `npm ci` command.
1. Add the public key to the remote server.
2. Add the remote context. The remote machine must be arm64 and have Docker CE installed.
```
docker context create oracle-arm64-jp --docker "host=ssh://root@100.107.174.88"
```
3. Create a new builder.
```
docker buildx create --name kuma-builder --platform linux/amd64,linux/arm/v7
docker buildx use kuma-builder
docker buildx inspect --bootstrap
```
4. Append the remote context to the builder.
```
docker buildx create --append --name kuma-builder --platform linux/arm64 oracle-arm64-jp
```
5. Verify the builder and check if the builder is using `kuma-builder`.
```
docker buildx inspect kuma-builder
docker buildx ls
```

README.md

@ -1,39 +1,39 @@
# Uptime Kuma
<a target="_blank" href="https://github.com/louislam/uptime-kuma"><img src="https://img.shields.io/github/stars/louislam/uptime-kuma" /></a> <a target="_blank" href="https://hub.docker.com/r/louislam/uptime-kuma"><img src="https://img.shields.io/docker/pulls/louislam/uptime-kuma" /></a> <a target="_blank" href="https://hub.docker.com/r/louislam/uptime-kuma"><img src="https://img.shields.io/docker/v/louislam/uptime-kuma/latest?label=docker%20image%20ver." /></a> <a target="_blank" href="https://github.com/louislam/uptime-kuma"><img src="https://img.shields.io/github/last-commit/louislam/uptime-kuma" /></a> <a target="_blank" href="https://opencollective.com/uptime-kuma"><img src="https://opencollective.com/uptime-kuma/total/badge.svg?label=Open%20Collective%20Backers&color=brightgreen" /></a>
[![GitHub Sponsors](https://img.shields.io/github/sponsors/louislam?label=GitHub%20Sponsors)](https://github.com/sponsors/louislam)
<div align="center" width="100%">
<img src="./public/icon.svg" width="128" alt="" />
</div>
It is a self-hosted monitoring tool like "Uptime Robot".
# Uptime Kuma
<img src="https://uptime.kuma.pet/img/dark.jpg" width="700" alt="" />
Uptime Kuma is an easy-to-use self-hosted monitoring tool.
<a target="_blank" href="https://github.com/louislam/uptime-kuma"><img src="https://img.shields.io/github/stars/louislam/uptime-kuma?style=flat" /></a> <a target="_blank" href="https://hub.docker.com/r/louislam/uptime-kuma"><img src="https://img.shields.io/docker/pulls/louislam/uptime-kuma" /></a> <a target="_blank" href="https://hub.docker.com/r/louislam/uptime-kuma"><img src="https://img.shields.io/docker/v/louislam/uptime-kuma/latest?label=docker%20image%20ver." /></a> <a target="_blank" href="https://github.com/louislam/uptime-kuma"><img src="https://img.shields.io/github/last-commit/louislam/uptime-kuma" /></a> <a target="_blank" href="https://opencollective.com/uptime-kuma"><img src="https://opencollective.com/uptime-kuma/total/badge.svg?label=Open%20Collective%20Backers&color=brightgreen" /></a>
[![GitHub Sponsors](https://img.shields.io/github/sponsors/louislam?label=GitHub%20Sponsors)](https://github.com/sponsors/louislam) <a href="https://weblate.kuma.pet/projects/uptime-kuma/uptime-kuma/">
<img src="https://weblate.kuma.pet/widgets/uptime-kuma/-/svg-badge.svg" alt="Translation status" />
</a>
<img src="https://user-images.githubusercontent.com/1336778/212262296-e6205815-ad62-488c-83ec-a5b0d0689f7c.jpg" width="700" alt="" />
## 🥔 Live Demo
Try it!
https://demo.uptime.kuma.pet
Demo Server (Location: Frankfurt - Germany): https://demo.kuma.pet/start-demo
It is a temporary live demo, all data will be deleted after 10 minutes. The server is located in Tokyo, so if you live far from there, it may affect your experience. I suggest that you should install and try it out for the best demo experience.
VPS is sponsored by Uptime Kuma sponsors on [Open Collective](https://opencollective.com/uptime-kuma)! Thank you so much!
It is a temporary live demo, all data will be deleted after 10 minutes. Sponsored by [Uptime Kuma Sponsors](https://github.com/louislam/uptime-kuma#%EF%B8%8F-sponsors).
## ⭐ Features
* Monitoring uptime for HTTP(s) / TCP / HTTP(s) Keyword / Ping / DNS Record / Push / Steam Game Server / Docker Containers.
* Fancy, Reactive, Fast UI/UX.
* Notifications via Telegram, Discord, Gotify, Slack, Pushover, Email (SMTP), and [90+ notification services, click here for the full list](https://github.com/louislam/uptime-kuma/tree/master/src/components/notifications).
* 20 second intervals.
* [Multi Languages](https://github.com/louislam/uptime-kuma/tree/master/src/languages)
* Multiple Status Pages
* Map Status Page to Domain
* Ping Chart
* Certificate Info
* Proxy Support
* 2FA available
- Monitoring uptime for HTTP(s) / TCP / HTTP(s) Keyword / HTTP(s) Json Query / Ping / DNS Record / Push / Steam Game Server / Docker Containers
- Fancy, Reactive, Fast UI/UX
- Notifications via Telegram, Discord, Gotify, Slack, Pushover, Email (SMTP), and [90+ notification services, click here for the full list](https://github.com/louislam/uptime-kuma/tree/master/src/components/notifications)
- 20-second intervals
- [Multi Languages](https://github.com/louislam/uptime-kuma/tree/master/src/lang)
- Multiple status pages
- Map status pages to specific domains
- Ping chart
- Certificate info
- Proxy support
- 2FA support
## 🔧 How to Install
@ -43,21 +43,32 @@ VPS is sponsored by Uptime Kuma sponsors on [Open Collective](https://opencollec
docker run -d --restart=always -p 3001:3001 -v uptime-kuma:/app/data --name uptime-kuma louislam/uptime-kuma:1
```
⚠️ Please use a **local volume** only. Other types such as NFS are not supported.
Uptime Kuma is now running on <http://0.0.0.0:3001>.
Browse to http://localhost:3001 after starting.
> [!WARNING]
> File Systems like **NFS** (Network File System) are **NOT** supported. Please map to a local directory or volume.
> [!NOTE]
> If you want to limit exposure to localhost (without exposing port for other users or to use a [reverse proxy](https://github.com/louislam/uptime-kuma/wiki/Reverse-Proxy)), you can expose the port like this:
>
> ```bash
> docker run -d --restart=always -p 127.0.0.1:3001:3001 -v uptime-kuma:/app/data --name uptime-kuma louislam/uptime-kuma:1
> ```
### 💪🏻 Non-Docker
Required Tools:
- [Node.js](https://nodejs.org/en/download/) >= 14
- [Git](https://git-scm.com/downloads)
- [pm2](https://pm2.keymetrics.io/) - For run in background
Requirements:
- Platform
- ✅ Major Linux distros such as Debian, Ubuntu, CentOS, Fedora and ArchLinux etc.
- ✅ Windows 10 (x64), Windows Server 2012 R2 (x64) or higher
- ❌ Replit / Heroku
- [Node.js](https://nodejs.org/en/download/) 18 / 20.4
- [npm](https://docs.npmjs.com/cli/) 9
- [Git](https://git-scm.com/downloads)
- [pm2](https://pm2.keymetrics.io/) - For running Uptime Kuma in the background
```bash
# Update your npm to the latest version
npm install npm -g
git clone https://github.com/louislam/uptime-kuma.git
cd uptime-kuma
npm run setup
@ -65,16 +76,15 @@ npm run setup
# Option 1. Try it
node server/server.js
# (Recommended) Option 2. Run in background using PM2
# Install PM2 if you don't have it:
# (Recommended) Option 2. Run in the background using PM2
# Install PM2 if you don't have it:
npm install pm2 -g && pm2 install pm2-logrotate
# Start Server
pm2 start server/server.js --name uptime-kuma
```
Browse to http://localhost:3001 after starting.
Uptime Kuma is now running on http://localhost:3001
More useful PM2 Commands
@ -100,14 +110,10 @@ https://github.com/louislam/uptime-kuma/wiki/%F0%9F%86%99-How-to-Update
## 🆕 What's Next?
I will mark requests/issues to the next milestone.
I will assign requests/issues to the next milestone.
https://github.com/louislam/uptime-kuma/milestones
Project Plan:
https://github.com/louislam/uptime-kuma/projects/1
## ❤️ Sponsors
Thank you so much! (GitHub Sponsors will be updated manually. OpenCollective sponsors will be updated automatically, the list will be cached by GitHub though. It may need some time to be updated)
@ -134,40 +140,56 @@ Telegram Notification Sample:
## Motivation
* I was looking for a self-hosted monitoring tool like "Uptime Robot", but it is hard to find a suitable one. One of the close ones is statping. Unfortunately, it is not stable and no longer maintained.
* Want to build a fancy UI.
* Learn Vue 3 and vite.js.
* Show the power of Bootstrap 5.
* Try to use WebSocket with SPA instead of REST API.
* Deploy my first Docker image to Docker Hub.
- I was looking for a self-hosted monitoring tool like "Uptime Robot", but it is hard to find a suitable one. One of the closest ones is statping. Unfortunately, it is not stable and no longer maintained.
- Wanted to build a fancy UI.
- Learn Vue 3 and vite.js.
- Show the power of Bootstrap 5.
- Try to use WebSocket with SPA instead of a REST API.
- Deploy my first Docker image to Docker Hub.
If you love this project, please consider giving me a ⭐.
If you love this project, please consider giving it a ⭐.
## 🗣️ Discussion
## 🗣️ Discussion / Ask for Help
### Issues Page
⚠️ For any general or technical questions, please don't send me an email, as I am unable to provide support in that manner. I will not respond if you ask questions there.
You can discuss or ask for help in [issues](https://github.com/louislam/uptime-kuma/issues).
I recommend using Google, GitHub Issues, or Uptime Kuma's subreddit for finding answers to your question. If you cannot find the information you need, feel free to ask:
### Subreddit
- [GitHub Issues](https://github.com/louislam/uptime-kuma/issues)
- [Subreddit (r/UptimeKuma)](https://www.reddit.com/r/UptimeKuma/)
My Reddit account: [u/louislamlam](https://reddit.com/u/louislamlam).
You can mention me if you ask a question on Reddit.
[r/Uptime kuma](https://www.reddit.com/r/UptimeKuma/)
My Reddit account: [u/louislamlam](https://reddit.com/u/louislamlam)
You can mention me if you ask a question on the subreddit.
## Contribute
## Contributions
### Beta Version
### Create Pull Requests
We DO NOT accept all types of pull requests and do not want to waste your time. Please make sure you have read and follow the pull request rules:
[CONTRIBUTING.md#can-i-create-a-pull-request-for-uptime-kuma](https://github.com/louislam/uptime-kuma/blob/master/CONTRIBUTING.md#can-i-create-a-pull-request-for-uptime-kuma)
### Test Pull Requests
There are a lot of pull requests right now, but I don't have time to test them all.
If you want to help, you can check this:
https://github.com/louislam/uptime-kuma/wiki/Test-Pull-Requests
### Test Beta Version
Check out the latest beta release here: https://github.com/louislam/uptime-kuma/releases
### Bug Reports / Feature Requests
If you want to report a bug or request a new feature, feel free to open a [new issue](https://github.com/louislam/uptime-kuma/issues).
### Translations
If you want to translate Uptime Kuma into your language, please read: https://github.com/louislam/uptime-kuma/tree/master/src/languages
Feel free to correct my grammar in this README, source code, or wiki, as my mother language is not English and my grammar is not that great.
If you want to translate Uptime Kuma into your language, please visit [Weblate Readme](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md).
### Spelling & Grammar
Feel free to correct the grammar in the documentation or code.
My mother language is not English and my grammar is not that great.
### Pull Requests
If you want to modify Uptime Kuma, this guideline may be useful for you: https://github.com/louislam/uptime-kuma/blob/master/CONTRIBUTING.md

View file

@ -2,24 +2,29 @@
## Reporting a Vulnerability
Please report security issues to uptime@kuma.pet.
1. Please report security issues to https://github.com/louislam/uptime-kuma/security/advisories/new.
2. Please also create an empty security issue to alert me, as GitHub Advisories do not send notifications and I will probably miss the report without it: https://github.com/louislam/uptime-kuma/issues/new?assignees=&labels=help&template=security.md
Do not use the issue tracker or discuss it in the public as it will cause more damage.
Do not use the public issue tracker or discuss it in public as it will cause more damage.
## Do you accept other 3rd-party bug bounty platforms?
At this moment, I DO NOT accept reports via other bug bounty platforms, because I am not familiar with them and someone has already tried to send me a phishing link this way. To minimize my own risk, please report through GitHub Advisories only. I will ignore all emails from 3rd-party bug bounty platforms.
## Supported Versions
### Uptime Kuma Versions
You should use or upgrade to the latest version of Uptime Kuma. All `1.X.X` versions are upgradable to the lastest version.
You should use or upgrade to the latest version of Uptime Kuma. All `1.X.X` versions are upgradable to the latest version.
### Upgradable Docker Tags
| Tag | Supported |
| ------- | ------------------ |
| Tag | Supported |
|-|-|
| 1 | :white_check_mark: |
| 1-debian | :white_check_mark: |
| latest | :white_check_mark: |
| debian | :white_check_mark: |
| 1-alpine | ⚠️ Deprecated |
| alpine | ⚠️ Deprecated |
| All other tags | ❌ |
| All other tags | ❌ |

View file

@ -1,11 +0,0 @@
const config = {};
if (process.env.TEST_FRONTEND) {
config.presets = [ "@babel/preset-env" ];
}
if (process.env.TEST_BACKEND) {
config.plugins = [ "babel-plugin-rewire" ];
}
module.exports = config;

9
compose.yaml Normal file
View file

@ -0,0 +1,9 @@
services:
uptime-kuma:
image: louislam/uptime-kuma:1
volumes:
- ./data:/app/data
ports:
# <Host Port>:<Container Port>
- 3001:3001
restart: unless-stopped

28
config/cypress.config.js Normal file
View file

@ -0,0 +1,28 @@
const { defineConfig } = require("cypress");
module.exports = defineConfig({
projectId: "vyjuem",
e2e: {
experimentalStudio: true,
setupNodeEvents(on, config) {
},
fixturesFolder: "test/cypress/fixtures",
screenshotsFolder: "test/cypress/screenshots",
videosFolder: "test/cypress/videos",
downloadsFolder: "test/cypress/downloads",
supportFile: "test/cypress/support/e2e.js",
baseUrl: "http://localhost:3002",
defaultCommandTimeout: 10000,
pageLoadTimeout: 60000,
viewportWidth: 1920,
viewportHeight: 1080,
specPattern: [
"test/cypress/e2e/setup.cy.js",
"test/cypress/e2e/**/*.js"
],
},
env: {
baseUrl: "http://localhost:3002",
},
});
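For context, here is a minimal sketch of a spec file that this configuration would pick up via `specPattern`; the file name and the asserted page text are assumptions for illustration, not part of the repository:

```js
// Hypothetical spec: test/cypress/e2e/dashboard.cy.js
describe("Dashboard", () => {
    it("serves the frontend at the configured baseUrl", () => {
        // cy.visit("/") resolves against baseUrl (http://localhost:3002 above)
        cy.visit("/");
        cy.contains("Uptime Kuma"); // assumed page text
    });
});
```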

View file

@ -0,0 +1,10 @@
const { defineConfig } = require("cypress");
module.exports = defineConfig({
e2e: {
supportFile: false,
specPattern: [
"test/cypress/unit/**/*.js"
],
}
});

View file

@ -1,33 +0,0 @@
const PuppeteerEnvironment = require("jest-environment-puppeteer");
const util = require("util");
class DebugEnv extends PuppeteerEnvironment {
async handleTestEvent(event, state) {
const ignoredEvents = [
"setup",
"add_hook",
"start_describe_definition",
"add_test",
"finish_describe_definition",
"run_start",
"run_describe_start",
"test_start",
"hook_start",
"hook_success",
"test_fn_start",
"test_fn_success",
"test_done",
"run_describe_finish",
"run_finish",
"teardown",
"test_fn_failure",
];
if (!ignoredEvents.includes(event.name)) {
console.log(
new Date().toString() + ` Unhandled event [${event.name}] ` + util.inspect(event)
);
}
}
}
module.exports = DebugEnv;

View file

@ -1,5 +0,0 @@
module.exports = {
"rootDir": "..",
"testRegex": "./test/frontend.spec.js",
};

View file

@ -1,20 +0,0 @@
module.exports = {
"launch": {
"dumpio": true,
"slowMo": 500,
"headless": process.env.HEADLESS_TEST || false,
"userDataDir": "./data/test-chrome-profile",
args: [
"--disable-setuid-sandbox",
"--disable-gpu",
"--disable-dev-shm-usage",
"--no-default-browser-check",
"--no-experiments",
"--no-first-run",
"--no-pings",
"--no-sandbox",
"--no-zygote",
"--single-process",
],
}
};

View file

@ -1,12 +0,0 @@
module.exports = {
"verbose": true,
"preset": "jest-puppeteer",
"globals": {
"__DEV__": true
},
"testRegex": "./test/e2e.spec.js",
"testEnvironment": "./config/jest-debug-env.js",
"rootDir": "..",
"testTimeout": 30000,
};

View file

@ -0,0 +1,60 @@
import { defineConfig, devices } from "@playwright/test";
const port = 30001;
const url = `http://localhost:${port}`;
export default defineConfig({
// Look for test files in the "tests" directory, relative to this configuration file.
testDir: "../test/e2e",
outputDir: "../private/playwright-test-results",
fullyParallel: false,
locale: "en-US",
// Fail the build on CI if you accidentally left test.only in the source code.
forbidOnly: !!process.env.CI,
// Retry on CI only.
retries: process.env.CI ? 2 : 0,
// Opt out of parallel tests on CI.
workers: 1,
// Reporter to use
reporter: [
[
"html", {
outputFolder: "../private/playwright-report",
open: "never",
}
],
],
use: {
// Base URL to use in actions like `await page.goto('/')`.
baseURL: url,
// Collect trace when retrying the failed test.
trace: "on-first-retry",
},
// Configure projects for major browsers.
projects: [
{
name: "chromium",
use: { ...devices["Desktop Chrome"] },
},
/*
{
name: "firefox",
use: { browserName: "firefox" }
},*/
],
// Run your local dev server before starting the tests.
webServer: {
command: `node extra/remove-playwright-test-data.js && node server/server.js --port=${port} --data-dir=./data/playwright-test`,
url,
reuseExistingServer: false,
cwd: "../",
},
});
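As a rough illustration, a test that this configuration would discover under `testDir` might look like the following; the file name, route, and title check are assumptions:

```js
// Hypothetical test file: test/e2e/smoke.spec.js
import { test, expect } from "@playwright/test";

test("frontend responds on the configured baseURL", async ({ page }) => {
    // page.goto("/") resolves against baseURL (http://localhost:30001 above)
    await page.goto("/");
    await expect(page).toHaveTitle(/Uptime Kuma/); // assumed page title
});
```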

View file

@ -1,8 +1,8 @@
import legacy from "@vitejs/plugin-legacy";
import vue from "@vitejs/plugin-vue";
import { defineConfig } from "vite";
import visualizer from "rollup-plugin-visualizer";
import viteCompression from "vite-plugin-compression";
import VueDevTools from "vite-plugin-vue-devtools";
const postCssScss = require("postcss-scss");
const postcssRTLCSS = require("postcss-rtlcss");
@ -11,14 +11,17 @@ const viteCompressionFilter = /\.(js|mjs|json|css|html|svg)$/i;
// https://vitejs.dev/config/
export default defineConfig({
server: {
port: 3000,
},
define: {
"FRONTEND_VERSION": JSON.stringify(process.env.npm_package_version),
"DEVCONTAINER": JSON.stringify(process.env.DEVCONTAINER),
"GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN": JSON.stringify(process.env.GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN),
"CODESPACE_NAME": JSON.stringify(process.env.CODESPACE_NAME),
},
plugins: [
vue(),
legacy({
targets: [ "since 2015" ],
}),
visualizer({
filename: "tmp/dist-stats.html"
}),
@ -30,6 +33,7 @@ export default defineConfig({
algorithm: "brotliCompress",
filter: viteCompressionFilter,
}),
VueDevTools(),
],
css: {
postcss: {
@ -39,6 +43,9 @@ export default defineConfig({
}
},
build: {
commonjsOptions: {
include: [ /.js$/ ],
},
rollupOptions: {
output: {
manualChunks(id, { getModuleInfo, getModuleIds }) {

View file

565
db/knex_init_db.js Normal file
View file

@ -0,0 +1,565 @@
const { R } = require("redbean-node");
const { log } = require("../src/util");
/**
* DO NOT ADD ANYTHING HERE!
* IF YOU NEED TO ADD FIELDS, ADD IT TO ./db/knex_migrations
* See ./db/knex_migrations/README.md for more information
* @returns {Promise<void>}
*/
async function createTables() {
log.info("mariadb", "Creating basic tables for MariaDB");
const knex = R.knex;
// TODO: Should check later if it is really the final patch sql file.
// docker_host
await knex.schema.createTable("docker_host", (table) => {
table.increments("id");
table.integer("user_id").unsigned().notNullable();
table.string("docker_daemon", 255);
table.string("docker_type", 255);
table.string("name", 255);
});
// group
await knex.schema.createTable("group", (table) => {
table.increments("id");
table.string("name", 255).notNullable();
table.datetime("created_date").notNullable().defaultTo(knex.fn.now());
table.boolean("public").notNullable().defaultTo(false);
table.boolean("active").notNullable().defaultTo(true);
table.integer("weight").notNullable().defaultTo(1000);
table.integer("status_page_id").unsigned();
});
// proxy
await knex.schema.createTable("proxy", (table) => {
table.increments("id");
table.integer("user_id").unsigned().notNullable();
table.string("protocol", 10).notNullable();
table.string("host", 255).notNullable();
table.smallint("port").notNullable(); // TODO: Maybe a issue with MariaDB, need migration to int
table.boolean("auth").notNullable();
table.string("username", 255).nullable();
table.string("password", 255).nullable();
table.boolean("active").notNullable().defaultTo(true);
table.boolean("default").notNullable().defaultTo(false);
table.datetime("created_date").notNullable().defaultTo(knex.fn.now());
table.index("user_id", "proxy_user_id");
});
// user
await knex.schema.createTable("user", (table) => {
table.increments("id");
table.string("username", 255).notNullable().unique().collate("utf8_general_ci");
table.string("password", 255);
table.boolean("active").notNullable().defaultTo(true);
table.string("timezone", 150);
table.string("twofa_secret", 64);
table.boolean("twofa_status").notNullable().defaultTo(false);
table.string("twofa_last_token", 6);
});
// monitor
await knex.schema.createTable("monitor", (table) => {
table.increments("id");
table.string("name", 150);
table.boolean("active").notNullable().defaultTo(true);
table.integer("user_id").unsigned()
.references("id").inTable("user")
.onDelete("SET NULL")
.onUpdate("CASCADE");
table.integer("interval").notNullable().defaultTo(20);
table.text("url");
table.string("type", 20);
table.integer("weight").defaultTo(2000);
table.string("hostname", 255);
table.integer("port");
table.datetime("created_date").notNullable().defaultTo(knex.fn.now());
table.string("keyword", 255);
table.integer("maxretries").notNullable().defaultTo(0);
table.boolean("ignore_tls").notNullable().defaultTo(false);
table.boolean("upside_down").notNullable().defaultTo(false);
table.integer("maxredirects").notNullable().defaultTo(10);
table.text("accepted_statuscodes_json").notNullable().defaultTo("[\"200-299\"]");
table.string("dns_resolve_type", 5);
table.string("dns_resolve_server", 255);
table.string("dns_last_result", 255);
table.integer("retry_interval").notNullable().defaultTo(0);
table.string("push_token", 20).defaultTo(null);
table.text("method").notNullable().defaultTo("GET");
table.text("body").defaultTo(null);
table.text("headers").defaultTo(null);
table.text("basic_auth_user").defaultTo(null);
table.text("basic_auth_pass").defaultTo(null);
table.integer("docker_host").unsigned()
.references("id").inTable("docker_host");
table.string("docker_container", 255);
table.integer("proxy_id").unsigned()
.references("id").inTable("proxy");
table.boolean("expiry_notification").defaultTo(true);
table.text("mqtt_topic");
table.string("mqtt_success_message", 255);
table.string("mqtt_username", 255);
table.string("mqtt_password", 255);
table.string("database_connection_string", 2000);
table.text("database_query");
table.string("auth_method", 250);
table.text("auth_domain");
table.text("auth_workstation");
table.string("grpc_url", 255).defaultTo(null);
table.text("grpc_protobuf").defaultTo(null);
table.text("grpc_body").defaultTo(null);
table.text("grpc_metadata").defaultTo(null);
table.text("grpc_method").defaultTo(null);
table.text("grpc_service_name").defaultTo(null);
table.boolean("grpc_enable_tls").notNullable().defaultTo(false);
table.string("radius_username", 255);
table.string("radius_password", 255);
table.string("radius_calling_station_id", 50);
table.string("radius_called_station_id", 50);
table.string("radius_secret", 255);
table.integer("resend_interval").notNullable().defaultTo(0);
table.integer("packet_size").notNullable().defaultTo(56);
table.string("game", 255);
});
// heartbeat
await knex.schema.createTable("heartbeat", (table) => {
table.increments("id");
table.boolean("important").notNullable().defaultTo(false);
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.smallint("status").notNullable();
table.text("msg");
table.datetime("time").notNullable();
table.integer("ping");
table.integer("duration").notNullable().defaultTo(0);
table.integer("down_count").notNullable().defaultTo(0);
table.index("important");
table.index([ "monitor_id", "time" ], "monitor_time_index");
table.index("monitor_id");
table.index([ "monitor_id", "important", "time" ], "monitor_important_time_index");
});
// incident
await knex.schema.createTable("incident", (table) => {
table.increments("id");
table.string("title", 255).notNullable();
table.text("content", 255).notNullable();
table.string("style", 30).notNullable().defaultTo("warning");
table.datetime("created_date").notNullable().defaultTo(knex.fn.now());
table.datetime("last_updated_date");
table.boolean("pin").notNullable().defaultTo(true);
table.boolean("active").notNullable().defaultTo(true);
table.integer("status_page_id").unsigned();
});
// maintenance
await knex.schema.createTable("maintenance", (table) => {
table.increments("id");
table.string("title", 150).notNullable();
table.text("description").notNullable();
table.integer("user_id").unsigned()
.references("id").inTable("user")
.onDelete("SET NULL")
.onUpdate("CASCADE");
table.boolean("active").notNullable().defaultTo(true);
table.string("strategy", 50).notNullable().defaultTo("single");
table.datetime("start_date");
table.datetime("end_date");
table.time("start_time");
table.time("end_time");
table.string("weekdays", 250).defaultTo("[]");
table.text("days_of_month").defaultTo("[]");
table.integer("interval_day");
table.index("active");
table.index([ "strategy", "active" ], "manual_active");
table.index("user_id", "maintenance_user_id");
});
// status_page
await knex.schema.createTable("status_page", (table) => {
table.increments("id");
table.string("slug", 255).notNullable().unique().collate("utf8_general_ci");
table.string("title", 255).notNullable();
table.text("description");
table.string("icon", 255).notNullable();
table.string("theme", 30).notNullable();
table.boolean("published").notNullable().defaultTo(true);
table.boolean("search_engine_index").notNullable().defaultTo(true);
table.boolean("show_tags").notNullable().defaultTo(false);
table.string("password");
table.datetime("created_date").notNullable().defaultTo(knex.fn.now());
table.datetime("modified_date").notNullable().defaultTo(knex.fn.now());
table.text("footer_text");
table.text("custom_css");
table.boolean("show_powered_by").notNullable().defaultTo(true);
table.string("google_analytics_tag_id");
});
// maintenance_status_page
await knex.schema.createTable("maintenance_status_page", (table) => {
table.increments("id");
table.integer("status_page_id").unsigned().notNullable()
.references("id").inTable("status_page")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("maintenance_id").unsigned().notNullable()
.references("id").inTable("maintenance")
.onDelete("CASCADE")
.onUpdate("CASCADE");
});
// maintenance_timeslot
await knex.schema.createTable("maintenance_timeslot", (table) => {
table.increments("id");
table.integer("maintenance_id").unsigned().notNullable()
.references("id").inTable("maintenance")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.datetime("start_date").notNullable();
table.datetime("end_date");
table.boolean("generated_next").defaultTo(false);
table.index("maintenance_id");
table.index([ "maintenance_id", "start_date", "end_date" ], "active_timeslot_index");
table.index("generated_next", "generated_next_index");
});
// monitor_group
await knex.schema.createTable("monitor_group", (table) => {
table.increments("id");
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("group_id").unsigned().notNullable()
.references("id").inTable("group")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("weight").notNullable().defaultTo(1000);
table.boolean("send_url").notNullable().defaultTo(false);
table.index([ "monitor_id", "group_id" ], "fk");
});
// monitor_maintenance
await knex.schema.createTable("monitor_maintenance", (table) => {
table.increments("id");
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("maintenance_id").unsigned().notNullable()
.references("id").inTable("maintenance")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.index("maintenance_id", "maintenance_id_index2");
table.index("monitor_id", "monitor_id_index");
});
// notification
await knex.schema.createTable("notification", (table) => {
table.increments("id");
table.string("name", 255);
table.boolean("active").notNullable().defaultTo(true);
table.integer("user_id").unsigned();
table.boolean("is_default").notNullable().defaultTo(false);
table.text("config", "longtext");
});
// monitor_notification
await knex.schema.createTable("monitor_notification", (table) => {
table.increments("id").unsigned(); // TODO: no auto increment????
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("notification_id").unsigned().notNullable()
.references("id").inTable("notification")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.index([ "monitor_id", "notification_id" ], "monitor_notification_index");
});
// tag
await knex.schema.createTable("tag", (table) => {
table.increments("id");
table.string("name", 255).notNullable();
table.string("color", 255).notNullable();
table.datetime("created_date").notNullable().defaultTo(knex.fn.now());
});
// monitor_tag
await knex.schema.createTable("monitor_tag", (table) => {
table.increments("id");
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("tag_id").unsigned().notNullable()
.references("id").inTable("tag")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.text("value");
});
// monitor_tls_info
await knex.schema.createTable("monitor_tls_info", (table) => {
table.increments("id");
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.text("info_json");
});
// notification_sent_history
await knex.schema.createTable("notification_sent_history", (table) => {
table.increments("id");
table.string("type", 50).notNullable();
table.integer("monitor_id").unsigned().notNullable();
table.integer("days").notNullable();
table.unique([ "type", "monitor_id", "days" ]);
table.index([ "type", "monitor_id", "days" ], "good_index");
});
// setting
await knex.schema.createTable("setting", (table) => {
table.increments("id");
table.string("key", 200).notNullable().unique().collate("utf8_general_ci");
table.text("value");
table.string("type", 20);
});
// status_page_cname
await knex.schema.createTable("status_page_cname", (table) => {
table.increments("id");
table.integer("status_page_id").unsigned()
.references("id").inTable("status_page")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.string("domain").notNullable().unique().collate("utf8_general_ci");
});
/*********************
* Converted Patch here
*********************/
// 2023-06-30-1348-http-body-encoding.js
// ALTER TABLE monitor ADD http_body_encoding VARCHAR(25);
// UPDATE monitor SET http_body_encoding = 'json' WHERE (type = 'http' or type = 'keyword') AND http_body_encoding IS NULL;
await knex.schema.table("monitor", function (table) {
table.string("http_body_encoding", 25);
});
await knex("monitor")
.where(function () {
this.where("type", "http").orWhere("type", "keyword");
})
.whereNull("http_body_encoding")
.update({
http_body_encoding: "json",
});
// 2023-06-30-1354-add-description-monitor.js
// ALTER TABLE monitor ADD description TEXT default null;
await knex.schema.table("monitor", function (table) {
table.text("description").defaultTo(null);
});
// 2023-06-30-1357-api-key-table.js
/*
CREATE TABLE [api_key] (
[id] INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
[key] VARCHAR(255) NOT NULL,
[name] VARCHAR(255) NOT NULL,
[user_id] INTEGER NOT NULL,
[created_date] DATETIME DEFAULT (DATETIME('now')) NOT NULL,
[active] BOOLEAN DEFAULT 1 NOT NULL,
[expires] DATETIME DEFAULT NULL,
CONSTRAINT FK_user FOREIGN KEY ([user_id]) REFERENCES [user]([id]) ON DELETE CASCADE ON UPDATE CASCADE
);
*/
await knex.schema.createTable("api_key", function (table) {
table.increments("id").primary();
table.string("key", 255).notNullable();
table.string("name", 255).notNullable();
table.integer("user_id").unsigned().notNullable()
.references("id").inTable("user")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.dateTime("created_date").defaultTo(knex.fn.now()).notNullable();
table.boolean("active").defaultTo(1).notNullable();
table.dateTime("expires").defaultTo(null);
});
// 2023-06-30-1400-monitor-tls.js
/*
ALTER TABLE monitor
ADD tls_ca TEXT default null;
ALTER TABLE monitor
ADD tls_cert TEXT default null;
ALTER TABLE monitor
ADD tls_key TEXT default null;
*/
await knex.schema.table("monitor", function (table) {
table.text("tls_ca").defaultTo(null);
table.text("tls_cert").defaultTo(null);
table.text("tls_key").defaultTo(null);
});
// 2023-06-30-1401-maintenance-cron.js
/*
-- 999 characters. https://stackoverflow.com/questions/46134830/maximum-length-for-cron-job
DROP TABLE maintenance_timeslot;
ALTER TABLE maintenance ADD cron TEXT;
ALTER TABLE maintenance ADD timezone VARCHAR(255);
ALTER TABLE maintenance ADD duration INTEGER;
*/
await knex.schema
.dropTableIfExists("maintenance_timeslot")
.table("maintenance", function (table) {
table.text("cron");
table.string("timezone", 255);
table.integer("duration");
});
// 2023-06-30-1413-add-parent-monitor.js.
/*
ALTER TABLE monitor
ADD parent INTEGER REFERENCES [monitor] ([id]) ON DELETE SET NULL ON UPDATE CASCADE;
*/
await knex.schema.table("monitor", function (table) {
table.integer("parent").unsigned()
.references("id").inTable("monitor")
.onDelete("SET NULL")
.onUpdate("CASCADE");
});
/*
patch-add-invert-keyword.sql
ALTER TABLE monitor
ADD invert_keyword BOOLEAN default 0 not null;
*/
await knex.schema.table("monitor", function (table) {
table.boolean("invert_keyword").defaultTo(0).notNullable();
});
/*
patch-added-json-query.sql
ALTER TABLE monitor
ADD json_path TEXT;
ALTER TABLE monitor
ADD expected_value VARCHAR(255);
*/
await knex.schema.table("monitor", function (table) {
table.text("json_path");
table.string("expected_value", 255);
});
/*
patch-added-kafka-producer.sql
ALTER TABLE monitor
ADD kafka_producer_topic VARCHAR(255);
ALTER TABLE monitor
ADD kafka_producer_brokers TEXT;
ALTER TABLE monitor
ADD kafka_producer_ssl INTEGER;
ALTER TABLE monitor
ADD kafka_producer_allow_auto_topic_creation VARCHAR(255);
ALTER TABLE monitor
ADD kafka_producer_sasl_options TEXT;
ALTER TABLE monitor
ADD kafka_producer_message TEXT;
*/
await knex.schema.table("monitor", function (table) {
table.string("kafka_producer_topic", 255);
table.text("kafka_producer_brokers");
// patch-fix-kafka-producer-booleans.sql
table.boolean("kafka_producer_ssl").defaultTo(0).notNullable();
table.boolean("kafka_producer_allow_auto_topic_creation").defaultTo(0).notNullable();
table.text("kafka_producer_sasl_options");
table.text("kafka_producer_message");
});
/*
patch-add-certificate-expiry-status-page.sql
ALTER TABLE status_page
ADD show_certificate_expiry BOOLEAN default 0 NOT NULL;
*/
await knex.schema.table("status_page", function (table) {
table.boolean("show_certificate_expiry").defaultTo(0).notNullable();
});
/*
patch-monitor-oauth-cc.sql
ALTER TABLE monitor
ADD oauth_client_id TEXT default null;
ALTER TABLE monitor
ADD oauth_client_secret TEXT default null;
ALTER TABLE monitor
ADD oauth_token_url TEXT default null;
ALTER TABLE monitor
ADD oauth_scopes TEXT default null;
ALTER TABLE monitor
ADD oauth_auth_method TEXT default null;
*/
await knex.schema.table("monitor", function (table) {
table.text("oauth_client_id").defaultTo(null);
table.text("oauth_client_secret").defaultTo(null);
table.text("oauth_token_url").defaultTo(null);
table.text("oauth_scopes").defaultTo(null);
table.text("oauth_auth_method").defaultTo(null);
});
/*
patch-add-timeout-monitor.sql
ALTER TABLE monitor
ADD timeout DOUBLE default 0 not null;
*/
await knex.schema.table("monitor", function (table) {
table.double("timeout").defaultTo(0).notNullable();
});
/*
patch-add-gamedig-given-port.sql
ALTER TABLE monitor
ADD gamedig_given_port_only BOOLEAN default 1 not null;
*/
await knex.schema.table("monitor", function (table) {
table.boolean("gamedig_given_port_only").defaultTo(1).notNullable();
});
log.info("mariadb", "Created basic tables for MariaDB");
}
module.exports = {
createTables,
};
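For orientation, a hedged sketch of how `createTables()` could be invoked when setting up a fresh database; the surrounding bootstrap code and the require path are assumptions, and `R.knex` must already have been configured by the application's database initialisation before this runs:

```js
// Hypothetical bootstrap sketch (not the actual server start-up code).
const { createTables } = require("./db/knex_init_db");

async function initFreshDatabase() {
    // Assumes redbean-node's R.knex has already been wired up to an
    // empty MariaDB (or SQLite) database by the application.
    await createTables();
    console.log("Base schema created");
}

initFreshDatabase().catch((err) => {
    console.error("Failed to create base tables:", err);
    process.exit(1);
});
```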

View file

@ -0,0 +1,41 @@
exports.up = function (knex) {
return knex.schema
.createTable("stat_minutely", function (table) {
table.increments("id");
table.comment("This table contains the minutely aggregate statistics for each monitor");
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("timestamp")
.notNullable()
.comment("Unix timestamp rounded down to the nearest minute");
table.float("ping").notNullable().comment("Average ping in milliseconds");
table.smallint("up").notNullable();
table.smallint("down").notNullable();
table.unique([ "monitor_id", "timestamp" ]);
})
.createTable("stat_daily", function (table) {
table.increments("id");
table.comment("This table contains the daily aggregate statistics for each monitor");
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("timestamp")
.notNullable()
.comment("Unix timestamp rounded down to the nearest day");
table.float("ping").notNullable().comment("Average ping in milliseconds");
table.smallint("up").notNullable();
table.smallint("down").notNullable();
table.unique([ "monitor_id", "timestamp" ]);
});
};
exports.down = function (knex) {
return knex.schema
.dropTable("stat_minutely")
.dropTable("stat_daily");
};
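To make the `timestamp` comments above concrete, here is a small illustrative helper (not part of the migration or the server code) showing how a Unix timestamp can be rounded down to the nearest minute or day:

```js
// Illustrative only: bucket a Unix timestamp (in seconds) the way the
// stat_minutely / stat_daily rows above describe.
function bucketTimestamp(unixSeconds, bucketSeconds) {
    return Math.floor(unixSeconds / bucketSeconds) * bucketSeconds;
}

const now = Math.floor(Date.now() / 1000);
console.log(bucketTimestamp(now, 60));    // start of the current minute
console.log(bucketTimestamp(now, 86400)); // start of the current day (UTC)
```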

View file

@ -0,0 +1,16 @@
exports.up = function (knex) {
// Add new column heartbeat.end_time
return knex.schema
.alterTable("heartbeat", function (table) {
table.datetime("end_time").nullable().defaultTo(null);
});
};
exports.down = function (knex) {
// Drop column heartbeat.end_time
return knex.schema
.alterTable("heartbeat", function (table) {
table.dropColumn("end_time");
});
};

View file

@ -0,0 +1,15 @@
exports.up = function (knex) {
// Add new column heartbeat.retries
return knex.schema
.alterTable("heartbeat", function (table) {
table.integer("retries").notNullable().defaultTo(0);
});
};
exports.down = function (knex) {
return knex.schema
.alterTable("heartbeat", function (table) {
table.dropColumn("retries");
});
};

View file

@ -0,0 +1,16 @@
exports.up = function (knex) {
// Add new column monitor.mqtt_check_type
return knex.schema
.alterTable("monitor", function (table) {
table.string("mqtt_check_type", 255).notNullable().defaultTo("keyword");
});
};
exports.down = function (knex) {
// Drop column monitor.mqtt_check_type
return knex.schema
.alterTable("monitor", function (table) {
table.dropColumn("mqtt_check_type");
});
};

View file

@ -0,0 +1,14 @@
exports.up = function (knex) {
// Update the length of monitor.push_token to 32
return knex.schema
.alterTable("monitor", function (table) {
table.string("push_token", 32).alter();
});
};
exports.down = function (knex) {
return knex.schema
.alterTable("monitor", function (table) {
table.string("push_token", 20).alter();
});
};

View file

@ -0,0 +1,21 @@
exports.up = function (knex) {
return knex.schema
.createTable("remote_browser", function (table) {
table.increments("id");
table.string("name", 255).notNullable();
table.string("url", 255).notNullable();
table.integer("user_id").unsigned();
}).alterTable("monitor", function (table) {
// Add new column monitor.remote_browser
table.integer("remote_browser").nullable().defaultTo(null).unsigned()
.index()
.references("id")
.inTable("remote_browser");
});
};
exports.down = function (knex) {
return knex.schema.dropTable("remote_browser").alterTable("monitor", function (table) {
table.dropColumn("remote_browser");
});
};

View file

@ -0,0 +1,12 @@
exports.up = function (knex) {
return knex.schema
.alterTable("status_page", function (table) {
table.integer("auto_refresh_interval").defaultTo(300).unsigned();
});
};
exports.down = function (knex) {
return knex.schema.alterTable("status_page", function (table) {
table.dropColumn("auto_refresh_interval");
});
};

View file

@ -0,0 +1,24 @@
exports.up = function (knex) {
return knex.schema
.alterTable("stat_daily", function (table) {
table.float("ping_min").notNullable().defaultTo(0).comment("Minimum ping during this period in milliseconds");
table.float("ping_max").notNullable().defaultTo(0).comment("Maximum ping during this period in milliseconds");
})
.alterTable("stat_minutely", function (table) {
table.float("ping_min").notNullable().defaultTo(0).comment("Minimum ping during this period in milliseconds");
table.float("ping_max").notNullable().defaultTo(0).comment("Maximum ping during this period in milliseconds");
});
};
exports.down = function (knex) {
return knex.schema
.alterTable("stat_daily", function (table) {
table.dropColumn("ping_min");
table.dropColumn("ping_max");
})
.alterTable("stat_minutely", function (table) {
table.dropColumn("ping_min");
table.dropColumn("ping_max");
});
};

View file

@ -0,0 +1,26 @@
exports.up = function (knex) {
return knex.schema
.createTable("stat_hourly", function (table) {
table.increments("id");
table.comment("This table contains the hourly aggregate statistics for each monitor");
table.integer("monitor_id").unsigned().notNullable()
.references("id").inTable("monitor")
.onDelete("CASCADE")
.onUpdate("CASCADE");
table.integer("timestamp")
.notNullable()
.comment("Unix timestamp rounded down to the nearest hour");
table.float("ping").notNullable().comment("Average ping in milliseconds");
table.float("ping_min").notNullable().defaultTo(0).comment("Minimum ping during this period in milliseconds");
table.float("ping_max").notNullable().defaultTo(0).comment("Maximum ping during this period in milliseconds");
table.smallint("up").notNullable();
table.smallint("down").notNullable();
table.unique([ "monitor_id", "timestamp" ]);
});
};
exports.down = function (knex) {
return knex.schema
.dropTable("stat_hourly");
};

View file

@ -0,0 +1,26 @@
exports.up = function (knex) {
return knex.schema
.alterTable("stat_daily", function (table) {
table.text("extras").defaultTo(null).comment("Extra statistics during this time period");
})
.alterTable("stat_minutely", function (table) {
table.text("extras").defaultTo(null).comment("Extra statistics during this time period");
})
.alterTable("stat_hourly", function (table) {
table.text("extras").defaultTo(null).comment("Extra statistics during this time period");
});
};
exports.down = function (knex) {
return knex.schema
.alterTable("stat_daily", function (table) {
table.dropColumn("extras");
})
.alterTable("stat_minutely", function (table) {
table.dropColumn("extras");
})
.alterTable("stat_hourly", function (table) {
table.dropColumn("extras");
});
};

View file

@ -0,0 +1,56 @@
# Info
https://knexjs.org/guide/migrations.html#knexfile-in-other-languages
## Basic rules
- All tables must have a primary key named `id`
- Filename format: `YYYY-MM-DD-HHMM-patch-name.js`
- Avoid native SQL syntax; use knex methods instead, because Uptime Kuma supports both SQLite and MariaDB.
## Template
```js
exports.up = function(knex) {
};
exports.down = function(knex) {
};
// exports.config = { transaction: false };
```
## Example
Filename: 2023-06-30-1348-create-user-and-product.js
```js
exports.up = function(knex) {
return knex.schema
.createTable('user', function (table) {
table.increments('id');
table.string('first_name', 255).notNullable();
table.string('last_name', 255).notNullable();
})
.createTable('product', function (table) {
table.increments('id');
table.decimal('price').notNullable();
table.string('name', 1000).notNullable();
}).then(() => {
knex("products").insert([
{ price: 10, name: "Apple" },
{ price: 20, name: "Orange" },
]);
});
};
exports.down = function(knex) {
return knex.schema
.dropTable("product")
.dropTable("user");
};
```
https://knexjs.org/guide/migrations.html#transactions-in-migrations
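As a further, hypothetical illustration closer to what this repository actually does (the column name below is made up), a column-addition migration following the same rules could look like this:

```js
// Hypothetical filename: 2023-12-01-0900-add-example-flag.js
exports.up = function (knex) {
    return knex.schema.alterTable("monitor", function (table) {
        // "example_flag" is a made-up column used only for illustration.
        table.boolean("example_flag").notNullable().defaultTo(false);
    });
};

exports.down = function (knex) {
    return knex.schema.alterTable("monitor", function (table) {
        table.dropColumn("example_flag");
    });
};
```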

View file

@ -0,0 +1,3 @@
# Don't create a new migration file here
Please go to ./db/knex_migrations/README.md

View file

@ -0,0 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE status_page
ADD show_certificate_expiry BOOLEAN default 0 NOT NULL;
COMMIT;

View file

@ -1,5 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor_group
ADD send_url BOOLEAN DEFAULT 0 NOT NULL;
COMMIT;

View file

@ -0,0 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD description TEXT default null;
COMMIT;

View file

@ -0,0 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD gamedig_given_port_only BOOLEAN default 1 not null;
COMMIT;

View file

@ -0,0 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD game VARCHAR(255);
COMMIT;

View file

@ -0,0 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE status_page
ADD google_analytics_tag_id VARCHAR;
COMMIT;

View file

@ -0,0 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD invert_keyword BOOLEAN default 0 not null;
COMMIT;

View file

@ -0,0 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD parent INTEGER REFERENCES [monitor] ([id]) ON DELETE SET NULL ON UPDATE CASCADE;
COMMIT;

View file

@ -1,3 +1,4 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
@ -15,4 +16,4 @@ ALTER TABLE monitor
ALTER TABLE monitor
ADD radius_secret VARCHAR(255);
COMMIT
COMMIT;

View file

@ -0,0 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD timeout DOUBLE default 0 not null;
COMMIT;

View file

@ -0,0 +1,10 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD json_path TEXT;
ALTER TABLE monitor
ADD expected_value VARCHAR(255);
COMMIT;

View file

@ -0,0 +1,22 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD kafka_producer_topic VARCHAR(255);
ALTER TABLE monitor
ADD kafka_producer_brokers TEXT;
ALTER TABLE monitor
ADD kafka_producer_ssl INTEGER;
ALTER TABLE monitor
ADD kafka_producer_allow_auto_topic_creation VARCHAR(255);
ALTER TABLE monitor
ADD kafka_producer_sasl_options TEXT;
ALTER TABLE monitor
ADD kafka_producer_message TEXT;
COMMIT;

View file

@ -0,0 +1,15 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
CREATE TABLE [api_key] (
[id] INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
[key] VARCHAR(255) NOT NULL,
[name] VARCHAR(255) NOT NULL,
[user_id] INTEGER NOT NULL,
[created_date] DATETIME DEFAULT (DATETIME('now')) NOT NULL,
[active] BOOLEAN DEFAULT 1 NOT NULL,
[expires] DATETIME DEFAULT NULL,
CONSTRAINT FK_user FOREIGN KEY ([user_id]) REFERENCES [user]([id]) ON DELETE CASCADE ON UPDATE CASCADE
);
COMMIT;

View file

@ -0,0 +1,34 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
-- Rename the existing COLUMNs (suffix them with `_old`)
ALTER TABLE monitor
RENAME COLUMN kafka_producer_ssl TO kafka_producer_ssl_old;
ALTER TABLE monitor
RENAME COLUMN kafka_producer_allow_auto_topic_creation TO kafka_producer_allow_auto_topic_creation_old;
-- Add correct COLUMNs
ALTER TABLE monitor
ADD COLUMN kafka_producer_ssl BOOLEAN default 0 NOT NULL;
ALTER TABLE monitor
ADD COLUMN kafka_producer_allow_auto_topic_creation BOOLEAN default 0 NOT NULL;
-- This SQL is still not fully safe. See https://github.com/louislam/uptime-kuma/issues/4039.
-- Bring the old values from the `_old` COLUMNs into the correct ones
-- UPDATE monitor SET kafka_producer_allow_auto_topic_creation = monitor.kafka_producer_allow_auto_topic_creation_old
-- WHERE monitor.kafka_producer_allow_auto_topic_creation_old IS NOT NULL;
-- UPDATE monitor SET kafka_producer_ssl = monitor.kafka_producer_ssl_old
-- WHERE monitor.kafka_producer_ssl_old IS NOT NULL;
-- Remove old COLUMNs
ALTER TABLE monitor
DROP COLUMN kafka_producer_allow_auto_topic_creation_old;
ALTER TABLE monitor
DROP COLUMN kafka_producer_ssl_old;
COMMIT;

View file

@ -0,0 +1,25 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD grpc_url VARCHAR(255) default null;
ALTER TABLE monitor
ADD grpc_protobuf TEXT default null;
ALTER TABLE monitor
ADD grpc_body TEXT default null;
ALTER TABLE monitor
ADD grpc_metadata TEXT default null;
ALTER TABLE monitor
ADD grpc_method VARCHAR(255) default null;
ALTER TABLE monitor
ADD grpc_service_name VARCHAR(255) default null;
ALTER TABLE monitor
ADD grpc_enable_tls BOOLEAN default 0 not null;
COMMIT;

View file

@ -0,0 +1,12 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor ADD http_body_encoding VARCHAR(25);
COMMIT;
BEGIN TRANSACTION;
UPDATE monitor SET http_body_encoding = 'json' WHERE (type = 'http' or type = 'keyword') AND http_body_encoding IS NULL;
COMMIT;

View file

@ -0,0 +1,11 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
DROP TABLE maintenance_timeslot;
-- 999 characters. https://stackoverflow.com/questions/46134830/maximum-length-for-cron-job
ALTER TABLE maintenance ADD cron TEXT;
ALTER TABLE maintenance ADD timezone VARCHAR(255);
ALTER TABLE maintenance ADD duration INTEGER;
COMMIT;

View file

@ -0,0 +1,83 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
-- Just for someone who tested maintenance before (patch-maintenance-table.sql)
DROP TABLE IF EXISTS maintenance_status_page;
DROP TABLE IF EXISTS monitor_maintenance;
DROP TABLE IF EXISTS maintenance;
DROP TABLE IF EXISTS maintenance_timeslot;
-- maintenance
CREATE TABLE [maintenance] (
[id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
[title] VARCHAR(150) NOT NULL,
[description] TEXT NOT NULL,
[user_id] INTEGER REFERENCES [user]([id]) ON DELETE SET NULL ON UPDATE CASCADE,
[active] BOOLEAN NOT NULL DEFAULT 1,
[strategy] VARCHAR(50) NOT NULL DEFAULT 'single',
[start_date] DATETIME,
[end_date] DATETIME,
[start_time] TIME,
[end_time] TIME,
[weekdays] VARCHAR2(250) DEFAULT '[]',
[days_of_month] TEXT DEFAULT '[]',
[interval_day] INTEGER
);
CREATE INDEX [manual_active] ON [maintenance] (
[strategy],
[active]
);
CREATE INDEX [active] ON [maintenance] ([active]);
CREATE INDEX [maintenance_user_id] ON [maintenance] ([user_id]);
-- maintenance_status_page
CREATE TABLE maintenance_status_page (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
status_page_id INTEGER NOT NULL,
maintenance_id INTEGER NOT NULL,
CONSTRAINT FK_maintenance FOREIGN KEY (maintenance_id) REFERENCES maintenance (id) ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT FK_status_page FOREIGN KEY (status_page_id) REFERENCES status_page (id) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE INDEX [status_page_id_index]
ON [maintenance_status_page]([status_page_id]);
CREATE INDEX [maintenance_id_index]
ON [maintenance_status_page]([maintenance_id]);
-- maintenance_timeslot
CREATE TABLE [maintenance_timeslot] (
[id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
[maintenance_id] INTEGER NOT NULL CONSTRAINT [FK_maintenance] REFERENCES [maintenance]([id]) ON DELETE CASCADE ON UPDATE CASCADE,
[start_date] DATETIME NOT NULL,
[end_date] DATETIME,
[generated_next] BOOLEAN DEFAULT 0
);
CREATE INDEX [maintenance_id] ON [maintenance_timeslot] ([maintenance_id] DESC);
CREATE INDEX [active_timeslot_index] ON [maintenance_timeslot] (
[maintenance_id] DESC,
[start_date] DESC,
[end_date] DESC
);
CREATE INDEX [generated_next_index] ON [maintenance_timeslot] ([generated_next]);
-- monitor_maintenance
CREATE TABLE monitor_maintenance (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
monitor_id INTEGER NOT NULL,
maintenance_id INTEGER NOT NULL,
CONSTRAINT FK_maintenance FOREIGN KEY (maintenance_id) REFERENCES maintenance (id) ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT FK_monitor FOREIGN KEY (monitor_id) REFERENCES monitor (id) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE INDEX [maintenance_id_index2] ON [monitor_maintenance]([maintenance_id]);
CREATE INDEX [monitor_id_index] ON [monitor_maintenance]([monitor_id]);
COMMIT;

View file

@ -0,0 +1,10 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD resend_interval INTEGER default 0 not null;
ALTER TABLE heartbeat
ADD down_count INTEGER default 0 not null;
COMMIT;

View file

@ -0,0 +1,19 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD oauth_client_id TEXT default null;
ALTER TABLE monitor
ADD oauth_client_secret TEXT default null;
ALTER TABLE monitor
ADD oauth_token_url TEXT default null;
ALTER TABLE monitor
ADD oauth_scopes TEXT default null;
ALTER TABLE monitor
ADD oauth_auth_method TEXT default null;
COMMIT;

View file

@ -0,0 +1,18 @@
BEGIN TRANSACTION;
PRAGMA writable_schema = TRUE;
UPDATE
SQLITE_MASTER
SET
sql = replace(sql,
'monitor_id INTEGER NOT NULL',
'monitor_id INTEGER NOT NULL REFERENCES [monitor] ([id]) ON DELETE CASCADE ON UPDATE CASCADE'
)
WHERE
name = 'monitor_tls_info'
AND type = 'table';
PRAGMA writable_schema = RESET;
COMMIT;

View file

@ -0,0 +1,13 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD tls_ca TEXT default null;
ALTER TABLE monitor
ADD tls_cert TEXT default null;
ALTER TABLE monitor
ADD tls_key TEXT default null;
COMMIT;

View file

@ -0,0 +1,10 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
-- SQLite: Change the data type of the column "config" from VARCHAR to TEXT
ALTER TABLE notification RENAME COLUMN config TO config_old;
ALTER TABLE notification ADD COLUMN config TEXT;
UPDATE notification SET config = config_old;
ALTER TABLE notification DROP COLUMN config_old;
COMMIT;

View file

@ -0,0 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE monitor
ADD packet_size INTEGER DEFAULT 56 NOT NULL;
COMMIT;

View file

@ -18,5 +18,4 @@ drop table setting;
alter table setting_dg_tmp rename to setting;
COMMIT;

View file

@ -0,0 +1,11 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
ALTER TABLE status_page
ADD footer_text TEXT;
ALTER TABLE status_page
ADD custom_css TEXT;
ALTER TABLE status_page
ADD show_powered_by BOOLEAN NOT NULL DEFAULT 1;
COMMIT;

View file

@ -0,0 +1,7 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
UPDATE monitor SET timeout = (interval * 0.8)
WHERE timeout IS NULL OR timeout <= 0;
COMMIT;

View file

@ -1,3 +1,4 @@
-- You should not modify this file if it has been pushed to GitHub, unless something is seriously wrong with the db.
BEGIN TRANSACTION;
CREATE TABLE monitor_tls_info (

Some files were not shown because too many files have changed in this diff Show more