Compare commits
4 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
92f143163b | ||
|
|
3c1a1f1b81 | ||
|
|
4bc7c70e2e | ||
|
|
cc6ddf50b1 |
@@ -5,7 +5,7 @@
|
||||
},
|
||||
"extends": "eslint:recommended",
|
||||
"parserOptions": {
|
||||
"ecmaVersion": 2020
|
||||
"ecmaVersion": 8
|
||||
},
|
||||
"rules": {
|
||||
"indent": [
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,6 +1,5 @@
|
||||
node_modules/
|
||||
coverage/
|
||||
.nyc_output/
|
||||
webadmin/dist/
|
||||
installer/src/certs/server.key
|
||||
|
||||
|
||||
644
CHANGES
644
CHANGES
@@ -1854,647 +1854,3 @@
|
||||
* Make mail eventlog only visible to owners
|
||||
* Make app password work with sftp
|
||||
|
||||
[5.1.0]
|
||||
* Add turn addon
|
||||
* Fix disk usage display
|
||||
* Drop support for TLSv1 and TLSv1.1
|
||||
* Make cert validation work for ECC certs
|
||||
* Add type filter to mail eventlog
|
||||
* mail: Fix listing of mailboxes and aliases in the UI
|
||||
* branding: fix login page title
|
||||
* Only a Cloudron owner can install/update/exec apps with the docker addon
|
||||
* security: reset tokens are only valid for a day
|
||||
* mail: fix eventlog db perms
|
||||
* Fix various bugs in the disk graphs
|
||||
|
||||
[5.1.1]
|
||||
* Add turn addon
|
||||
* Fix disk usage display
|
||||
* Drop support for TLSv1 and TLSv1.1
|
||||
* Make cert validation work for ECC certs
|
||||
* Add type filter to mail eventlog
|
||||
* mail: Fix listing of mailboxes and aliases in the UI
|
||||
* branding: fix login page title
|
||||
* Only a Cloudron owner can install/update/exec apps with the docker addon
|
||||
* security: reset tokens are only valid for a day
|
||||
* mail: fix eventlog db perms
|
||||
* Fix various bugs in the disk graphs
|
||||
* Fix collectd installation
|
||||
* graphs: sort disk contents by usage
|
||||
* backups: show apps that are not automatically backed up in backup view
|
||||
|
||||
[5.1.2]
|
||||
* Add turn addon
|
||||
* Fix disk usage display
|
||||
* Drop support for TLSv1 and TLSv1.1
|
||||
* Make cert validation work for ECC certs
|
||||
* Add type filter to mail eventlog
|
||||
* mail: Fix listing of mailboxes and aliases in the UI
|
||||
* branding: fix login page title
|
||||
* Only a Cloudron owner can install/update/exec apps with the docker addon
|
||||
* security: reset tokens are only valid for a day
|
||||
* mail: fix eventlog db perms
|
||||
* Fix various bugs in the disk graphs
|
||||
* Fix collectd installation
|
||||
* graphs: sort disk contents by usage
|
||||
* backups: show apps that are not automatically backed up in backup view
|
||||
* turn: deny local address peers https://www.rtcsec.com/2020/04/01-slack-webrtc-turn-compromise/
|
||||
|
||||
[5.1.3]
|
||||
* Fix crash with misconfigured reverse proxy
|
||||
* Fix issue where invitation links are not working anymore
|
||||
|
||||
[5.1.4]
|
||||
* Add support for custom .well-known documents to be served
|
||||
* Add ECDHE-RSA-AES128-SHA256 to cipher list
|
||||
* Fix GPG signature verification
|
||||
|
||||
[5.1.5]
|
||||
* Check for .well-known routes upstream as fallback. This broke nextcloud's caldav/carddav
|
||||
|
||||
[5.2.0]
|
||||
* acme: request ECC certs
|
||||
* less-strict DKIM check to allow users to set a stronger DKIM key
|
||||
* Add members only flag to mailing list
|
||||
* oauth: add backward compat layer for backup and uninstall
|
||||
* fix bug in disk usage sorting
|
||||
* mail: aliases can be across domains
|
||||
* mail: allow an external MX to be set
|
||||
* Add UI to download backup config as JSON (and import it)
|
||||
* Ensure stopped apps are getting backed up
|
||||
* Add OVH Object Storage backend
|
||||
* Add per-app redis status and configuration to Services
|
||||
* spam: large emails were not scanned
|
||||
* mail relay: fix delivery event log
|
||||
* manual update check always gets the latest updates
|
||||
* graphs: fix issue where large number of apps would crash the box code (query param limit exceeded)
|
||||
* backups: fix various security issues in encrypted backups (thanks @mehdi)
|
||||
* graphs: add app graphs
|
||||
* older encrypted backups cannot be used in this version
|
||||
* Add backup listing UI
|
||||
* stopping an app will stop dependent services
|
||||
* Add new wasabi s3 storage region us-east-2
|
||||
* mail: Fix bug where SRS translation was done on the main domain instead of mailing list domain
|
||||
* backups: add retention policy
|
||||
* Drop `NET_RAW` caps from container preventing sniffing of network traffic
|
||||
|
||||
[5.2.1]
|
||||
* Fix app disk graphs
|
||||
* restart apps on addon container change
|
||||
|
||||
[5.2.2]
|
||||
* regression: import UI
|
||||
* Mbps -> MBps
|
||||
* Remove verbose logs
|
||||
* Set dmode in tar extract
|
||||
* mail: fix crash in audit logs
|
||||
* import: fix crash because encryption is unset
|
||||
* create redis with the correct label
|
||||
|
||||
[5.2.3]
|
||||
* Do not restart stopped apps
|
||||
|
||||
[5.2.4]
|
||||
* mail: enable/disable incoming mail was showing an error
|
||||
* Do not trigger backup of stopped apps. Instead, we will just retain its existing backups
|
||||
based on retention policy
|
||||
* remove broken disk graphs
|
||||
* fix OVH backups
|
||||
|
||||
[5.3.0]
|
||||
* better nginx config for higher loads
|
||||
* backups: add CIFS storage provider
|
||||
* backups: add SSHFS storage provider
|
||||
* backups: add NFS storage provider
|
||||
* s3: use vhost style
|
||||
* Fix crash when redis config was set
|
||||
* Update schedule was unselected in the UI
|
||||
* cloudron-setup: --provider is now optional
|
||||
* show warning for unstable updates
|
||||
* add forumUrl to app manifest
|
||||
* postgresql: add unaccent extension for peertube
|
||||
* mail: Add Auto-Submitted header to NDRs
|
||||
* backups: ensure that the latest backup of installed apps is always preserved
|
||||
* add nginx logs
|
||||
* mail: make authentication case insensitive
|
||||
* Fix timeout issues in postgresql and mysql addon
|
||||
* Do not count stopped apps for memory use
|
||||
* LDAP group synchronization
|
||||
|
||||
[5.3.1]
|
||||
* better nginx config for higher loads
|
||||
* backups: add CIFS storage provider
|
||||
* backups: add SSHFS storage provider
|
||||
* backups: add NFS storage provider
|
||||
* s3: use vhost style
|
||||
* Fix crash when redis config was set
|
||||
* Update schedule was unselected in the UI
|
||||
* cloudron-setup: --provider is now optional
|
||||
* show warning for unstable updates
|
||||
* add forumUrl to app manifest
|
||||
* postgresql: add unaccent extension for peertube
|
||||
* mail: Add Auto-Submitted header to NDRs
|
||||
* backups: ensure that the latest backup of installed apps is always preserved
|
||||
* add nginx logs
|
||||
* mail: make authentication case insensitive
|
||||
* Fix timeout issues in postgresql and mysql addon
|
||||
* Do not count stopped apps for memory use
|
||||
* LDAP group synchronization
|
||||
|
||||
[5.3.2]
|
||||
* Do not install sshfs package
|
||||
* 'provider' is not required anymore in various API calls
|
||||
* redis: Set maxmemory and maxmemory-policy
|
||||
* Add mlock capability to manifest (for vault app)
|
||||
|
||||
[5.3.3]
|
||||
* Fix issue where some postinstall messages where causing angular to infinite loop
|
||||
|
||||
[5.3.4]
|
||||
* Fix issue in database error handling
|
||||
|
||||
[5.4.0]
|
||||
* Update nginx to 1.18 for various security fixes
|
||||
* Add ping capability (for statping app)
|
||||
* Fix bug where aliases were displayed incorrectly in SOGo
|
||||
* Add univention as LDAP provider
|
||||
* Bump max_connection for postgres addon to 200
|
||||
* mail: Add pagination to mailing list API
|
||||
* Allow admin to lock email and display name of users
|
||||
* Allow admin to ensure all users have 2FA setup
|
||||
* ami: fix regression where we didn't send provider as part of get status call
|
||||
* nginx: hide version
|
||||
* backups: add b2 provider
|
||||
* Add filemanager webinterface
|
||||
* Add darkmode
|
||||
* Add note that password reset and invite links expire in 24 hours
|
||||
|
||||
[5.4.1]
|
||||
* Update nginx to 1.18 for various security fixes
|
||||
* Add ping capability (for statping app)
|
||||
* Fix bug where aliases were displayed incorrectly in SOGo
|
||||
* Add univention as LDAP provider
|
||||
* Bump max_connection for postgres addon to 200
|
||||
* mail: Add pagination to mailing list API
|
||||
* Allow admin to lock email and display name of users
|
||||
* Allow admin to ensure all users have 2FA setup
|
||||
* ami: fix regression where we didn't send provider as part of get status call
|
||||
* nginx: hide version
|
||||
* backups: add b2 provider
|
||||
* Add filemanager webinterface
|
||||
* Add darkmode
|
||||
* Add note that password reset and invite links expire in 24 hours
|
||||
|
||||
[5.5.0]
|
||||
* postgresql: update to PostgreSQL 11
|
||||
* postgresql: add citext extension to whitelist for loomio
|
||||
* postgresql: add btree_gist,postgres_fdw,pg_stat_statements,plpgsql extensions for gitlab
|
||||
* SFTP/Filebrowser: fix access of external data directories
|
||||
* Fix contrast issues in dark mode
|
||||
* Add option to delete mailbox data when mailbox is deleted
|
||||
* Allow days/hours of backups and updates to be configurable
|
||||
* backup cleaner: fix issue where referenced backups were not counted against time periods
|
||||
* route53: fix issue where verification failed if user had more than 100 zones
|
||||
* rework task workers to run them in a separate cgroup
|
||||
* backups: now much faster thanks to reworking of task worker
|
||||
* When custom fallback cert is set, make sure it's used over LE certs
|
||||
* mongodb: update to MongoDB 4.0.19
|
||||
* List groups ordered by name
|
||||
* Invite links are now valid for a week
|
||||
* Update release GPG key
|
||||
* Add pre-defined variables ($CLOUDRON_APPID) for better post install messages
|
||||
* filemanager: show folder first
|
||||
|
||||
[5.6.0]
|
||||
* Remove IP nginx configuration that redirects to dashboard after activation
|
||||
* dashboard: looks for search string in app title as well
|
||||
* Add vaapi caps for transcoding
|
||||
* Fix issue where the long mongodb database names where causing app indices of rocket.chat to overflow (> 127)
|
||||
* Do not resize swap if swap file exists. This means that users can now control how swap is allocated on their own.
|
||||
* SFTP: fix issue where parallel rebuilds would cause an error
|
||||
* backups: make part size configurable
|
||||
* mail: set max email size
|
||||
* mail: allow mail server location to be set
|
||||
* spamassassin: custom configs and wl/bl
|
||||
* Do not automatically update to unstable release
|
||||
* scheduler: reduce container churn
|
||||
* mail: add API to set banner
|
||||
* Fix bug where systemd 237 ignores --nice value in systemd-run
|
||||
* postgresql: enable uuid-ossp extension
|
||||
* firewall: add blocklist
|
||||
* HTTP URLs now redirect directly to the HTTPS of the final domain
|
||||
* linode: Add singapore region
|
||||
* ovh: add sydney region
|
||||
* s3: makes multi-part copies in parallel
|
||||
|
||||
[5.6.1]
|
||||
* Blocklists are now stored in a text file instead of json
|
||||
* regenerate nginx configs
|
||||
|
||||
[5.6.2]
|
||||
* Update docker to 19.03.12
|
||||
* Fix sorting of user listing in the UI
|
||||
* namecheap: fix crash when server returns invalid response
|
||||
* unlink ghost file automatically on successful login
|
||||
* Bump mysql addon connection limit to 200
|
||||
* Fix install issue where `/dev/dri` may not be present
|
||||
* import: when importing filesystem backups, the input box is a path
|
||||
* firewall: fix race condition where blocklist was not added in correct position in the FORWARD chain
|
||||
* services: fix issue where services were scaled up/down too fast
|
||||
* turn: realm variable was not updated properly on dashboard change
|
||||
* nginx: add splash pages for IP based browser access
|
||||
* Give services panel a separate top-level view
|
||||
* Add app state filter
|
||||
* gcs: copy concurrency was not used
|
||||
* Mention why an app update cannot be applied and provide shortcut to start the app if stopped
|
||||
* Remove version from footer into the setting view
|
||||
* Give services panel a separate top-level view
|
||||
* postgresql: set collation order explicity when creating database to C.UTF-8 (for confluence)
|
||||
* rsync: fix error while goes missing when syncing
|
||||
* Pre-select app domain by default in the redirection drop down
|
||||
* robots: preserve leading and trailing whitespaces/newlines
|
||||
|
||||
[5.6.3]
|
||||
* Fix postgres locale issue
|
||||
|
||||
[6.0.0]
|
||||
* Focal support
|
||||
* Reduce duration of self-signed certs to 800 days
|
||||
* Better backup config filename when downloading
|
||||
* branding: footer can have template variables like %YEAR% and %VERSION%
|
||||
* sftp: secure the API with a token
|
||||
* filemanager: Add extract context menu item
|
||||
* Do not download docker images if present locally
|
||||
* sftp: disable access to non-admins by default
|
||||
* postgresql: whitelist pgcrypto extension for loomio
|
||||
* filemanager: Add new file creation action and collapse new and upload actions
|
||||
* rsync: add warning to remove lifecycle rules
|
||||
* Add volume management
|
||||
* backups: adjust node's heap size based on memory limit
|
||||
* s3: disable per-chunk timeout
|
||||
* logs: more descriptive log file names on download
|
||||
* collectd: remove collectd config when app stopped (and add it back when started)
|
||||
* Apps can optionally request an authwall to be installed in front of them
|
||||
* mailbox can now be owned by a group
|
||||
* linode: enable dns provider in setup view
|
||||
* dns: apps can now use the dns port
|
||||
* httpPaths: allow apps to specify forwarding from custom paths to container ports (for OLS)
|
||||
* add elasticemail smtp relay option
|
||||
* mail: add option to fts using solr
|
||||
* mail: change the namespace separator of new installations to /
|
||||
* mail: enable acl
|
||||
* Disable THP
|
||||
* filemanager: allow download dirs as zip files
|
||||
* aws: add china region
|
||||
* security: fix issue where apps could send with any username (but valid password)
|
||||
* i18n support
|
||||
|
||||
[6.0.1]
|
||||
* app: add export route
|
||||
* mail: on location change, fix lock up when one or more domains have invalid credentials
|
||||
* mail: fix crash because of write after timeout closure
|
||||
* scaleway: fix installation issue where THP is not enabled in kernel
|
||||
|
||||
[6.1.0]
|
||||
* mail: update haraka to 2.8.27. this fixes zero-length queue file crash
|
||||
* update: set/unset appStoreId from the update route
|
||||
* proxyauth: Do not follow redirects
|
||||
* proxyauth: add 2FA
|
||||
* appstore: add category translations
|
||||
* appstore: add media category
|
||||
* prepend the version to assets when sourcing to avoid cache hits on update
|
||||
* filemanger: list volumes of the app
|
||||
* Display upload size and size progress
|
||||
* nfs: chown the backups for hardlinks to work
|
||||
* remove user add/remove/role change email notifications
|
||||
* persist update indicator across restarts
|
||||
* cloudron-setup: add --generate-setup-token
|
||||
* dashboard: pass accessToken query param to automatically login
|
||||
* wellknown: add a way to set well known docs
|
||||
* oom: notification mails have links to dashboard
|
||||
* collectd: do not install xorg* packages
|
||||
* apptask: backup/restore tasks now use the backup memory limit configuration
|
||||
* eventlog: add logout event
|
||||
* mailbox: include alias in mailbox search
|
||||
* proxyAuth: add path exclusion
|
||||
* turn: fix for CVE-2020-26262
|
||||
* app password: fix regression where apps are not listed anymore in the UI
|
||||
* Support for multiDomain apps (domain aliases)
|
||||
* netcup: add dns provider
|
||||
* Container swap size is now dynamically determined based on system RAM/swap ratio
|
||||
|
||||
[6.1.1]
|
||||
* Fix bug where platform does not start if memory limits could not be applied
|
||||
|
||||
[6.1.2]
|
||||
* App disk usage was not shown in graphs
|
||||
* Email autoconfig
|
||||
* Fix SOGo login
|
||||
|
||||
[6.2.0]
|
||||
* ovh: object storage URL has changed from s3 to storage subdomain
|
||||
* ionos: add profit bricks object storage
|
||||
* update node to 14.15.4
|
||||
* update docker to 20.10.3
|
||||
* new base image 3.0.0
|
||||
* postgresql updated to 12.5
|
||||
* redis updated to 5.0.7
|
||||
* dovecot updated to 2.3.7
|
||||
* proxyAuth: fix docker UA detection
|
||||
* registry config: add UI to disable it
|
||||
* update solr to 8.8.1
|
||||
* firewall: fix issue where script errored when having more than 15 wl/bl ports
|
||||
* If groups are used, do not allow app installation without choosing the access settings
|
||||
* tls addon
|
||||
* Do not overwrite existing DMARC record
|
||||
* Sync dns records
|
||||
* Dry run restore
|
||||
* linode: show cloudron is installing when user SSHs
|
||||
* mysql: disable bin logs
|
||||
* Show cancel task button if task is still running after 2 minutes
|
||||
* filemanager: fix various bugs involving file names with spaces
|
||||
* Change Referrer-policy default to 'same-origin'
|
||||
* rsync: preserve and restore symlinks
|
||||
* Clean up backups function now removes missing backups
|
||||
|
||||
[6.2.1]
|
||||
* Avoid updown notifications on full restore
|
||||
* Add retries to downloader logic in installer
|
||||
|
||||
[6.2.2]
|
||||
* Fix ENOBUFS issue with backups when collecting fs metadata
|
||||
|
||||
[6.2.3]
|
||||
* Fix addon crashes with missing databases
|
||||
* Update mail container for LMTP cert fix
|
||||
* Fix services view showing yellow icon
|
||||
|
||||
[6.2.4]
|
||||
* Another addon crash fix
|
||||
|
||||
[6.2.5]
|
||||
* update: set memory limit properly
|
||||
* Fix bug where renew certs button did not work
|
||||
* sftp: fix rebuild condition
|
||||
* Fix display of user management/dashboard visibility for email apps
|
||||
* graphite: disable tagdb and reduce log noise
|
||||
|
||||
[6.2.6]
|
||||
* Fix issue where collectd is restarted too quickly before graphite
|
||||
|
||||
[6.2.7]
|
||||
* redis: backup before upgrade
|
||||
|
||||
[6.2.8]
|
||||
* linode object storage: update aws sdk to make it work again
|
||||
* Fix crash in blocklist setting when source and list have mixed ip versions
|
||||
* mysql: bump connection limit to 200
|
||||
* namecheap: fix issue where DNS updates and del were not working
|
||||
* turn: turn off verbose logging
|
||||
* Fix crash when parsing df output (set LC_ALL for box service)
|
||||
|
||||
[6.3.0]
|
||||
* mail: allow TLS from internal hosts
|
||||
* tokens: add lastUsedTime
|
||||
* update: set memory limit properly
|
||||
* addons: better error handling
|
||||
* filemanager: various enhancements
|
||||
* sftp: fix rebuild condition
|
||||
* app mailbox is now optional
|
||||
* Fix display of user management/dashboard visibility for email apps
|
||||
* graphite: disable tagdb and reduce log noise
|
||||
* hsts: change max-age to 2 years
|
||||
* clone: copy over redis memory limit
|
||||
* namecheap: fix bug where records were not removed
|
||||
* add UI to disable 2FA of a user
|
||||
* mail: add active flag to mailboxes and lists
|
||||
* Implement OCSP stapling
|
||||
* security: send new browser login location notification email
|
||||
* backups: add fqdn to the backup filename
|
||||
* import all boxdata settings into the database
|
||||
* volumes: generate systemd mount configs based on type
|
||||
* postgresql: set max conn limit per db
|
||||
* ubuntu 16: add alert about EOL
|
||||
* clone: save and restore app config
|
||||
* app import: restore icon, tag, label, proxy configs etc
|
||||
* sieve: fix redirects to not do SRS
|
||||
* notifications are now system level instead of per-user
|
||||
* vultr DNS
|
||||
* vultr object storage
|
||||
* mail: do not forward spam to mailing lists
|
||||
|
||||
[6.3.1]
|
||||
* Fix cert migration issues
|
||||
|
||||
[6.3.2]
|
||||
* Avatar was migrated as base64 instead of binary
|
||||
* Fix issue where filemanager came up empty for CIFS mounts
|
||||
|
||||
[6.3.3]
|
||||
* volumes: add filesystem volume type for shared folders
|
||||
* mail: enable sieve extension editheader
|
||||
* mail: update solr to 8.9.0
|
||||
|
||||
[6.3.4]
|
||||
* Fix issue where old nginx configs were not removed before upgrade
|
||||
|
||||
[6.3.5]
|
||||
* Fix permission issues with sshfs
|
||||
* filemanager: reset selection if directory has changed
|
||||
* branding: fix error highlight with empty cloudron name
|
||||
* better text instead of "Cloudron in the wild"
|
||||
* Make sso login hint translatable
|
||||
* Give unread notifications a small left border
|
||||
* Fix issue where clicking update indicator opened app in new tab
|
||||
* Ensure notifications are only fetched and shown for at least admins
|
||||
* setupaccount: Show input field errors below input field
|
||||
* Set focus automatically for new alias or redirect
|
||||
* eventlog: fix issue where old events are not periodically removed
|
||||
* sshfs: fix chown
|
||||
|
||||
[6.3.6]
|
||||
* Fix broken reboot button
|
||||
* app updated notification shown despite failure
|
||||
* Update translation for sso login information
|
||||
* Hide groups/tags/state filter in app listing for normal users
|
||||
* filemanager: Ensure breadcrumbs and hash are correctly updated on folder navigation
|
||||
* cloudron-setup: check if nginx/docker is already installed
|
||||
* Use the addresses of all available interfaces for port 53 binding
|
||||
* refresh config on appstore login
|
||||
* password reset: check 2fa when enabled
|
||||
|
||||
[7.0.0]
|
||||
* Ubuntu 16 is not supported anymore
|
||||
* Do not use Gravatar as the default but only an option
|
||||
* redis: suppress password warning
|
||||
* setup UI: fix dark mode
|
||||
* wellknown: response to .wellknown/matrix/client
|
||||
* purpose field is not required anymore during appstore signup
|
||||
* sftp: fix symlink deletion
|
||||
* Show correct/new app version info in updated finished notification
|
||||
* Make new login email translatable
|
||||
* Hide ticket form if cloudron.io mail is not verified
|
||||
* Refactor code to use async/await
|
||||
* postgresql: bump shm size and disable parallel queries
|
||||
* update nodejs to 14.17.6
|
||||
* external ldap: If we detect a local user with the same username as found on LDAP/AD we map it
|
||||
* add basic eventlog for apps in app view
|
||||
* Enable sshfs/cifs/nfs in app import UI
|
||||
* Require password for fallback email change
|
||||
* Make password reset logic translatable
|
||||
* support: only verified email address can open support tickets
|
||||
* Logout users without 2FA when mandatory 2fa is enabled
|
||||
* notifications: better oom message for redis
|
||||
* Add way to impersonate users for presetup
|
||||
* mail: open up port 465 for mail submission (TLS)
|
||||
* Implement operator role for apps
|
||||
* sftp: normal users do not have SFTP access anymore. Use operator role instead
|
||||
* eventlog: add service rebuild/restart/configure events
|
||||
* upcloud: add object storage integration
|
||||
* Each app can now have a custom crontab
|
||||
* services: add recovery mode
|
||||
* postgresql: fix restore issue with long table names
|
||||
* recvmail: make the addon work again
|
||||
* mail: update solr to 8.10.0
|
||||
* mail: POP3 support
|
||||
* update docker to 20.10.7
|
||||
* volumes: add remount button
|
||||
* mail: add spam eventlog filter type
|
||||
* mail: configure dnsbl
|
||||
* mail: add duplication detection for lists
|
||||
* mail: add SRS for Sieve Forwarding
|
||||
|
||||
[7.0.1]
|
||||
* Fix matrix wellKnown client migration
|
||||
|
||||
[7.0.2]
|
||||
* mail: POP3 flag was not returned correctly
|
||||
* external ldap: fix crash preventing users from logging in
|
||||
* volumes: ensure we don't crash if mount status is unexpected
|
||||
* backups: set default backup memory limit to 800
|
||||
* users: allow admins to specify password recovery email
|
||||
* retry startup tasks on database error
|
||||
|
||||
[7.0.3]
|
||||
* support: fix remote support not working for 'root' user
|
||||
* Fix cog icon on app grid item hover for darkmode
|
||||
* Disable password reset and impersonate button for self user instead of hiding them
|
||||
* pop3: fix crash with auth of non-existent mailbox
|
||||
* mail: fix direction field in eventlog of deferred mails
|
||||
* mail: fix eventlog search
|
||||
* mail: save message-id in eventlog
|
||||
* backups: fix issue which resulted in incomplete backups when an app has backups disabled
|
||||
* restore: do not redirect until mail data has been restored
|
||||
* proxyauth: set viewport meta tag in login view
|
||||
|
||||
[7.0.4]
|
||||
* Add password reveal button to login pages
|
||||
* appstore: fix crash if account already registered
|
||||
* Do not nuke all the logrotate configs on update
|
||||
* Remove unused httpPaths from manifest
|
||||
* cloudron-support: add option to reset cloudron.io account
|
||||
* Fix flicker in login page
|
||||
* Fix LE account key re-use issue in DO 1-click image
|
||||
* mail: add non-tls ports for recvmail addon
|
||||
* backups: fix issue where mail backups were not cleaned up
|
||||
* notifications: fix automatic app update notifications
|
||||
|
||||
[7.1.0]
|
||||
* Add mail manager role
|
||||
* mailbox: app can be set as owner when recvmail addon enabled
|
||||
* domains: add well known config UI (for jitsi configuration)
|
||||
* Prefix email addon variables with CLOUDRON_EMAIL instead of CLOUDRON_MAIL
|
||||
* remove support for manifest version 1
|
||||
* Add option to enable/disable mailbox sharing
|
||||
* base image 3.2.0
|
||||
* Update node to 16.13.1
|
||||
* mongodb: update to 4.4
|
||||
* Add `upstreamVersion` to manifest
|
||||
* Add `logPaths` to manifest
|
||||
* Add cifs seal support for backup and volume mounts
|
||||
* add a way for admins to set username when profiles are locked
|
||||
* Add support for secondary domains
|
||||
* postgresql: enable postgis
|
||||
* remove nginx config of stopped apps
|
||||
* mail: use port25check.cloudron.io to check outbound port 25 connectivity
|
||||
* Add import/export of mailboxes and users
|
||||
* LDAP server can now be exposed
|
||||
* Update monaco-editor to 0.32.1
|
||||
* Update xterm.js to 4.17.0
|
||||
* Update docker to 20.10.12
|
||||
* IPv6 support
|
||||
|
||||
[7.1.1]
|
||||
* Fix issue where dkimKey of a mail domain is sometimes null
|
||||
* firewall: add retry for xtables lock
|
||||
* redis: fix issue where protected mode was enabled with no password
|
||||
|
||||
[7.1.2]
|
||||
* Fix crash in cloudron-firewall when ports are whitelisted
|
||||
* eventlog: add event for certificate cleanup
|
||||
* eventlog: log event for mailbox alias update
|
||||
* backups: fix incorrect mountpoint check with managed mounts
|
||||
|
||||
[7.1.3]
|
||||
* Fix security issue where an admin can impersonate an owner
|
||||
* block list: can upload up to 2MB
|
||||
* dns: fix issue where link local address was picked up for ipv6
|
||||
* setup: ufw may not be installed
|
||||
* mysql: fix default collation of databases
|
||||
|
||||
[7.1.4]
|
||||
* wildcard dns: fix handling of ENODATA
|
||||
* cloudflare: fix error handling
|
||||
* openvpn: ipv6 support
|
||||
* dyndns: fix issue where eventlog was getting filled with empty entries
|
||||
* mandatory 2fa: Fix typo in 2FA check
|
||||
|
||||
[7.2.0]
|
||||
* mail: hide log button for non-superadmins
|
||||
* firewall: do not add duplicate ldap redirect rules
|
||||
* ldap: respond to RootDSE
|
||||
* Check if CNAME record exists and remove it if overwrite is set
|
||||
* cifs: use credentials file for better password support
|
||||
* installer: rework script to fix DNS resolution issues
|
||||
* backup cleaner: do not clean if not mounted
|
||||
* restore: fix sftp private key perms
|
||||
* support: add a separate system user named cloudron-support
|
||||
* sshfs: fix bug where sshfs mounts were generated without unbound dependency
|
||||
* cloudron-setup: add --setup-token
|
||||
* notifications: add installation event
|
||||
* backups: set label of backup and control its retention
|
||||
* wasabi: add new regions (London, Frankfurt, Paris, Toronto)
|
||||
* docker: update to 20.10.14
|
||||
* Ensure LDAP usernames are always treated lowercase
|
||||
* Add a way to make LDAP users local
|
||||
* proxyAuth: set X-Remote-User (rfc3875)
|
||||
* GoDaddy: there is now a delete API
|
||||
* nginx: use ubuntu packages for ubuntu 20.04 and 22.04
|
||||
* Ubuntu 22.04 LTS support
|
||||
* Add Hetzner DNS
|
||||
* cron: add support for extensions (@reboot, @weekly etc)
|
||||
* Add profile backgroundImage api
|
||||
* exec: rework API to get exit code
|
||||
* Add update available filter
|
||||
|
||||
[7.2.1]
|
||||
* Refactor backup code to use async/await
|
||||
* mongodb: fix bug where a small timeout prevented import of large backups
|
||||
* Add update available filter
|
||||
* exec: rework API to get exit code
|
||||
* Add profile backgroundImage api
|
||||
* cron: add support for extensions (@reboot, @weekly etc)
|
||||
|
||||
[7.2.2]
|
||||
* Update cloudron-manifestformat for new scheduler patterns
|
||||
* collectd: FQDNLookup causes collectd install to fail
|
||||
|
||||
[7.2.3]
|
||||
* appstore: allow re-registration on server side delete
|
||||
* transfer ownership route is not used anymore
|
||||
* graphite: fix issue where disk names with '.' do not render
|
||||
* dark mode fixes
|
||||
* sendmail: mail from display name
|
||||
* Use volumes for app data instead of raw path
|
||||
* initial xfs support
|
||||
|
||||
|
||||
2
LICENSE
2
LICENSE
@@ -1,5 +1,5 @@
|
||||
The Cloudron Subscription license
|
||||
Copyright (c) 2022 Cloudron UG
|
||||
Copyright (c) 2020 Cloudron UG
|
||||
|
||||
With regard to the Cloudron Software:
|
||||
|
||||
|
||||
41
README.md
41
README.md
@@ -1,5 +1,3 @@
|
||||

|
||||
|
||||
# Cloudron
|
||||
|
||||
[Cloudron](https://cloudron.io) is the best way to run apps on your server.
|
||||
@@ -31,9 +29,9 @@ anyone to effortlessly host web applications on their server on their own terms.
|
||||
* Trivially migrate to another server keeping your apps and data (for example, switch your
|
||||
infrastructure provider or move to a bigger server).
|
||||
|
||||
* Comprehensive [REST API](https://docs.cloudron.io/api/).
|
||||
* Comprehensive [REST API](https://cloudron.io/documentation/developer/api/).
|
||||
|
||||
* [CLI](https://docs.cloudron.io/custom-apps/cli/) to configure apps.
|
||||
* [CLI](https://cloudron.io/documentation/cli/) to configure apps.
|
||||
|
||||
* Alerts, audit logs, graphs, dns management ... and much more
|
||||
|
||||
@@ -43,42 +41,25 @@ Try our demo at https://my.demo.cloudron.io (username: cloudron password: cloudr
|
||||
|
||||
## Installing
|
||||
|
||||
[Install script](https://docs.cloudron.io/installation/) - [Pricing](https://cloudron.io/pricing.html)
|
||||
[Install script](https://cloudron.io/documentation/installation/) - [Pricing](https://cloudron.io/pricing.html)
|
||||
|
||||
**Note:** This repo is a small part of what gets installed on your server - there is
|
||||
the dashboard, database addons, graph container, base image etc. Cloudron also relies
|
||||
on external services such as the App Store for apps to be installed. As such, don't
|
||||
clone this repo and npm install and expect something to work.
|
||||
|
||||
## Development
|
||||
## Documentation
|
||||
|
||||
This is the backend code of Cloudron. The frontend code is [here](https://git.cloudron.io/cloudron/dashboard).
|
||||
* [Documentation](https://cloudron.io/documentation/)
|
||||
|
||||
The way to develop is to first install a full instance of Cloudron in a VM. Then you can use the [hotfix](https://git.cloudron.io/cloudron/cloudron-machine)
|
||||
tool to patch the VM with the latest code.
|
||||
## Related repos
|
||||
|
||||
```
|
||||
SSH_PASSPHRASE=sshkeypassword cloudron-machine hotfix --cloudron my.example.com --release 6.0.0 --ssh-key keyname
|
||||
```
|
||||
The [base image repo](https://git.cloudron.io/cloudron/docker-base-image) is the parent image of all
|
||||
the containers in the Cloudron.
|
||||
|
||||
## License
|
||||
## Community
|
||||
|
||||
Please note that the Cloudron code is under a source-available license. This is not the same as an
|
||||
open source license but ensures the code is available for introspection (and hacking!).
|
||||
|
||||
## Contributions
|
||||
|
||||
Just to give some heads up, we are a bit restrictive in merging changes. We are a small team and
|
||||
would like to keep our maintenance burden low. It might be best to discuss features first in the [forum](https://forum.cloudron.io),
|
||||
to also figure out how many other people will use it to justify maintenance for a feature.
|
||||
|
||||
# Localization
|
||||
|
||||

|
||||
|
||||
## Support
|
||||
|
||||
* [Documentation](https://docs.cloudron.io/)
|
||||
* [Chat](https://chat.cloudron.io)
|
||||
* [Forum](https://forum.cloudron.io/)
|
||||
|
||||
* [Support](mailto:support@cloudron.io)
|
||||
|
||||
|
||||
193
baseimage/createAMI
Executable file
193
baseimage/createAMI
Executable file
@@ -0,0 +1,193 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
assertNotEmpty() {
|
||||
: "${!1:? "$1 is not set."}"
|
||||
}
|
||||
|
||||
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"
|
||||
export JSON="${SOURCE_DIR}/node_modules/.bin/json"
|
||||
|
||||
INSTANCE_TYPE="t2.micro"
|
||||
BLOCK_DEVICE="DeviceName=/dev/sda1,Ebs={VolumeSize=20,DeleteOnTermination=true,VolumeType=gp2}"
|
||||
SSH_KEY_NAME="id_rsa_yellowtent"
|
||||
|
||||
revision=$(git rev-parse HEAD)
|
||||
ami_name=""
|
||||
server_id=""
|
||||
server_ip=""
|
||||
destroy_server="yes"
|
||||
deploy_env="prod"
|
||||
image_id=""
|
||||
|
||||
args=$(getopt -o "" -l "revision:,name:,no-destroy,env:,region:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--env) deploy_env="$2"; shift 2;;
|
||||
--revision) revision="$2"; shift 2;;
|
||||
--name) ami_name="$2"; shift 2;;
|
||||
--no-destroy) destroy_server="no"; shift 2;;
|
||||
--region)
|
||||
case "$2" in
|
||||
"us-east-1")
|
||||
image_id="ami-6edd3078"
|
||||
security_group="sg-a5e17fd9"
|
||||
subnet_id="subnet-b8fbc0f1"
|
||||
;;
|
||||
"eu-central-1")
|
||||
image_id="ami-5aee2235"
|
||||
security_group="sg-19f5a770" # everything open on eu-central-1
|
||||
subnet_id=""
|
||||
;;
|
||||
*)
|
||||
echo "Unknown aws region $2"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
export AWS_DEFAULT_REGION="$2" # used by the aws cli tool
|
||||
shift 2
|
||||
;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
# TODO fix this
|
||||
export AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY}"
|
||||
export AWS_SECRET_ACCESS_KEY="${AWS_ACCESS_SECRET}"
|
||||
|
||||
readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent"
|
||||
readonly SSH="ssh -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
|
||||
|
||||
if [[ ! -f "${ssh_keys}" ]]; then
|
||||
echo "caas ssh key is missing at ${ssh_keys} (pick it up from secrets repo)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${image_id}" ]]; then
|
||||
echo "--region is required (us-east-1 or eu-central-1)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function get_pretty_revision() {
|
||||
local git_rev="$1"
|
||||
local sha1=$(git rev-parse --short "${git_rev}" 2>/dev/null)
|
||||
|
||||
echo "${sha1}"
|
||||
}
|
||||
|
||||
function wait_for_ssh() {
|
||||
echo "=> Waiting for ssh connection"
|
||||
while true; do
|
||||
echo -n "."
|
||||
|
||||
if $SSH ubuntu@${server_ip} echo "hello"; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 5
|
||||
done
|
||||
}
|
||||
|
||||
now=$(date "+%Y-%m-%d-%H%M%S")
|
||||
pretty_revision=$(get_pretty_revision "${revision}")
|
||||
|
||||
if [[ -z "${ami_name}" ]]; then
|
||||
ami_name="box-${deploy_env}-${pretty_revision}-${now}"
|
||||
fi
|
||||
|
||||
echo "=> Create EC2 instance"
|
||||
id=$(aws ec2 run-instances --image-id "${image_id}" --instance-type "${INSTANCE_TYPE}" --security-group-ids "${security_group}" --block-device-mappings "${BLOCK_DEVICE}" --key-name "${SSH_KEY_NAME}" --subnet-id "${subnet_id}" --associate-public-ip-address \
|
||||
| $JSON Instances \
|
||||
| $JSON 0.InstanceId)
|
||||
|
||||
[[ -z "$id" ]] && exit 1
|
||||
echo "Instance created ID $id"
|
||||
|
||||
echo "=> Waiting for instance to get a public IP"
|
||||
while true; do
|
||||
server_ip=$(aws ec2 describe-instances --instance-ids ${id} \
|
||||
| $JSON Reservations.0.Instances \
|
||||
| $JSON 0.PublicIpAddress)
|
||||
|
||||
if [[ ! -z "${server_ip}" ]]; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
echo -n "."
|
||||
sleep 1
|
||||
done
|
||||
|
||||
echo "Got public IP ${server_ip}"
|
||||
|
||||
wait_for_ssh
|
||||
|
||||
echo "=> Fetching cloudron-setup"
|
||||
while true; do
|
||||
|
||||
if $SSH ubuntu@${server_ip} wget "https://cloudron.io/cloudron-setup" -O "cloudron-setup"; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
echo -n "."
|
||||
sleep 5
|
||||
done
|
||||
|
||||
echo "=> Running cloudron-setup"
|
||||
$SSH ubuntu@${server_ip} sudo /bin/bash "cloudron-setup" --env "${deploy_env}" --provider "ami" --skip-reboot
|
||||
|
||||
wait_for_ssh
|
||||
|
||||
echo "=> Removing ssh key"
|
||||
$SSH ubuntu@${server_ip} sudo rm /home/ubuntu/.ssh/authorized_keys /root/.ssh/authorized_keys
|
||||
|
||||
echo "=> Creating AMI"
|
||||
image_id=$(aws ec2 create-image --instance-id "${id}" --name "${ami_name}" | $JSON ImageId)
|
||||
[[ -z "$id" ]] && exit 1
|
||||
echo "Creating AMI with Id ${image_id}"
|
||||
|
||||
echo "=> Waiting for AMI to be created"
|
||||
while true; do
|
||||
state=$(aws ec2 describe-images --image-ids ${image_id} \
|
||||
| $JSON Images \
|
||||
| $JSON 0.State)
|
||||
|
||||
if [[ "${state}" == "available" ]]; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
echo -n "."
|
||||
sleep 5
|
||||
done
|
||||
|
||||
if [[ "${destroy_server}" == "yes" ]]; then
|
||||
echo "=> Deleting EC2 instance"
|
||||
|
||||
while true; do
|
||||
state=$(aws ec2 terminate-instances --instance-id "${id}" \
|
||||
| $JSON TerminatingInstances \
|
||||
| $JSON 0.CurrentState.Name)
|
||||
|
||||
if [[ "${state}" == "shutting-down" ]]; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
echo -n "."
|
||||
sleep 5
|
||||
done
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Done."
|
||||
echo ""
|
||||
echo "New AMI is: ${image_id}"
|
||||
echo ""
|
||||
261
baseimage/digitalocean.sh
Normal file
261
baseimage/digitalocean.sh
Normal file
@@ -0,0 +1,261 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [[ -z "${DIGITAL_OCEAN_TOKEN}" ]]; then
|
||||
echo "Script requires DIGITAL_OCEAN_TOKEN env to be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${JSON}" ]]; then
|
||||
echo "Script requires JSON env to be set to path of JSON binary"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
readonly CURL="curl --retry 5 -s -u ${DIGITAL_OCEAN_TOKEN}:"
|
||||
|
||||
function debug() {
|
||||
echo "$@" >&2
|
||||
}
|
||||
|
||||
function get_ssh_key_id() {
|
||||
id=$($CURL "https://api.digitalocean.com/v2/account/keys" \
|
||||
| $JSON ssh_keys \
|
||||
| $JSON -c "this.name === \"$1\"" \
|
||||
| $JSON 0.id)
|
||||
[[ -z "$id" ]] && exit 1
|
||||
echo "$id"
|
||||
}
|
||||
|
||||
function create_droplet() {
|
||||
local ssh_key_id="$1"
|
||||
local box_name="$2"
|
||||
|
||||
local image_region="sfo2"
|
||||
local ubuntu_image_slug="ubuntu-16-04-x64"
|
||||
local box_size="1gb"
|
||||
|
||||
local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}"
|
||||
|
||||
id=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets" | $JSON droplet.id)
|
||||
[[ -z "$id" ]] && exit 1
|
||||
echo "$id"
|
||||
}
|
||||
|
||||
function get_droplet_ip() {
|
||||
local droplet_id="$1"
|
||||
ip=$($CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}" | $JSON "droplet.networks.v4[0].ip_address")
|
||||
[[ -z "$ip" ]] && exit 1
|
||||
echo "$ip"
|
||||
}
|
||||
|
||||
function get_droplet_id() {
|
||||
local droplet_name="$1"
|
||||
id=$($CURL "https://api.digitalocean.com/v2/droplets?per_page=200" | $JSON "droplets" | $JSON -c "this.name === '${droplet_name}'" | $JSON "[0].id")
|
||||
[[ -z "$id" ]] && exit 1
|
||||
echo "$id"
|
||||
}
|
||||
|
||||
function power_off_droplet() {
|
||||
local droplet_id="$1"
|
||||
local data='{"type":"power_off"}'
|
||||
local response=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions")
|
||||
local event_id=`echo "${response}" | $JSON action.id`
|
||||
|
||||
if [[ -z "${event_id}" ]]; then
|
||||
debug "Got no event id, assuming already powered off."
|
||||
debug "Response: ${response}"
|
||||
return
|
||||
fi
|
||||
|
||||
debug "Powered off droplet. Event id: ${event_id}"
|
||||
debug -n "Waiting for droplet to power off"
|
||||
|
||||
while true; do
|
||||
local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status`
|
||||
if [[ "${event_status}" == "completed" ]]; then
|
||||
break
|
||||
fi
|
||||
debug -n "."
|
||||
sleep 10
|
||||
done
|
||||
debug ""
|
||||
}
|
||||
|
||||
function power_on_droplet() {
|
||||
local droplet_id="$1"
|
||||
local data='{"type":"power_on"}'
|
||||
local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id`
|
||||
|
||||
debug "Powered on droplet. Event id: ${event_id}"
|
||||
|
||||
if [[ -z "${event_id}" ]]; then
|
||||
debug "Got no event id, assuming already powered on"
|
||||
return
|
||||
fi
|
||||
|
||||
debug -n "Waiting for droplet to power on"
|
||||
|
||||
while true; do
|
||||
local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status`
|
||||
if [[ "${event_status}" == "completed" ]]; then
|
||||
break
|
||||
fi
|
||||
debug -n "."
|
||||
sleep 10
|
||||
done
|
||||
debug ""
|
||||
}
|
||||
|
||||
function get_image_id() {
|
||||
local snapshot_name="$1"
|
||||
local image_id=""
|
||||
|
||||
if ! response=$($CURL "https://api.digitalocean.com/v2/images?per_page=200"); then
|
||||
echo "Failed to get image listing. ${response}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! image_id=$(echo "$response" \
|
||||
| $JSON images \
|
||||
| $JSON -c "this.name === \"${snapshot_name}\"" 0.id); then
|
||||
echo "Failed to parse curl response: ${response}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [[ -z "${image_id}" ]]; then
|
||||
echo "Failed to get image id of ${snapshot_name}. reponse: ${response}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "${image_id}"
|
||||
}
|
||||
|
||||
function snapshot_droplet() {
|
||||
local droplet_id="$1"
|
||||
local snapshot_name="$2"
|
||||
local data="{\"type\":\"snapshot\",\"name\":\"${snapshot_name}\"}"
|
||||
local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id`
|
||||
|
||||
debug "Droplet snapshotted as ${snapshot_name}. Event id: ${event_id}"
|
||||
debug -n "Waiting for snapshot to complete"
|
||||
|
||||
while true; do
|
||||
if ! response=$($CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}"); then
|
||||
echo "Could not get action status. ${response}"
|
||||
continue
|
||||
fi
|
||||
if ! event_status=$(echo "${response}" | $JSON action.status); then
|
||||
echo "Could not parse action.status from response. ${response}"
|
||||
continue
|
||||
fi
|
||||
if [[ "${event_status}" == "completed" ]]; then
|
||||
break
|
||||
fi
|
||||
debug -n "."
|
||||
sleep 10
|
||||
done
|
||||
debug "! done"
|
||||
|
||||
if ! image_id=$(get_image_id "${snapshot_name}"); then
|
||||
return 1
|
||||
fi
|
||||
echo "${image_id}"
|
||||
}
|
||||
|
||||
function destroy_droplet() {
|
||||
local droplet_id="$1"
|
||||
# TODO: check for 204 status
|
||||
$CURL -X DELETE "https://api.digitalocean.com/v2/droplets/${droplet_id}"
|
||||
debug "Droplet destroyed"
|
||||
debug ""
|
||||
}
|
||||
|
||||
function transfer_image() {
|
||||
local image_id="$1"
|
||||
local region_slug="$2"
|
||||
local data="{\"type\":\"transfer\",\"region\":\"${region_slug}\"}"
|
||||
local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/images/${image_id}/actions" | $JSON action.id`
|
||||
echo "${event_id}"
|
||||
}
|
||||
|
||||
function wait_for_image_event() {
|
||||
local image_id="$1"
|
||||
local event_id="$2"
|
||||
|
||||
debug -n "Waiting for ${event_id}"
|
||||
|
||||
while true; do
|
||||
local event_status=`$CURL "https://api.digitalocean.com/v2/images/${image_id}/actions/${event_id}" | $JSON action.status`
|
||||
if [[ "${event_status}" == "completed" ]]; then
|
||||
break
|
||||
fi
|
||||
debug -n "."
|
||||
sleep 10
|
||||
done
|
||||
debug ""
|
||||
}
|
||||
|
||||
function transfer_image_to_all_regions() {
|
||||
local image_id="$1"
|
||||
|
||||
xfer_events=()
|
||||
image_regions=(ams2) ## sfo1 is where the image is created
|
||||
for image_region in ${image_regions[@]}; do
|
||||
xfer_event=$(transfer_image ${image_id} ${image_region})
|
||||
echo "Image transfer to ${image_region} initiated. Event id: ${xfer_event}"
|
||||
xfer_events+=("${xfer_event}")
|
||||
sleep 1
|
||||
done
|
||||
|
||||
echo "Image transfer initiated, but they will take some time to get transferred."
|
||||
|
||||
for xfer_event in ${xfer_events[@]}; do
|
||||
$vps wait_for_image_event "${image_id}" "${xfer_event}"
|
||||
done
|
||||
}
|
||||
|
||||
if [[ $# -lt 1 ]]; then
|
||||
debug "<command> <params...>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
case $1 in
|
||||
get_ssh_key_id)
|
||||
get_ssh_key_id "${@:2}"
|
||||
;;
|
||||
|
||||
create)
|
||||
create_droplet "${@:2}"
|
||||
;;
|
||||
|
||||
get_id)
|
||||
get_droplet_id "${@:2}"
|
||||
;;
|
||||
|
||||
get_ip)
|
||||
get_droplet_ip "${@:2}"
|
||||
;;
|
||||
|
||||
power_on)
|
||||
power_on_droplet "${@:2}"
|
||||
;;
|
||||
|
||||
power_off)
|
||||
power_off_droplet "${@:2}"
|
||||
;;
|
||||
|
||||
snapshot)
|
||||
snapshot_droplet "${@:2}"
|
||||
;;
|
||||
|
||||
destroy)
|
||||
destroy_droplet "${@:2}"
|
||||
;;
|
||||
|
||||
transfer_image_to_all_regions)
|
||||
transfer_image_to_all_regions "${@:2}"
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Unknown command $1"
|
||||
exit 1
|
||||
esac
|
||||
154
baseimage/initializeBaseUbuntuImage.sh
Executable file
154
baseimage/initializeBaseUbuntuImage.sh
Executable file
@@ -0,0 +1,154 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euv -o pipefail
|
||||
|
||||
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
readonly arg_provider="${1:-generic}"
|
||||
readonly arg_infraversionpath="${SOURCE_DIR}/${2:-}"
|
||||
|
||||
function die {
|
||||
echo $1
|
||||
exit 1
|
||||
}
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# hold grub since updating it breaks on some VPS providers. also, dist-upgrade will trigger it
|
||||
apt-mark hold grub* >/dev/null
|
||||
apt-get -o Dpkg::Options::="--force-confdef" update -y
|
||||
apt-get -o Dpkg::Options::="--force-confdef" upgrade -y
|
||||
apt-mark unhold grub* >/dev/null
|
||||
|
||||
echo "==> Installing required packages"
|
||||
|
||||
debconf-set-selections <<< 'mysql-server mysql-server/root_password password password'
|
||||
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password password'
|
||||
|
||||
# this enables automatic security upgrades (https://help.ubuntu.com/community/AutomaticSecurityUpdates)
|
||||
# resolvconf is needed for unbound to work property after disabling systemd-resolved in 18.04
|
||||
ubuntu_version=$(lsb_release -rs)
|
||||
ubuntu_codename=$(lsb_release -cs)
|
||||
gpg_package=$([[ "${ubuntu_version}" == "16.04" ]] && echo "gnupg" || echo "gpg")
|
||||
apt-get -y install \
|
||||
acl \
|
||||
build-essential \
|
||||
cifs-utils \
|
||||
cron \
|
||||
curl \
|
||||
debconf-utils \
|
||||
dmsetup \
|
||||
$gpg_package \
|
||||
iptables \
|
||||
libpython2.7 \
|
||||
linux-generic \
|
||||
logrotate \
|
||||
mysql-server-5.7 \
|
||||
nginx-full \
|
||||
openssh-server \
|
||||
pwgen \
|
||||
resolvconf \
|
||||
swaks \
|
||||
tzdata \
|
||||
unattended-upgrades \
|
||||
unbound \
|
||||
xfsprogs
|
||||
|
||||
# on some providers like scaleway the sudo file is changed and we want to keep the old one
|
||||
apt-get -o Dpkg::Options::="--force-confold" install -y sudo
|
||||
|
||||
# this ensures that unattended upgades are enabled, if it was disabled during ubuntu install time (see #346)
|
||||
# debconf-set-selection of unattended-upgrades/enable_auto_updates + dpkg-reconfigure does not work
|
||||
cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
|
||||
|
||||
echo "==> Installing node.js"
|
||||
mkdir -p /usr/local/node-10.18.1
|
||||
curl -sL https://nodejs.org/dist/v10.18.1/node-v10.18.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-10.18.1
|
||||
ln -sf /usr/local/node-10.18.1/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-10.18.1/bin/npm /usr/bin/npm
|
||||
apt-get install -y python # Install python which is required for npm rebuild
|
||||
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
|
||||
echo "==> Installing Docker"
|
||||
|
||||
# create systemd drop-in file. if you channge options here, be sure to fixup installer.sh as well
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
|
||||
# there are 3 packages for docker - containerd, CLI and the daemon
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.2.2-3_amd64.deb" -o /tmp/containerd.deb
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_18.09.2~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_18.09.2~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
if [[ "${storage_driver}" != "overlay2" ]]; then
|
||||
echo "Docker is using "${storage_driver}" instead of overlay2"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# do not upgrade grub because it might prompt user and break this script
|
||||
echo "==> Enable memory accounting"
|
||||
apt-get -y --no-upgrade install grub2-common
|
||||
sed -e 's/^GRUB_CMDLINE_LINUX="\(.*\)"$/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
|
||||
update-grub
|
||||
|
||||
echo "==> Downloading docker images"
|
||||
if [ ! -f "${arg_infraversionpath}/infra_version.js" ]; then
|
||||
echo "No infra_versions.js found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
images=$(node -e "var i = require('${arg_infraversionpath}/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
|
||||
|
||||
echo -e "\tPulling docker images: ${images}"
|
||||
for image in ${images}; do
|
||||
docker pull "${image}"
|
||||
docker pull "${image%@sha256:*}" # this will tag the image for readability
|
||||
done
|
||||
|
||||
echo "==> Install collectd"
|
||||
if ! apt-get install -y collectd collectd-utils; then
|
||||
# FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this
|
||||
echo "Failed to install collectd. Presumably because of http://mailman.verplant.org/pipermail/collectd/2015-March/006491.html"
|
||||
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
|
||||
fi
|
||||
|
||||
echo "==> Configuring host"
|
||||
sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org/' -i /etc/systemd/timesyncd.conf
|
||||
timedatectl set-ntp 1
|
||||
# mysql follows the system timezone
|
||||
timedatectl set-timezone UTC
|
||||
|
||||
echo "==> Adding sshd configuration warning"
|
||||
sed -e '/Port 22/ i # NOTE: Cloudron only supports moving SSH to port 202. See https://cloudron.io/documentation/security/#securing-ssh-access' -i /etc/ssh/sshd_config
|
||||
|
||||
# https://bugs.launchpad.net/ubuntu/+source/base-files/+bug/1701068
|
||||
echo "==> Disabling motd news"
|
||||
sed -i 's/^ENABLED=.*/ENABLED=0/' /etc/default/motd-news
|
||||
|
||||
# Disable bind for good measure (on online.net, kimsufi servers these are pre-installed and conflicts with unbound)
|
||||
systemctl stop bind9 || true
|
||||
systemctl disable bind9 || true
|
||||
|
||||
# on ovh images dnsmasq seems to run by default
|
||||
systemctl stop dnsmasq || true
|
||||
systemctl disable dnsmasq || true
|
||||
|
||||
# on ssdnodes postfix seems to run by default
|
||||
systemctl stop postfix || true
|
||||
systemctl disable postfix || true
|
||||
|
||||
# on ubuntu 18.04, this is the default. this requires resolvconf for DNS to work further after the disable
|
||||
systemctl stop systemd-resolved || true
|
||||
systemctl disable systemd-resolved || true
|
||||
|
||||
# ubuntu's default config for unbound does not work if ipv6 is disabled. this config is overwritten in start.sh
|
||||
# we need unbound to work as this is required for installer.sh to do any DNS requests
|
||||
ip6=$([[ -s /proc/net/if_inet6 ]] && echo "yes" || echo "no")
|
||||
echo -e "server:\n\tinterface: 127.0.0.1\n\tdo-ip6: ${ip6}" > /etc/unbound/unbound.conf.d/cloudron-network.conf
|
||||
systemctl restart unbound
|
||||
|
||||
107
box.js
107
box.js
@@ -2,76 +2,57 @@
|
||||
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs'),
|
||||
ldap = require('./src/ldap.js'),
|
||||
paths = require('./src/paths.js'),
|
||||
proxyAuth = require('./src/proxyauth.js'),
|
||||
safe = require('safetydance'),
|
||||
server = require('./src/server.js'),
|
||||
settings = require('./src/settings.js'),
|
||||
userdirectory = require('./src/userdirectory.js');
|
||||
|
||||
let logFd;
|
||||
|
||||
async function setupLogging() {
|
||||
if (process.env.BOX_ENV === 'test') return;
|
||||
|
||||
logFd = fs.openSync(paths.BOX_LOG_FILE, 'a');
|
||||
// we used to write using a stream before but it caches internally and there is no way to flush it when things crash
|
||||
process.stdout.write = process.stderr.write = function (...args) {
|
||||
const callback = typeof args[args.length-1] === 'function' ? args.pop() : function () {}; // callback is required for fs.write
|
||||
fs.write.apply(fs, [logFd, ...args, callback]);
|
||||
// prefix all output with a timestamp
|
||||
// debug() already prefixes and uses process.stderr NOT console.*
|
||||
['log', 'info', 'warn', 'debug', 'error'].forEach(function (log) {
|
||||
var orig = console[log];
|
||||
console[log] = function () {
|
||||
orig.apply(console, [new Date().toISOString()].concat(Array.prototype.slice.call(arguments)));
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
// this is also used as the 'uncaughtException' handler which can only have synchronous functions
|
||||
function exitSync(status) {
|
||||
if (status.error) fs.write(logFd, status.error.stack + '\n', function () {});
|
||||
fs.fsyncSync(logFd);
|
||||
fs.closeSync(logFd);
|
||||
process.exit(status.code);
|
||||
}
|
||||
require('supererror')({ splatchError: true });
|
||||
|
||||
async function startServers() {
|
||||
await setupLogging();
|
||||
await server.start(); // do this first since it also inits the database
|
||||
await proxyAuth.start();
|
||||
await ldap.start();
|
||||
let async = require('async'),
|
||||
constants = require('./src/constants.js'),
|
||||
dockerProxy = require('./src/dockerproxy.js'),
|
||||
ldap = require('./src/ldap.js'),
|
||||
server = require('./src/server.js');
|
||||
|
||||
const conf = await settings.getUserDirectoryConfig();
|
||||
if (conf.enabled) await userdirectory.start();
|
||||
}
|
||||
console.log();
|
||||
console.log('==========================================');
|
||||
console.log(` Cloudron ${constants.VERSION} `);
|
||||
console.log('==========================================');
|
||||
console.log();
|
||||
|
||||
async function main() {
|
||||
const [error] = await safe(startServers());
|
||||
if (error) return exitSync({ error: new Error(`Error starting server: ${JSON.stringify(error)}`), code: 1 });
|
||||
async.series([
|
||||
server.start,
|
||||
ldap.start,
|
||||
dockerProxy.start
|
||||
], function (error) {
|
||||
if (error) {
|
||||
console.error('Error starting server', error);
|
||||
process.exit(1);
|
||||
}
|
||||
console.log('Cloudron is up and running');
|
||||
});
|
||||
|
||||
// require this here so that logging handler is already setup
|
||||
const debug = require('debug')('box:box');
|
||||
var NOOP_CALLBACK = function () { };
|
||||
|
||||
process.on('SIGINT', async function () {
|
||||
debug('Received SIGINT. Shutting down.');
|
||||
process.on('SIGINT', function () {
|
||||
console.log('Received SIGINT. Shutting down.');
|
||||
|
||||
await proxyAuth.stop();
|
||||
await server.stop();
|
||||
await userdirectory.stop();
|
||||
await ldap.stop();
|
||||
setTimeout(process.exit.bind(process), 3000);
|
||||
});
|
||||
server.stop(NOOP_CALLBACK);
|
||||
ldap.stop(NOOP_CALLBACK);
|
||||
dockerProxy.stop(NOOP_CALLBACK);
|
||||
setTimeout(process.exit.bind(process), 3000);
|
||||
});
|
||||
|
||||
process.on('SIGTERM', async function () {
|
||||
debug('Received SIGTERM. Shutting down.');
|
||||
process.on('SIGTERM', function () {
|
||||
console.log('Received SIGTERM. Shutting down.');
|
||||
|
||||
await proxyAuth.stop();
|
||||
await server.stop();
|
||||
await userdirectory.stop();
|
||||
await ldap.stop();
|
||||
setTimeout(process.exit.bind(process), 3000);
|
||||
});
|
||||
|
||||
process.on('uncaughtException', (error) => exitSync({ error, code: 1 }));
|
||||
|
||||
console.log(`Cloudron is up and running. Logs are at ${paths.BOX_LOG_FILE}`); // this goes to journalctl
|
||||
}
|
||||
|
||||
main();
|
||||
server.stop(NOOP_CALLBACK);
|
||||
ldap.stop(NOOP_CALLBACK);
|
||||
dockerProxy.stop(NOOP_CALLBACK);
|
||||
setTimeout(process.exit.bind(process), 3000);
|
||||
});
|
||||
|
||||
@@ -2,21 +2,27 @@
|
||||
|
||||
'use strict';
|
||||
|
||||
const database = require('./src/database.js');
|
||||
var database = require('./src/database.js');
|
||||
|
||||
const crashNotifier = require('./src/crashnotifier.js');
|
||||
var crashNotifier = require('./src/crashnotifier.js');
|
||||
|
||||
// This is triggered by systemd with the crashed unit name as argument
|
||||
async function main() {
|
||||
function main() {
|
||||
if (process.argv.length !== 3) return console.error('Usage: crashnotifier.js <unitName>');
|
||||
|
||||
const unitName = process.argv[2];
|
||||
var unitName = process.argv[2];
|
||||
console.log('Started crash notifier for', unitName);
|
||||
|
||||
// eventlog api needs the db
|
||||
await database.initialize();
|
||||
database.initialize(function (error) {
|
||||
if (error) return console.error('Cannot connect to database. Unable to send crash log.', error);
|
||||
|
||||
await crashNotifier.sendFailureLogs(unitName);
|
||||
crashNotifier.sendFailureLogs(unitName, function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
process.exit();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
main();
|
||||
|
||||
@@ -12,6 +12,8 @@ exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM users WHERE admin=1', function (error, results) {
|
||||
if (error) return done(error);
|
||||
|
||||
console.dir(results);
|
||||
|
||||
async.eachSeries(results, function (r, next) {
|
||||
db.runSql('INSERT INTO groupMembers (groupId, userId) VALUES (?, ?)', [ ADMIN_GROUP_ID, r.id ], next);
|
||||
}, done);
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN resetTokenCreationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN resetTokenCreationTime', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,28 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
let async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps MODIFY mailboxDomain VARCHAR(128)', [], function (error) { // make it nullable
|
||||
if (error) console.error(error);
|
||||
|
||||
// clear mailboxName/Domain for apps that do not use mail addons
|
||||
db.all('SELECT * FROM apps', function (error, apps) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(apps, function (app, iteratorDone) {
|
||||
var manifest = JSON.parse(app.manifestJson);
|
||||
if (manifest.addons['sendmail'] || manifest.addons['recvmail']) return iteratorDone();
|
||||
|
||||
db.runSql('UPDATE apps SET mailboxName=?, mailboxDomain=? WHERE id=?', [ null, null, app.id ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps MODIFY manifestJson VARCHAR(128) NOT NULL', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,17 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes ADD COLUMN membersOnly BOOLEAN DEFAULT 0', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback();
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN membersOnly', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE mailboxes ADD COLUMN aliasDomain VARCHAR(128)'),
|
||||
function setAliasDomain(done) {
|
||||
db.all('SELECT * FROM mailboxes', function (error, mailboxes) {
|
||||
async.eachSeries(mailboxes, function (mailbox, iteratorDone) {
|
||||
if (!mailbox.aliasTarget) return iteratorDone();
|
||||
|
||||
db.runSql('UPDATE mailboxes SET aliasDomain=? WHERE name=? AND domain=?', [ mailbox.domain, mailbox.name, mailbox.domain ], iteratorDone);
|
||||
}, done);
|
||||
});
|
||||
},
|
||||
db.runSql.bind(db, 'ALTER TABLE mailboxes ADD CONSTRAINT mailboxes_aliasDomain_constraint FOREIGN KEY(aliasDomain) REFERENCES mail(domain)'),
|
||||
db.runSql.bind(db, 'ALTER TABLE mailboxes CHANGE aliasTarget aliasName VARCHAR(128)')
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE mailboxes DROP FOREIGN KEY mailboxes_aliasDomain_constraint'),
|
||||
db.runSql.bind(db, 'ALTER TABLE mailboxes DROP COLUMN aliasDomain'),
|
||||
db.runSql.bind(db, 'ALTER TABLE mailboxes CHANGE aliasName aliasTarget VARCHAR(128)')
|
||||
], callback);
|
||||
};
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN servicesConfigJson TEXT', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN servicesConfigJson', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN bindsJson TEXT', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN bindsJson', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,35 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const backups = require('../src/backups.js'),
|
||||
fs = require('fs');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
var backupConfig = JSON.parse(results[0].value);
|
||||
if (backupConfig.key) {
|
||||
backupConfig.encryption = backups.generateEncryptionKeysSync(backupConfig.key);
|
||||
backups.cleanupCacheFilesSync();
|
||||
|
||||
fs.writeFileSync('/home/yellowtent/platformdata/BACKUP_PASSWORD',
|
||||
'This file contains your Cloudron backup password.\nBefore Cloudron v5.2, this was saved in the database.' +
|
||||
'From Cloudron 5.2, this password is not required anymore. We generate strong keys based off this password and use those keys to encrypt the backups.\n' +
|
||||
'This means that the password is only required at decryption/restore time.\n\n' +
|
||||
'This file can be safely removed and only exists for the off-chance that you do not remember your backup password.\n\n' +
|
||||
`Password: ${backupConfig.key}\n`,
|
||||
'utf8');
|
||||
|
||||
} else {
|
||||
backupConfig.encryption = null;
|
||||
}
|
||||
|
||||
delete backupConfig.key;
|
||||
|
||||
db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [ JSON.stringify(backupConfig) ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups CHANGE version packageVersion VARCHAR(128) NOT NULL', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups CHANGE packageVersion version VARCHAR(128) NOT NULL', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,24 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups ADD COLUMN encryptionVersion INTEGER', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
var backupConfig = JSON.parse(results[0].value);
|
||||
if (!backupConfig.encryption) return callback(null);
|
||||
|
||||
// mark old encrypted backups as v1
|
||||
db.runSql('UPDATE backups SET encryptionVersion=1', callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN encryptionVersion', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,18 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
var backupConfig = JSON.parse(results[0].value);
|
||||
backupConfig.retentionPolicy = { keepWithinSecs: backupConfig.retentionSecs };
|
||||
delete backupConfig.retentionSecs;
|
||||
|
||||
// mark old encrypted backups as v1
|
||||
db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [ JSON.stringify(backupConfig) ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,18 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
var backupConfig = JSON.parse(results[0].value);
|
||||
if (backupConfig.provider !== 'minio' && backupConfig.provider !== 's3-v4-compat') return callback();
|
||||
|
||||
backupConfig.s3ForcePathStyle = true; // usually minio is self-hosted. s3 v4 compat, we don't know
|
||||
|
||||
db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [ JSON.stringify(backupConfig) ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,17 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
// http://stackoverflow.com/questions/386294/what-is-the-maximum-length-of-a-valid-email-address
|
||||
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE appPasswords DROP INDEX name'),
|
||||
db.runSql.bind(db, 'ALTER TABLE appPasswords ADD CONSTRAINT appPasswords_name_userId_identifier UNIQUE (name, userId, identifier)'),
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,17 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE userGroups ADD COLUMN source VARCHAR(128) DEFAULT ""', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback();
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE userGroups DROP COLUMN source', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups ADD COLUMN identifier VARCHAR(128)', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
|
||||
db.all('SELECT * FROM backups', function (error, backups) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(backups, function (backup, next) {
|
||||
let identifier = 'unknown';
|
||||
|
||||
if (backup.type === 'box') {
|
||||
identifier = 'box';
|
||||
} else {
|
||||
const match = backup.id.match(/app_(.+?)_.+/);
|
||||
if (match) identifier = match[1];
|
||||
}
|
||||
|
||||
db.runSql('UPDATE backups SET identifier=? WHERE id=?', [ identifier, backup.id ], next);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('ALTER TABLE backups MODIFY COLUMN identifier VARCHAR(128) NOT NULL', callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN identifier', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,16 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP', function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
db.runSql('ALTER TABLE users DROP COLUMN modifiedAt', callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN ts', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,29 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
var backupConfig = JSON.parse(results[0].value);
|
||||
if (backupConfig.intervalSecs === 6 * 60 * 60) { // every 6 hours
|
||||
backupConfig.schedulePattern = '00 00 5,11,17,23 * * *';
|
||||
} else if (backupConfig.intervalSecs === 12 * 60 * 60) { // every 12 hours
|
||||
backupConfig.schedulePattern = '00 00 5,17 * * *';
|
||||
} else if (backupConfig.intervalSecs === 24 * 60 * 60) { // every day
|
||||
backupConfig.schedulePattern = '00 00 23 * * *';
|
||||
} else if (backupConfig.intervalSecs === 3 * 24 * 60 * 60) { // every 3 days (based on day)
|
||||
backupConfig.schedulePattern = '00 00 23 * * 1,3,5';
|
||||
} else if (backupConfig.intervalSecs === 7 * 24 * 60 * 60) { // every week (saturday)
|
||||
backupConfig.schedulePattern = '00 00 23 * * 6';
|
||||
} else { // default to everyday
|
||||
backupConfig.schedulePattern = '00 00 23 * * *';
|
||||
}
|
||||
|
||||
delete backupConfig.intervalSecs;
|
||||
db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [ JSON.stringify(backupConfig) ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,23 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="admin_domain"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
const adminDomain = results[0].value;
|
||||
|
||||
async.series([
|
||||
db.runSql.bind(db, 'INSERT INTO settings (name, value) VALUES (?, ?)', [ 'mail_domain', adminDomain ]),
|
||||
db.runSql.bind(db, 'INSERT INTO settings (name, value) VALUES (?, ?)', [ 'mail_fqdn', `my.${adminDomain}` ])
|
||||
], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'DELETE FROM settings WHERE name="mail_domain"'),
|
||||
db.runSql.bind(db, 'DELETE FROM settings WHERE name="mail_fqdn"'),
|
||||
], callback);
|
||||
};
|
||||
@@ -1,22 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('SELECT * FROM settings WHERE name=?', ['app_autoupdate_pattern'], function (error, results) {
|
||||
if (error || results.length === 0) return callback(error); // will use defaults from box code
|
||||
|
||||
var updatePattern = results[0].value; // use app auto update patter for the box as well
|
||||
|
||||
async.series([
|
||||
db.runSql.bind(db, 'START TRANSACTION;'),
|
||||
db.runSql.bind(db, 'DELETE FROM settings WHERE name=? OR name=?', ['app_autoupdate_pattern', 'box_autoupdate_pattern']),
|
||||
db.runSql.bind(db, 'INSERT settings (name, value) VALUES(?, ?)', ['autoupdate_pattern', updatePattern]),
|
||||
db.runSql.bind(db, 'COMMIT')
|
||||
], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mail ADD COLUMN bannerJson TEXT', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mail DROP COLUMN bannerJson', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,27 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const OLD_FIREWALL_CONFIG_JSON = '/home/yellowtent/boxdata/firewall-config.json';
|
||||
const PORTS_FILE = '/home/yellowtent/boxdata/firewall/ports.json';
|
||||
const BLOCKLIST_FILE = '/home/yellowtent/boxdata/firewall/blocklist.txt';
|
||||
|
||||
const fs = require('fs');
|
||||
|
||||
exports.up = function (db, callback) {
|
||||
if (!fs.existsSync(OLD_FIREWALL_CONFIG_JSON)) return callback();
|
||||
|
||||
try {
|
||||
const dataJson = fs.readFileSync(OLD_FIREWALL_CONFIG_JSON, 'utf8');
|
||||
const data = JSON.parse(dataJson);
|
||||
fs.writeFileSync(BLOCKLIST_FILE, data.blocklist.join('\n') + '\n', 'utf8');
|
||||
fs.writeFileSync(PORTS_FILE, JSON.stringify({ allowed_tcp_ports: data.allowed_tcp_ports }, null, 4), 'utf8');
|
||||
fs.unlinkSync(OLD_FIREWALL_CONFIG_JSON);
|
||||
} catch (error) {
|
||||
console.log('Error migrating old firewall config', error);
|
||||
}
|
||||
|
||||
callback();
|
||||
};
|
||||
|
||||
exports.down = function (db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,40 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd1 = 'CREATE TABLE volumes(' +
|
||||
'id VARCHAR(128) NOT NULL UNIQUE,' +
|
||||
'name VARCHAR(256) NOT NULL UNIQUE,' +
|
||||
'hostPath VARCHAR(1024) NOT NULL UNIQUE,' +
|
||||
'creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,' +
|
||||
'PRIMARY KEY (id)) CHARACTER SET utf8 COLLATE utf8_bin';
|
||||
|
||||
var cmd2 = 'CREATE TABLE appMounts(' +
|
||||
'appId VARCHAR(128) NOT NULL,' +
|
||||
'volumeId VARCHAR(128) NOT NULL,' +
|
||||
'readOnly BOOLEAN DEFAULT 1,' +
|
||||
'UNIQUE KEY appMounts_appId_volumeId (appId, volumeId),' +
|
||||
'FOREIGN KEY(appId) REFERENCES apps(id),' +
|
||||
'FOREIGN KEY(volumeId) REFERENCES volumes(id)) CHARACTER SET utf8 COLLATE utf8_bin;';
|
||||
|
||||
db.runSql(cmd1, function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
db.runSql(cmd2, function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN bindsJson', callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DROP TABLE appMounts', function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
db.runSql('DROP TABLE volumes', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN proxyAuth BOOLEAN DEFAULT 0', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN proxyAuth', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE mailboxes ADD COLUMN ownerType VARCHAR(16)'),
|
||||
db.runSql.bind(db, 'UPDATE mailboxes SET ownerType=?', [ 'user' ]),
|
||||
db.runSql.bind(db, 'ALTER TABLE mailboxes MODIFY ownerType VARCHAR(16) NOT NULL'),
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes DROP COLUMN ownerType', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,13 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN httpPort')
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,29 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
iputils = require('../src/iputils.js');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN containerIp VARCHAR(16) UNIQUE', function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
let baseIp = iputils.intFromIp('172.18.16.0');
|
||||
|
||||
db.all('SELECT * FROM apps', function (error, apps) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(apps, function (app, iteratorDone) {
|
||||
const nextIp = iputils.ipFromInt(++baseIp);
|
||||
db.runSql('UPDATE apps SET containerIp=? WHERE id=?', [ nextIp, app.id ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN containerIp', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM settings WHERE name=?', ['platform_config'], function (error, results) {
|
||||
let value;
|
||||
if (error || results.length === 0) {
|
||||
value = { sftp: { requireAdmin: true } };
|
||||
} else {
|
||||
value = JSON.parse(results[0].value);
|
||||
if (!value.sftp) value.sftp = {};
|
||||
value.sftp.requireAdmin = true;
|
||||
}
|
||||
|
||||
// existing installations may not even have the key. so use REPLACE instead of UPDATE
|
||||
db.runSql('REPLACE INTO settings (name, value) VALUES (?, ?)', [ 'platform_config', JSON.stringify(value) ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,18 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'CREATE TABLE groupMembers_copy(groupId VARCHAR(128) NOT NULL, userId VARCHAR(128) NOT NULL, FOREIGN KEY(groupId) REFERENCES userGroups(id), FOREIGN KEY(userId) REFERENCES users(id), UNIQUE (groupId, userId)) CHARACTER SET utf8 COLLATE utf8_bin'), // In mysql CREATE TABLE.. LIKE does not copy indexes
|
||||
db.runSql.bind(db, 'INSERT INTO groupMembers_copy SELECT * FROM groupMembers GROUP BY groupId, userId'),
|
||||
db.runSql.bind(db, 'DROP TABLE groupMembers'),
|
||||
db.runSql.bind(db, 'ALTER TABLE groupMembers_copy RENAME TO groupMembers')
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE groupMembers DROP INDEX groupMembers_member'),
|
||||
], callback);
|
||||
};
|
||||
@@ -1,51 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
safe = require('safetydance');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE domains ADD COLUMN wellKnownJson TEXT', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// keep the paths around, so that we don't need to trigger a re-configure. the old nginx config will use the paths
|
||||
// the new one will proxy calls to the box code
|
||||
const WELLKNOWN_DIR = '/home/yellowtent/boxdata/well-known';
|
||||
const output = safe.child_process.execSync('find . -type f -printf "%P\n"', { cwd: WELLKNOWN_DIR, encoding: 'utf8' });
|
||||
if (!output) return callback();
|
||||
const paths = output.trim().split('\n');
|
||||
if (paths.length === 0) return callback(); // user didn't configure any well-known
|
||||
|
||||
let wellKnown = {};
|
||||
for (let path of paths) {
|
||||
const fqdn = path.split('/', 1)[0];
|
||||
const loc = path.slice(fqdn.length+1);
|
||||
const doc = safe.fs.readFileSync(`${WELLKNOWN_DIR}/${path}`, { encoding: 'utf8' });
|
||||
if (!doc) continue;
|
||||
|
||||
wellKnown[fqdn] = {};
|
||||
wellKnown[fqdn][loc] = doc;
|
||||
}
|
||||
|
||||
console.log('Migrating well-known', JSON.stringify(wellKnown, null, 4));
|
||||
|
||||
async.eachSeries(Object.keys(wellKnown), function (fqdn, iteratorDone) {
|
||||
db.runSql('UPDATE domains SET wellKnownJson=? WHERE domain=?', [ JSON.stringify(wellKnown[fqdn]), fqdn ], function (error, result) {
|
||||
if (error) {
|
||||
console.error(error); // maybe the domain does not exist anymore
|
||||
} else if (result.affectedRows === 0) {
|
||||
console.log(`Could not migrate wellknown as domain ${fqdn} is missing`);
|
||||
}
|
||||
iteratorDone();
|
||||
});
|
||||
}, function (error) {
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE domains DROP COLUMN wellKnownJson', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,23 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM settings WHERE name=?', ['platform_config'], function (error, results) {
|
||||
if (error || results.length === 0) return callback(null);
|
||||
|
||||
let value = JSON.parse(results[0].value);
|
||||
|
||||
for (const serviceName of Object.keys(value)) {
|
||||
const service = value[serviceName];
|
||||
if (!service.memorySwap) continue;
|
||||
service.memoryLimit = service.memorySwap;
|
||||
delete service.memorySwap;
|
||||
delete service.memory;
|
||||
}
|
||||
|
||||
db.runSql('UPDATE settings SET value=? WHERE name=?', [ JSON.stringify(value), 'platform_config' ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,28 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM apps', function (error, apps) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(apps, function (app, iteratorDone) {
|
||||
if (!app.servicesConfigJson) return iteratorDone();
|
||||
|
||||
let servicesConfig = JSON.parse(app.servicesConfigJson);
|
||||
for (const serviceName of Object.keys(servicesConfig)) {
|
||||
const service = servicesConfig[serviceName];
|
||||
if (!service.memorySwap) continue;
|
||||
service.memoryLimit = service.memorySwap;
|
||||
delete service.memorySwap;
|
||||
delete service.memory;
|
||||
}
|
||||
|
||||
db.runSql('UPDATE apps SET servicesConfigJson=? WHERE id=?', [ JSON.stringify(servicesConfig), app.id ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,9 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'services_config', 'platform_config' ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'platform_config', 'services_config' ], callback);
|
||||
};
|
||||
@@ -1,10 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
/* this contained an invalid migration of OVH URLs from s3 subdomain to storage subdomain. See https://forum.cloudron.io/topic/4584/issue-with-backups-listings-and-saving-backup-config-in-6-2 */
|
||||
callback();
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,16 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="registry_config"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
var registryConfig = JSON.parse(results[0].value);
|
||||
if (!registryConfig.provider) registryConfig.provider = 'other';
|
||||
|
||||
db.runSql('UPDATE settings SET value=? WHERE name="registry_config"', [ JSON.stringify(registryConfig) ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE tokens ADD COLUMN lastUsedTime TIMESTAMP NULL', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE tokens DROP COLUMN lastUsedTime', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,16 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN enableMailbox BOOLEAN DEFAULT 1', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN enableMailbox', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes ADD COLUMN active BOOLEAN DEFAULT 1', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback();
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes DROP COLUMN active', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
fs = require('fs'),
|
||||
path = require('path');
|
||||
|
||||
const AVATAR_DIR = '/home/yellowtent/boxdata/profileicons';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN avatar MEDIUMBLOB', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
fs.readdir(AVATAR_DIR, function (error, filenames) {
|
||||
if (error && error.code === 'ENOENT') return callback();
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(filenames, function (filename, iteratorCallback) {
|
||||
const avatar = fs.readFileSync(path.join(AVATAR_DIR, filename));
|
||||
const userId = filename;
|
||||
|
||||
db.runSql('UPDATE users SET avatar=? WHERE id=?', [ avatar, userId ], iteratorCallback);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
fs.rmdir(AVATAR_DIR, { recursive: true }, callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN avatar', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE settings ADD COLUMN valueBlob MEDIUMBLOB', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
fs.readFile('/home/yellowtent/boxdata/avatar.png', function (error, avatar) {
|
||||
if (error && error.code === 'ENOENT') return callback();
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('INSERT INTO settings (name, valueBlob) VALUES (?, ?)', [ 'cloudron_avatar', avatar ], callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN loginLocationsJson TEXT', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN loginLocationsJson', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,42 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
fs = require('fs'),
|
||||
path = require('path');
|
||||
|
||||
const APPICONS_DIR = '/home/yellowtent/boxdata/appicons';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE apps ADD COLUMN icon MEDIUMBLOB'),
|
||||
db.runSql.bind(db, 'ALTER TABLE apps ADD COLUMN appStoreIcon MEDIUMBLOB'),
|
||||
function migrateIcons(next) {
|
||||
fs.readdir(APPICONS_DIR, function (error, filenames) {
|
||||
if (error && error.code === 'ENOENT') return next();
|
||||
if (error) return next(error);
|
||||
|
||||
async.eachSeries(filenames, function (filename, iteratorCallback) {
|
||||
const icon = fs.readFileSync(path.join(APPICONS_DIR, filename));
|
||||
const appId = filename.split('.')[0];
|
||||
|
||||
if (filename.endsWith('.user.png')) {
|
||||
db.runSql('UPDATE apps SET icon=? WHERE id=?', [ icon, appId ], iteratorCallback);
|
||||
} else {
|
||||
db.runSql('UPDATE apps SET appStoreIcon=? WHERE id=?', [ icon, appId ], iteratorCallback);
|
||||
}
|
||||
}, function (error) {
|
||||
if (error) return next(error);
|
||||
|
||||
fs.rmdir(APPICONS_DIR, { recursive: true }, next);
|
||||
});
|
||||
});
|
||||
}
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN icon'),
|
||||
db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN appStoreIcon'),
|
||||
], callback);
|
||||
};
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps MODIFY ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps MODIFY ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,20 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
const cmd = 'CREATE TABLE blobs(' +
|
||||
'id VARCHAR(128) NOT NULL UNIQUE,' +
|
||||
'value MEDIUMBLOB,' +
|
||||
'PRIMARY KEY (id)) CHARACTER SET utf8 COLLATE utf8_bin';
|
||||
|
||||
db.runSql(cmd, function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DROP TABLE blobs', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,49 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
fs = require('fs'),
|
||||
safe = require('safetydance');
|
||||
|
||||
const BOX_DATA_DIR = '/home/yellowtent/boxdata';
|
||||
const PLATFORM_DATA_DIR = '/home/yellowtent/platformdata';
|
||||
|
||||
exports.up = function (db, callback) {
|
||||
let funcs = [];
|
||||
|
||||
const acmeKey = safe.fs.readFileSync(`${BOX_DATA_DIR}/acme/acme.key`);
|
||||
if (acmeKey) {
|
||||
funcs.push(db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'acme_account_key', acmeKey ]));
|
||||
funcs.push(fs.rmdir.bind(fs, `${BOX_DATA_DIR}/acme`, { recursive: true }));
|
||||
}
|
||||
const dhparams = safe.fs.readFileSync(`${BOX_DATA_DIR}/dhparams.pem`);
|
||||
if (dhparams) {
|
||||
safe.fs.writeFileSync(`${PLATFORM_DATA_DIR}/dhparams.pem`, dhparams);
|
||||
funcs.push(db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'dhparams', dhparams ]));
|
||||
// leave the dhparms here for the moment because startup code regenerates box nginx config and reloads nginx. at that point,
|
||||
// nginx config of apps has not been re-generated yet and the reload fails. post 6.3, this file can be removed in start.sh
|
||||
// funcs.push(fs.unlink.bind(fs, `${BOX_DATA_DIR}/dhparams.pem`));
|
||||
}
|
||||
const turnSecret = safe.fs.readFileSync(`${BOX_DATA_DIR}/addon-turn-secret`);
|
||||
if (turnSecret) {
|
||||
funcs.push(db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'addon_turn_secret', turnSecret ]));
|
||||
funcs.push(fs.unlink.bind(fs, `${BOX_DATA_DIR}/addon-turn-secret`));
|
||||
}
|
||||
|
||||
// sftp keys get moved to platformdata in start.sh
|
||||
const sftpPublicKey = safe.fs.readFileSync(`${BOX_DATA_DIR}/sftp/ssh/ssh_host_rsa_key.pub`);
|
||||
const sftpPrivateKey = safe.fs.readFileSync(`${BOX_DATA_DIR}/sftp/ssh/ssh_host_rsa_key`);
|
||||
if (sftpPublicKey) {
|
||||
safe.fs.writeFileSync(`${PLATFORM_DATA_DIR}/sftp/ssh/ssh_host_rsa_key.pub`, sftpPublicKey);
|
||||
safe.fs.writeFileSync(`${PLATFORM_DATA_DIR}/sftp/ssh/ssh_host_rsa_key`, sftpPrivateKey);
|
||||
safe.fs.chmodSync(`${PLATFORM_DATA_DIR}/sftp/ssh/ssh_host_rsa_key`, 0o600);
|
||||
funcs.push(db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'sftp_public_key', sftpPublicKey ]));
|
||||
funcs.push(db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'sftp_private_key', sftpPrivateKey ]));
|
||||
funcs.push(fs.rmdir.bind(fs, `${BOX_DATA_DIR}/sftp`, { recursive: true }));
|
||||
}
|
||||
|
||||
async.series(funcs, callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,31 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
fs = require('fs'),
|
||||
safe = require('safetydance');
|
||||
|
||||
const BOX_DATA_DIR = '/home/yellowtent/boxdata';
|
||||
const PLATFORM_DATA_DIR = '/home/yellowtent/platformdata';
|
||||
|
||||
exports.up = function (db, callback) {
|
||||
if (!fs.existsSync(`${BOX_DATA_DIR}/firewall`)) return callback();
|
||||
|
||||
const ports = safe.fs.readFileSync(`${BOX_DATA_DIR}/firewall/ports.json`);
|
||||
if (ports) {
|
||||
safe.fs.writeFileSync(`${PLATFORM_DATA_DIR}/firewall/ports.json`, ports);
|
||||
}
|
||||
|
||||
const blocklist = safe.fs.readFileSync(`${BOX_DATA_DIR}/firewall/blocklist.txt`);
|
||||
async.series([
|
||||
(next) => {
|
||||
if (!blocklist) return next();
|
||||
db.runSql('INSERT INTO settings (name, valueBlob) VALUES (?, ?)', [ 'firewall_blocklist', blocklist ], next);
|
||||
},
|
||||
fs.writeFile.bind(fs, `${PLATFORM_DATA_DIR}/firewall/blocklist.txt`, blocklist || ''),
|
||||
fs.rmdir.bind(fs, `${BOX_DATA_DIR}/firewall`, { recursive: true })
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,38 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
safe = require('safetydance');
|
||||
|
||||
const CERTS_DIR = '/home/yellowtent/boxdata/certs',
|
||||
PLATFORM_CERTS_DIR = '/home/yellowtent/platformdata/nginx/cert';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE domains ADD COLUMN fallbackCertificateJson MEDIUMTEXT', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.all('SELECT * FROM domains', [ ], function (error, domains) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(domains, function (domain, iteratorDone) {
|
||||
// b94dbf5fa33a6d68d784571721ff44348c2d88aa seems to have moved certs from platformdata to boxdata
|
||||
let cert = safe.fs.readFileSync(`${CERTS_DIR}/${domain.domain}.host.cert`, 'utf8');
|
||||
let key = safe.fs.readFileSync(`${CERTS_DIR}/${domain.domain}.host.key`, 'utf8');
|
||||
|
||||
if (!cert) {
|
||||
cert = safe.fs.readFileSync(`${PLATFORM_CERTS_DIR}/${domain.domain}.host.cert`, 'utf8');
|
||||
key = safe.fs.readFileSync(`${PLATFORM_CERTS_DIR}/${domain.domain}.host.key`, 'utf8');
|
||||
}
|
||||
|
||||
const fallbackCertificate = { cert, key };
|
||||
|
||||
db.runSql('UPDATE domains SET fallbackCertificateJson=? WHERE domain=?', [ JSON.stringify(fallbackCertificate), domain.domain ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.run(db, 'ALTER TABLE domains DROP COLUMN fallbackCertificateJson')
|
||||
], callback);
|
||||
};
|
||||
@@ -1,34 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
fs = require('fs'),
|
||||
safe = require('safetydance');
|
||||
|
||||
const CERTS_DIR = '/home/yellowtent/boxdata/certs';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE subdomains ADD COLUMN certificateJson MEDIUMTEXT', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.all('SELECT * FROM subdomains', [ ], function (error, subdomains) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(subdomains, function (subdomain, iteratorDone) {
|
||||
const cert = safe.fs.readFileSync(`${CERTS_DIR}/${subdomain.subdomain}.${subdomain.domain}.user.cert`, 'utf8');
|
||||
const key = safe.fs.readFileSync(`${CERTS_DIR}/${subdomain.subdomain}.${subdomain.domain}.user.key`, 'utf8');
|
||||
|
||||
if (!cert || !key) return iteratorDone();
|
||||
|
||||
const certificate = { cert, key };
|
||||
|
||||
db.runSql('UPDATE subdomains SET certificateJson=? WHERE domain=? AND subdomain=?', [ JSON.stringify(certificate), subdomain.domain, subdomain.subdomain ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.run(db, 'ALTER TABLE subdomains DROP COLUMN certificateJson')
|
||||
], callback);
|
||||
};
|
||||
@@ -1,52 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
child_process = require('child_process'),
|
||||
fs = require('fs'),
|
||||
path = require('path'),
|
||||
safe = require('safetydance');
|
||||
|
||||
const OLD_CERTS_DIR = '/home/yellowtent/boxdata/certs';
|
||||
const NEW_CERTS_DIR = '/home/yellowtent/platformdata/nginx/cert';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
fs.readdir(OLD_CERTS_DIR, function (error, filenames) {
|
||||
if (error && error.code === 'ENOENT') return callback();
|
||||
if (error) return callback(error);
|
||||
|
||||
filenames = filenames.filter(f => f.endsWith('.key') && !f.endsWith('.host.key') && !f.endsWith('.user.key')); // ignore fallback and user keys
|
||||
|
||||
async.eachSeries(filenames, function (filename, iteratorCallback) {
|
||||
const privateKeyFile = filename;
|
||||
const privateKey = fs.readFileSync(path.join(OLD_CERTS_DIR, filename));
|
||||
const certificateFile = filename.replace(/\.key$/, '.cert');
|
||||
const certificate = safe.fs.readFileSync(path.join(OLD_CERTS_DIR, certificateFile));
|
||||
if (!certificate) {
|
||||
console.log(`${certificateFile} is missing. skipping migration`);
|
||||
return iteratorCallback();
|
||||
}
|
||||
const csrFile = filename.replace(/\.key$/, '.csr');
|
||||
const csr = safe.fs.readFileSync(path.join(OLD_CERTS_DIR, csrFile));
|
||||
if (!csr) {
|
||||
console.log(`${csrFile} is missing. skipping migration`);
|
||||
return iteratorCallback();
|
||||
}
|
||||
|
||||
async.series([
|
||||
db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value)', `cert-${privateKeyFile}`, privateKey),
|
||||
db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value)', `cert-${certificateFile}`, certificate),
|
||||
db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value)', `cert-${csrFile}`, csr),
|
||||
], iteratorCallback);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
child_process.execSync(`cp ${OLD_CERTS_DIR}/* ${NEW_CERTS_DIR}`); // this way we copy the non-migrated ones like .host, .user etc as well
|
||||
fs.rmdir(OLD_CERTS_DIR, { recursive: true }, callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE volumes ADD COLUMN mountType VARCHAR(16) DEFAULT "noop"'),
|
||||
db.runSql.bind(db, 'ALTER TABLE volumes ADD COLUMN mountOptionsJson TEXT')
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE volumes DROP COLUMN mountType'),
|
||||
db.runSql.bind(db, 'ALTER TABLE volumes DROP COLUMN mountOptionsJson')
|
||||
], callback);
|
||||
};
|
||||
@@ -1,21 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE backups ADD INDEX creationTime_index (creationTime)'),
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog ADD INDEX creationTime_index (creationTime)'),
|
||||
db.runSql.bind(db, 'ALTER TABLE notifications ADD INDEX creationTime_index (creationTime)'),
|
||||
db.runSql.bind(db, 'ALTER TABLE tasks ADD INDEX creationTime_index (creationTime)'),
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE backups DROP INDEX creationTime_index'),
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog DROP INDEX creationTime_index'),
|
||||
db.runSql.bind(db, 'ALTER TABLE notifications DROP INDEX creationTime_index'),
|
||||
db.runSql.bind(db, 'ALTER TABLE tasks DROP INDEX creationTime_index'),
|
||||
], callback);
|
||||
};
|
||||
@@ -1,33 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('ALTER TABLE users ADD INDEX creationTime_index (creationTime)', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.all('SELECT id, createdAt FROM users', function (error, results) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(results, function (r, iteratorDone) {
|
||||
const creationTime = new Date(r.createdAt);
|
||||
db.runSql('UPDATE users SET creationTime=? WHERE id=?', [ creationTime, r.id ], iteratorDone);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('ALTER TABLE users DROP COLUMN createdAt', callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN creationTime', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,27 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
const backupConfig = JSON.parse(results[0].value);
|
||||
if (backupConfig.provider === 'sshfs' || backupConfig.provider === 'cifs' || backupConfig.provider === 'nfs' || backupConfig.externalDisk) {
|
||||
backupConfig.chown = backupConfig.provider === 'nfs' || backupConfig.provider === 'sshfs' || backupConfig.externalDisk;
|
||||
backupConfig.preserveAttributes = !!backupConfig.externalDisk;
|
||||
backupConfig.provider = 'mountpoint';
|
||||
if (backupConfig.externalDisk) {
|
||||
backupConfig.mountPoint = backupConfig.backupFolder;
|
||||
backupConfig.prefix = '';
|
||||
delete backupConfig.backupFolder;
|
||||
delete backupConfig.externalDisk;
|
||||
}
|
||||
db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [JSON.stringify(backupConfig)], callback);
|
||||
} else {
|
||||
callback();
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,13 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE notifications DROP COLUMN userId', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('DELETE FROM notifications', callback); // just clear notifications table
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE notifications ADD COLUMN userId VARCHAR(128) NOT NULL', callback);
|
||||
};
|
||||
@@ -1,26 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
safe = require('safetydance');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM volumes', function (error, volumes) {
|
||||
if (error || volumes.length === 0) return callback(error);
|
||||
|
||||
async.eachSeries(volumes, function (volume, iteratorDone) {
|
||||
if (volume.mountType !== 'noop') return iteratorDone();
|
||||
|
||||
let mountType;
|
||||
if (safe.child_process.execSync(`mountpoint -q -- ${volume.hostPath}`)) {
|
||||
mountType = 'mountpoint';
|
||||
} else {
|
||||
mountType = 'filesystem';
|
||||
}
|
||||
db.runSql('UPDATE volumes SET mountType=? WHERE id=?', [ mountType, volume.id ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,13 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('UPDATE users SET avatar="gravatar" WHERE avatar IS NULL', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('ALTER TABLE users MODIFY avatar MEDIUMBLOB NOT NULL', callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users MODIFY avatar MEDIUMBLOB', callback);
|
||||
};
|
||||
@@ -1,30 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
safe = require('safetydance');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * from domains', [], function (error, results) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(results, function (r, iteratorDone) {
|
||||
if (!r.wellKnownJson) return iteratorDone();
|
||||
|
||||
const wellKnown = safe.JSON.parse(r.wellKnownJson);
|
||||
if (!wellKnown || !wellKnown['matrix/server']) return iteratorDone();
|
||||
const matrixHostname = JSON.parse(wellKnown['matrix/server'])['m.server'];
|
||||
|
||||
wellKnown['matrix/client'] = JSON.stringify({
|
||||
'm.homeserver': {
|
||||
'base_url': 'https://' + matrixHostname
|
||||
}
|
||||
});
|
||||
|
||||
db.runSql('UPDATE domains SET wellKnownJson=? WHERE domain=?', [ JSON.stringify(wellKnown), r.domain ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE appAddonConfigs MODIFY value TEXT NOT NULL', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE appAddonConfigs MODIFY value VARCHAR(512)', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users MODIFY loginLocationsJson MEDIUMTEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users MODIFY loginLocationsJson TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,9 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN operatorsJson TEXT', callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN operatorsJson', callback);
|
||||
};
|
||||
@@ -1,9 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN crontab TEXT', callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN crontab', callback);
|
||||
};
|
||||
@@ -1,9 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN inviteToken VARCHAR(128) DEFAULT ""', callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN inviteToken', callback);
|
||||
};
|
||||
@@ -1,19 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE apps ADD COLUMN enableInbox BOOLEAN DEFAULT 0'),
|
||||
db.runSql.bind(db, 'ALTER TABLE apps ADD COLUMN inboxName VARCHAR(128)'),
|
||||
db.runSql.bind(db, 'ALTER TABLE apps ADD COLUMN inboxDomain VARCHAR(128)'),
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN enableInbox'),
|
||||
db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN inboxName'),
|
||||
db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN inboxDomain'),
|
||||
], callback);
|
||||
};
|
||||
@@ -1,35 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
reverseProxy = require('../src/reverseproxy.js'),
|
||||
safe = require('safetydance');
|
||||
|
||||
const NGINX_CERT_DIR = '/home/yellowtent/platformdata/nginx/cert';
|
||||
|
||||
// ensure fallbackCertificate of domains are present in database and the cert dir. it seems a bad migration lost them.
|
||||
// https://forum.cloudron.io/topic/5683/data-argument-must-be-of-type-received-null-error-during-restore-process
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM domains', [ ], function (error, domains) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// this code is br0ken since async 3.x since async functions won't get iteratorDone anymore
|
||||
// no point fixing this migration though since it won't run again in old cloudrons. and in new cloudron domains will be empty
|
||||
async.eachSeries(domains, async function (domain, iteratorDone) {
|
||||
let fallbackCertificate = safe.JSON.parse(domain.fallbackCertificateJson);
|
||||
if (!fallbackCertificate || !fallbackCertificate.cert || !fallbackCertificate.key) {
|
||||
let error;
|
||||
[error, fallbackCertificate] = await safe(reverseProxy.generateFallbackCertificate(domain.domain));
|
||||
if (error) return iteratorDone(error);
|
||||
}
|
||||
|
||||
if (!safe.fs.writeFileSync(`${NGINX_CERT_DIR}/${domain.domain}.host.cert`, fallbackCertificate.cert, 'utf8')) return iteratorDone(safe.error);
|
||||
if (!safe.fs.writeFileSync(`${NGINX_CERT_DIR}/${domain.domain}.host.key`, fallbackCertificate.key, 'utf8')) return iteratorDone(safe.error);
|
||||
|
||||
db.runSql('UPDATE domains SET fallbackCertificateJson=? WHERE domain=?', [ JSON.stringify(fallbackCertificate), domain.domain ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,16 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes ADD COLUMN enablePop3 BOOLEAN DEFAULT 0', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes DROP COLUMN enablePop3', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
fs = require('fs'),
|
||||
path = require('path'),
|
||||
safe = require('safetydance');
|
||||
|
||||
const MAIL_DATA_DIR = '/home/yellowtent/boxdata/mail';
|
||||
const DKIM_DIR = `${MAIL_DATA_DIR}/dkim`;
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mail ADD COLUMN dkimKeyJson MEDIUMTEXT', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
fs.readdir(DKIM_DIR, function (error, filenames) {
|
||||
if (error && error.code === 'ENOENT') return callback();
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(filenames, function (filename, iteratorCallback) {
|
||||
const domain = filename;
|
||||
const publicKey = safe.fs.readFileSync(path.join(DKIM_DIR, domain, 'public'), 'utf8');
|
||||
const privateKey = safe.fs.readFileSync(path.join(DKIM_DIR, domain, 'private'), 'utf8');
|
||||
if (!publicKey || !privateKey) return iteratorCallback();
|
||||
|
||||
const dkimKey = {
|
||||
publicKey,
|
||||
privateKey
|
||||
};
|
||||
|
||||
db.runSql('UPDATE mail SET dkimKeyJson=? WHERE domain=?', [ JSON.stringify(dkimKey), domain ], iteratorCallback);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
fs.rmdir(DKIM_DIR, { recursive: true }, callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.run(db, 'ALTER TABLE mail DROP COLUMN dkimKeyJson')
|
||||
], callback);
|
||||
};
|
||||
@@ -1,9 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('DELETE FROM blobs WHERE id=?', [ 'dhparams' ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,17 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog CHANGE source sourceJson TEXT', []),
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog CHANGE data dataJson TEXT', []),
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog CHANGE sourceJson source TEXT', []),
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog CHANGE dataJson data TEXT', []),
|
||||
], callback);
|
||||
};
|
||||
@@ -1,17 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('SELECT value FROM settings WHERE name=?', [ 'sysinfo_config' ], function (error, result) {
|
||||
if (error || result.length === 0) return callback(error);
|
||||
const sysinfoConfig = JSON.parse(result[0].value);
|
||||
if (sysinfoConfig.provider !== 'fixed' || !sysinfoConfig.ip) return callback();
|
||||
sysinfoConfig.ipv4 = sysinfoConfig.ip;
|
||||
delete sysinfoConfig.ip;
|
||||
|
||||
db.runSql('REPLACE INTO settings (name, value) VALUES(?, ?)', [ 'sysinfo_config', JSON.stringify(sysinfoConfig) ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,9 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'directory_config', 'profile_config' ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'profile_config', 'directory_config' ], callback);
|
||||
};
|
||||
@@ -1,15 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE subdomains ADD COLUMN environmentVariable VARCHAR(128)', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE subdomains DROP COLUMN environmentVariable', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,19 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const safe = require('safetydance');
|
||||
|
||||
const PROXY_AUTH_TOKEN_SECRET_FILE = '/home/yellowtent/platformdata/proxy-auth-token-secret';
|
||||
|
||||
exports.up = function (db, callback) {
|
||||
const token = safe.fs.readFileSync(PROXY_AUTH_TOKEN_SECRET_FILE);
|
||||
if (!token) return callback();
|
||||
db.runSql('INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'proxy_auth_token_secret', token ], function (error) {
|
||||
if (error) return callback(error);
|
||||
safe.fs.unlinkSync(PROXY_AUTH_TOKEN_SECRET_FILE);
|
||||
callback();
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,12 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('RENAME TABLE subdomains TO locations', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,27 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
mail = require('../src/mail.js'),
|
||||
safe = require('safetydance'),
|
||||
util = require('util');
|
||||
|
||||
// it seems some mail domains do not have dkimKey in the database for some reason because of some previous bad migration
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM mail', [ ], function (error, mailDomains) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(mailDomains, function (mailDomain, iteratorDone) {
|
||||
let dkimKey = safe.JSON.parse(mailDomain.dkimKeyJson);
|
||||
if (dkimKey && dkimKey.publicKey && dkimKey.privateKey) return iteratorDone();
|
||||
console.log(`${mailDomain.domain} has no dkim key in the database. generating a new one`);
|
||||
util.callbackify(mail.generateDkimKey)(function (error, dkimKey) {
|
||||
if (error) return iteratorDone(error);
|
||||
db.runSql('UPDATE mail SET dkimKeyJson=? WHERE domain=?', [ JSON.stringify(dkimKey), mailDomain.domain ], iteratorDone);
|
||||
});
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,9 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('DELETE FROM settings WHERE name=?', [ 'license_key' ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,9 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'appstore_api_token', 'cloudron_token' ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,37 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const superagent = require('superagent');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="api_server_origin"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
const apiServerOrigin = results[0].value;
|
||||
|
||||
db.all('SELECT value FROM settings WHERE name="appstore_api_token"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
const apiToken = results[0].value;
|
||||
|
||||
console.log(`Getting appstore web token from ${apiServerOrigin}`);
|
||||
|
||||
superagent.post(`${apiServerOrigin}/api/v1/user_token`)
|
||||
.send({})
|
||||
.query({ accessToken: apiToken })
|
||||
.timeout(30 * 1000).end(function (error, response) {
|
||||
if (error && !error.response) {
|
||||
console.log('Network error getting web token', error);
|
||||
return callback();
|
||||
}
|
||||
if (response.statusCode !== 201 || !response.body.accessToken) {
|
||||
console.log(`Bad status getting web token: ${response.status} ${response.text}`);
|
||||
return callback();
|
||||
}
|
||||
|
||||
db.runSql('INSERT settings (name, value) VALUES(?, ?)', [ 'appstore_web_token', response.body.accessToken ], callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,16 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups ADD COLUMN label VARCHAR(128) DEFAULT ""', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN label', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
hat = require('../src/hat.js');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * from backups', function (error, allBackups) {
|
||||
if (error) return callback(error);
|
||||
|
||||
console.log(`Fixing up ${allBackups.length} backup entries`);
|
||||
const idMap = {};
|
||||
allBackups.forEach(b => {
|
||||
b.remotePath = b.id;
|
||||
b.id = `${b.type}_${b.identifier}_v${b.packageVersion}_${hat(256)}`; // id is used by the UI to derive dependent packages. making this a UUID will require a lot of db querying
|
||||
idMap[b.remotePath] = b.id;
|
||||
});
|
||||
|
||||
db.runSql('ALTER TABLE backups ADD COLUMN remotePath VARCHAR(256)', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('ALTER TABLE backups CHANGE COLUMN dependsOn dependsOnJson TEXT', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(allBackups, function (backup, iteratorDone) {
|
||||
const dependsOnPaths = backup.dependsOn ? backup.dependsOn.split(',') : []; // previously, it was paths
|
||||
let dependsOnIds = [];
|
||||
dependsOnPaths.forEach(p => { if (idMap[p]) dependsOnIds.push(idMap[p]); });
|
||||
|
||||
db.runSql('UPDATE backups SET id = ?, remotePath = ?, dependsOnJson = ? WHERE id = ?', [ backup.id, backup.remotePath, JSON.stringify(dependsOnIds), backup.remotePath ], iteratorDone);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('ALTER TABLE backups MODIFY COLUMN remotePath VARCHAR(256) NOT NULL UNIQUE', callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN remotePath', function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
db.runSql('ALTER TABLE backups RENAME COLUMN dependsOnJson to dependsOn', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM apps', function (error, apps) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(apps, function (app, iteratorDone) {
|
||||
const manifest = JSON.parse(app.manifestJson);
|
||||
const hasSso = !!manifest.addons['proxyAuth'] || !!manifest.addons['ldap'];
|
||||
if (hasSso || !app.sso) return iteratorDone();
|
||||
|
||||
console.log(`Unsetting sso flag of ${app.id}`);
|
||||
db.runSql('UPDATE apps SET sso=? WHERE id=?', [ 0, app.id ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,20 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM settings WHERE name = ?', [ 'api_server_origin' ], function (error, result) {
|
||||
if (error || result.length === 0) return callback(error);
|
||||
|
||||
let consoleOrigin;
|
||||
switch (result[0].value) {
|
||||
case 'https://api.dev.cloudron.io': consoleOrigin = 'https://console.dev.cloudron.io'; break;
|
||||
case 'https://api.staging.cloudron.io': consoleOrigin = 'https://console.staging.cloudron.io'; break;
|
||||
default: consoleOrigin = 'https://console.cloudron.io'; break;
|
||||
}
|
||||
|
||||
db.runSql('REPLACE INTO settings (name, value) VALUES (?, ?)', [ 'console_server_origin', consoleOrigin ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -1,9 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN backgroundImage MEDIUMBLOB', callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN backgroundImage', callback);
|
||||
};
|
||||
@@ -1,12 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN mailboxDisplayName VARCHAR(128) DEFAULT "" NOT NULL', [], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN mailboxDisplayName', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -1,51 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
const path = require('path'),
|
||||
safe = require('safetydance'),
|
||||
uuid = require('uuid');
|
||||
|
||||
function getMountPoint(dataDir) {
|
||||
const output = safe.child_process.execSync(`df --output=target "${dataDir}" | tail -1`, { encoding: 'utf8' });
|
||||
if (!output) return dataDir;
|
||||
const mountPoint = output.trim();
|
||||
if (mountPoint === '/') return dataDir;
|
||||
return mountPoint;
|
||||
}
|
||||
|
||||
exports.up = async function(db) {
|
||||
await db.runSql('ALTER TABLE apps ADD storageVolumeId VARCHAR(128), ADD FOREIGN KEY(storageVolumeId) REFERENCES volumes(id)');
|
||||
await db.runSql('ALTER TABLE apps ADD storageVolumePrefix VARCHAR(128)');
|
||||
await db.runSql('ALTER TABLE apps ADD CONSTRAINT apps_storageVolume UNIQUE (storageVolumeId, storageVolumePrefix)');
|
||||
|
||||
const apps = await db.runSql('SELECT * FROM apps WHERE dataDir IS NOT NULL');
|
||||
const allVolumes = await db.runSql('SELECT * FROM volumes');
|
||||
|
||||
for (const app of apps) {
|
||||
console.log(`data-dir (${app.id}): migrating data dir ${app.dataDir}`);
|
||||
|
||||
const mountPoint = getMountPoint(app.dataDir);
|
||||
const prefix = path.relative(mountPoint, app.dataDir);
|
||||
|
||||
console.log(`data-dir (${app.id}): migrating to mountpoint ${mountPoint} and prefix ${prefix}`);
|
||||
|
||||
const volume = allVolumes.find(v => v.hostPath === mountPoint);
|
||||
if (volume) {
|
||||
console.log(`data-dir (${app.id}): using existing volume ${volume.id}`);
|
||||
await db.runSql('UPDATE apps SET storageVolumeId=?, storageVolumePrefix=? WHERE id=?', [ volume.id, prefix, app.id ]);
|
||||
continue;
|
||||
}
|
||||
|
||||
const id = uuid.v4().replace(/-/g, ''); // to make systemd mount file names more readable
|
||||
const name = `app-${app.id}`;
|
||||
const type = app.dataDir === mountPoint ? 'filesystem' : 'mountpoint';
|
||||
|
||||
console.log(`data-dir (${app.id}): creating new volume ${id}`);
|
||||
await db.runSql('INSERT INTO volumes (id, name, hostPath, mountType, mountOptionsJson) VALUES (?, ?, ?, ?, ?)', [ id, name, mountPoint, type, JSON.stringify({}) ]);
|
||||
await db.runSql('UPDATE apps SET storageVolumeId=?, storageVolumePrefix=? WHERE id=?', [ id, prefix, app.id ]);
|
||||
}
|
||||
|
||||
await db.runSql('ALTER TABLE apps DROP COLUMN dataDir');
|
||||
};
|
||||
|
||||
exports.down = async function(/*db*/) {
|
||||
};
|
||||
@@ -6,7 +6,7 @@
|
||||
#### Strict mode is enabled
|
||||
#### VARCHAR - stored as part of table row (use for strings)
|
||||
#### TEXT - stored offline from table row (use for strings)
|
||||
#### BLOB (64KB), MEDIUMBLOB (16MB), LONGBLOB (4GB) - stored offline from table row (use for binary data)
|
||||
#### BLOB - stored offline from table row (use for binary data)
|
||||
#### https://dev.mysql.com/doc/refman/5.0/en/storage-requirements.html
|
||||
#### Times are stored in the database in UTC. And precision is seconds
|
||||
|
||||
@@ -20,37 +20,27 @@ CREATE TABLE IF NOT EXISTS users(
|
||||
email VARCHAR(254) NOT NULL UNIQUE,
|
||||
password VARCHAR(1024) NOT NULL,
|
||||
salt VARCHAR(512) NOT NULL,
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
|
||||
createdAt VARCHAR(512) NOT NULL,
|
||||
modifiedAt VARCHAR(512) NOT NULL,
|
||||
displayName VARCHAR(512) DEFAULT "",
|
||||
fallbackEmail VARCHAR(512) DEFAULT "",
|
||||
twoFactorAuthenticationSecret VARCHAR(128) DEFAULT "",
|
||||
twoFactorAuthenticationEnabled BOOLEAN DEFAULT false,
|
||||
source VARCHAR(128) DEFAULT "",
|
||||
role VARCHAR(32),
|
||||
inviteToken VARCHAR(128) DEFAULT "",
|
||||
resetToken VARCHAR(128) DEFAULT "",
|
||||
resetTokenCreationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
active BOOLEAN DEFAULT 1,
|
||||
avatar MEDIUMBLOB NOT NULL,
|
||||
backgroundImage MEDIUMBLOB,
|
||||
loginLocationsJson MEDIUMTEXT, // { locations: [{ ip, userAgent, city, country, ts }] }
|
||||
|
||||
INDEX creationTime_index (creationTime),
|
||||
PRIMARY KEY(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS userGroups(
|
||||
id VARCHAR(128) NOT NULL UNIQUE,
|
||||
name VARCHAR(254) NOT NULL UNIQUE,
|
||||
source VARCHAR(128) DEFAULT "",
|
||||
PRIMARY KEY(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS groupMembers(
|
||||
groupId VARCHAR(128) NOT NULL,
|
||||
userId VARCHAR(128) NOT NULL,
|
||||
FOREIGN KEY(groupId) REFERENCES userGroups(id),
|
||||
FOREIGN KEY(userId) REFERENCES users(id),
|
||||
UNIQUE (groupId, userId));
|
||||
FOREIGN KEY(userId) REFERENCES users(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS tokens(
|
||||
id VARCHAR(128) NOT NULL UNIQUE,
|
||||
@@ -60,7 +50,6 @@ CREATE TABLE IF NOT EXISTS tokens(
|
||||
clientId VARCHAR(128),
|
||||
scope VARCHAR(512) NOT NULL,
|
||||
expires BIGINT NOT NULL, // FIXME: make this a timestamp
|
||||
lastUsedTime TIMESTAMP NULL,
|
||||
PRIMARY KEY(accessToken));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS apps(
|
||||
@@ -72,6 +61,9 @@ CREATE TABLE IF NOT EXISTS apps(
|
||||
healthTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the app last responded
|
||||
containerId VARCHAR(128),
|
||||
manifestJson TEXT,
|
||||
httpPort INTEGER, // this is the nginx proxy port and not manifest.httpPort
|
||||
location VARCHAR(128) NOT NULL,
|
||||
domain VARCHAR(128) NOT NULL,
|
||||
accessRestrictionJson TEXT, // { users: [ ], groups: [ ] }
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the app was installed
|
||||
updateTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the last app update was done
|
||||
@@ -84,29 +76,16 @@ CREATE TABLE IF NOT EXISTS apps(
|
||||
reverseProxyConfigJson TEXT, // { robotsTxt, csp }
|
||||
enableBackup BOOLEAN DEFAULT 1, // misnomer: controls automatic daily backups
|
||||
enableAutomaticUpdate BOOLEAN DEFAULT 1,
|
||||
enableMailbox BOOLEAN DEFAULT 1, // whether sendmail addon is enabled
|
||||
mailboxName VARCHAR(128), // mailbox of this app
|
||||
mailboxDomain VARCHAR(128), // mailbox domain of this app
|
||||
mailboxDisplayName VARCHAR(128), // mailbox display name
|
||||
enableInbox BOOLEAN DEFAULT 0, // whether recvmail addon is enabled
|
||||
inboxName VARCHAR(128), // mailbox of this app
|
||||
inboxDomain VARCHAR(128), // mailbox domain of this app
|
||||
mailboxName VARCHAR(128), // mailbox of this app. default allocated as '.app'
|
||||
mailboxDomain VARCHAR(128) NOT NULL, // mailbox domain of this apps
|
||||
label VARCHAR(128), // display name
|
||||
tagsJson VARCHAR(2048), // array of tags
|
||||
storageVolumeId VARCHAR(128),
|
||||
storageVolumePrefix VARCHAR(128),
|
||||
dataDir VARCHAR(256) UNIQUE,
|
||||
taskId INTEGER, // current task
|
||||
errorJson TEXT,
|
||||
servicesConfigJson TEXT, // app services configuration
|
||||
containerIp VARCHAR(16) UNIQUE, // this is not-null because of ip allocation fails, user can 'repair'
|
||||
appStoreIcon MEDIUMBLOB,
|
||||
icon MEDIUMBLOB,
|
||||
crontab TEXT,
|
||||
|
||||
FOREIGN KEY(mailboxDomain) REFERENCES domains(domain),
|
||||
FOREIGN KEY(taskId) REFERENCES tasks(id),
|
||||
FOREIGN KEY(storageVolumeId) REFERENCES volumes(id),
|
||||
UNIQUE (storageVolumeId, storageVolumePrefix),
|
||||
PRIMARY KEY(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS appPortBindings(
|
||||
@@ -120,14 +99,13 @@ CREATE TABLE IF NOT EXISTS appPortBindings(
|
||||
CREATE TABLE IF NOT EXISTS settings(
|
||||
name VARCHAR(128) NOT NULL UNIQUE,
|
||||
value TEXT,
|
||||
valueBlob MEDIUMBLOB,
|
||||
PRIMARY KEY(name));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS appAddonConfigs(
|
||||
appId VARCHAR(128) NOT NULL,
|
||||
addonId VARCHAR(32) NOT NULL,
|
||||
name VARCHAR(128) NOT NULL,
|
||||
value TEXT NOT NULL,
|
||||
value VARCHAR(512) NOT NULL,
|
||||
FOREIGN KEY(appId) REFERENCES apps(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS appEnvVars(
|
||||
@@ -138,30 +116,24 @@ CREATE TABLE IF NOT EXISTS appEnvVars(
|
||||
|
||||
CREATE TABLE IF NOT EXISTS backups(
|
||||
id VARCHAR(128) NOT NULL,
|
||||
remotePath VARCHAR(256) NOT NULL UNIQUE,
|
||||
label VARCHAR(128) DEFAULT "",
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
packageVersion VARCHAR(128) NOT NULL, /* app version or box version */
|
||||
encryptionVersion INTEGER, /* when null, unencrypted backup */
|
||||
version VARCHAR(128) NOT NULL, /* app version or box version */
|
||||
type VARCHAR(16) NOT NULL, /* 'box' or 'app' */
|
||||
identifier VARCHAR(128) NOT NULL, /* 'box' or the app id */
|
||||
dependsOnJson TEXT, /* comma separate list of objects this backup depends on */
|
||||
dependsOn TEXT, /* comma separate list of objects this backup depends on */
|
||||
state VARCHAR(16) NOT NULL,
|
||||
manifestJson TEXT, /* to validate if the app can be installed in this version of box */
|
||||
format VARCHAR(16) DEFAULT "tgz",
|
||||
preserveSecs INTEGER DEFAULT 0,
|
||||
|
||||
INDEX creationTime_index (creationTime),
|
||||
PRIMARY KEY (id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS eventlog(
|
||||
id VARCHAR(128) NOT NULL,
|
||||
action VARCHAR(128) NOT NULL,
|
||||
sourceJson TEXT, /* { userId, username, ip }. userId can be null for cron,sysadmin */
|
||||
dataJson TEXT, /* free flowing json based on action */
|
||||
source TEXT, /* { userId, username, ip }. userId can be null for cron,sysadmin */
|
||||
data TEXT, /* free flowing json based on action */
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
|
||||
INDEX creationTime_index (creationTime),
|
||||
PRIMARY KEY (id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS domains(
|
||||
@@ -170,9 +142,6 @@ CREATE TABLE IF NOT EXISTS domains(
|
||||
provider VARCHAR(16) NOT NULL,
|
||||
configJson TEXT, /* JSON containing the dns backend provider config */
|
||||
tlsConfigJson TEXT, /* JSON containing the tls provider config */
|
||||
wellKnownJson TEXT, /* JSON containing well known docs for this domain */
|
||||
|
||||
fallbackCertificateJson MEDIUMTEXT,
|
||||
|
||||
PRIMARY KEY (domain))
|
||||
|
||||
@@ -186,9 +155,7 @@ CREATE TABLE IF NOT EXISTS mail(
|
||||
mailFromValidation BOOLEAN DEFAULT 1,
|
||||
catchAllJson TEXT,
|
||||
relayJson TEXT,
|
||||
bannerJson TEXT,
|
||||
|
||||
dkimKeyJson MEDIUMTEXT,
|
||||
dkimSelector VARCHAR(128) NOT NULL DEFAULT "cloudron",
|
||||
|
||||
FOREIGN KEY(domain) REFERENCES domains(domain),
|
||||
@@ -207,28 +174,19 @@ CREATE TABLE IF NOT EXISTS mailboxes(
|
||||
name VARCHAR(128) NOT NULL,
|
||||
type VARCHAR(16) NOT NULL, /* 'mailbox', 'alias', 'list' */
|
||||
ownerId VARCHAR(128) NOT NULL, /* user id */
|
||||
ownerType VARCHAR(16) NOT NULL,
|
||||
aliasName VARCHAR(128), /* the target name type is an alias */
|
||||
aliasDomain VARCHAR(128), /* the target domain */
|
||||
aliasTarget VARCHAR(128), /* the target name type is an alias */
|
||||
membersJson TEXT, /* members of a group. fully qualified */
|
||||
membersOnly BOOLEAN DEFAULT false,
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
domain VARCHAR(128),
|
||||
active BOOLEAN DEFAULT 1,
|
||||
enablePop3 BOOLEAN DEFAULT 0,
|
||||
|
||||
FOREIGN KEY(domain) REFERENCES mail(domain),
|
||||
FOREIGN KEY(aliasDomain) REFERENCES mail(domain),
|
||||
UNIQUE (name, domain));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS locations(
|
||||
CREATE TABLE IF NOT EXISTS subdomains(
|
||||
appId VARCHAR(128) NOT NULL,
|
||||
domain VARCHAR(128) NOT NULL,
|
||||
subdomain VARCHAR(128) NOT NULL,
|
||||
type VARCHAR(128) NOT NULL, /* primary, secondary, redirect, alias */
|
||||
environmentVariable VARCHAR(128), /* only set for secondary */
|
||||
|
||||
certificateJson MEDIUMTEXT,
|
||||
type VARCHAR(128) NOT NULL, /* primary or redirect */
|
||||
|
||||
FOREIGN KEY(domain) REFERENCES domains(domain),
|
||||
FOREIGN KEY(appId) REFERENCES apps(id),
|
||||
@@ -237,27 +195,23 @@ CREATE TABLE IF NOT EXISTS locations(
|
||||
CREATE TABLE IF NOT EXISTS tasks(
|
||||
id int NOT NULL AUTO_INCREMENT,
|
||||
type VARCHAR(32) NOT NULL,
|
||||
argsJson TEXT,
|
||||
percent INTEGER DEFAULT 0,
|
||||
message TEXT,
|
||||
errorJson TEXT,
|
||||
resultJson TEXT,
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
|
||||
|
||||
INDEX creationTime_index (creationTime),
|
||||
PRIMARY KEY (id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS notifications(
|
||||
id int NOT NULL AUTO_INCREMENT,
|
||||
userId VARCHAR(128) NOT NULL,
|
||||
eventId VARCHAR(128), // reference to eventlog. can be null
|
||||
title VARCHAR(512) NOT NULL,
|
||||
message TEXT,
|
||||
acknowledged BOOLEAN DEFAULT false,
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
|
||||
INDEX creationTime_index (creationTime),
|
||||
FOREIGN KEY(eventId) REFERENCES eventlog(id),
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
|
||||
@@ -268,33 +222,9 @@ CREATE TABLE IF NOT EXISTS appPasswords(
|
||||
identifier VARCHAR(128) NOT NULL, // resourceId: app id or mail or webadmin
|
||||
hashedPassword VARCHAR(1024) NOT NULL,
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
UNIQUE KEY appPasswords_name_appId_identifier (name, userId, identifier)
|
||||
FOREIGN KEY(userId) REFERENCES users(id),
|
||||
|
||||
UNIQUE (name, userId),
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS volumes(
|
||||
id VARCHAR(128) NOT NULL UNIQUE,
|
||||
name VARCHAR(256) NOT NULL UNIQUE,
|
||||
hostPath VARCHAR(1024) NOT NULL UNIQUE,
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
mountType VARCHAR(16) DEFAULT "noop",
|
||||
mountOptionsJson TEXT,
|
||||
PRIMARY KEY (id)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS appMounts(
|
||||
appId VARCHAR(128) NOT NULL,
|
||||
volumeId VARCHAR(128) NOT NULL,
|
||||
readOnly BOOLEAN DEFAULT 1,
|
||||
UNIQUE KEY appMounts_appId_volumeId (appId, volumeId),
|
||||
FOREIGN KEY(appId) REFERENCES apps(id),
|
||||
FOREIGN KEY(volumeId) REFERENCES volumes(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS blobs(
|
||||
id VARCHAR(128) NOT NULL UNIQUE,
|
||||
value MEDIUMBLOB,
|
||||
PRIMARY KEY(id));
|
||||
|
||||
CHARACTER SET utf8 COLLATE utf8_bin;
|
||||
|
||||
13980
package-lock.json
generated
13980
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
98
package.json
98
package.json
@@ -10,68 +10,76 @@
|
||||
"type": "git",
|
||||
"url": "https://git.cloudron.io/cloudron/box.git"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=4.0.0 <=4.1.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"@google-cloud/dns": "^2.2.4",
|
||||
"@google-cloud/storage": "^5.19.2",
|
||||
"@google-cloud/dns": "^1.1.0",
|
||||
"@google-cloud/storage": "^2.5.0",
|
||||
"@sindresorhus/df": "git+https://github.com/cloudron-io/df.git#type",
|
||||
"async": "^3.2.3",
|
||||
"aws-sdk": "^2.1115.0",
|
||||
"basic-auth": "^2.0.1",
|
||||
"body-parser": "^1.20.0",
|
||||
"cloudron-manifestformat": "^5.16.0",
|
||||
"async": "^2.6.3",
|
||||
"aws-sdk": "^2.610.0",
|
||||
"body-parser": "^1.19.0",
|
||||
"cloudron-manifestformat": "^4.0.0",
|
||||
"connect": "^3.7.0",
|
||||
"connect-lastmile": "^2.1.1",
|
||||
"connect-lastmile": "^1.2.2",
|
||||
"connect-timeout": "^1.9.0",
|
||||
"cookie-parser": "^1.4.6",
|
||||
"cookie-session": "^2.0.0",
|
||||
"cookie-session": "^1.4.0",
|
||||
"cron": "^1.8.2",
|
||||
"db-migrate": "^0.11.13",
|
||||
"db-migrate-mysql": "^2.2.0",
|
||||
"debug": "^4.3.4",
|
||||
"dockerode": "^3.3.1",
|
||||
"ejs": "^3.1.6",
|
||||
"ejs-cli": "^2.2.3",
|
||||
"express": "^4.17.3",
|
||||
"ipaddr.js": "^2.0.1",
|
||||
"js-yaml": "^4.1.0",
|
||||
"json": "^11.0.0",
|
||||
"jsonwebtoken": "^8.5.1",
|
||||
"ldapjs": "^2.3.2",
|
||||
"lodash": "^4.17.21",
|
||||
"moment": "^2.29.2",
|
||||
"moment-timezone": "^0.5.34",
|
||||
"morgan": "^1.10.0",
|
||||
"multiparty": "^4.2.3",
|
||||
"db-migrate": "^0.11.6",
|
||||
"db-migrate-mysql": "^1.1.10",
|
||||
"debug": "^4.1.1",
|
||||
"dockerode": "^2.5.8",
|
||||
"ejs": "^2.6.1",
|
||||
"ejs-cli": "^2.1.1",
|
||||
"express": "^4.17.1",
|
||||
"js-yaml": "^3.13.1",
|
||||
"json": "^9.0.6",
|
||||
"ldapjs": "^1.0.2",
|
||||
"lodash": "^4.17.15",
|
||||
"lodash.chunk": "^4.2.0",
|
||||
"mime": "^2.4.4",
|
||||
"moment-timezone": "^0.5.27",
|
||||
"morgan": "^1.9.1",
|
||||
"multiparty": "^4.2.1",
|
||||
"mysql": "^2.18.1",
|
||||
"nodemailer": "^6.7.3",
|
||||
"nodemailer": "^6.4.2",
|
||||
"nodemailer-smtp-transport": "^2.7.4",
|
||||
"once": "^1.4.0",
|
||||
"parse-links": "^0.1.0",
|
||||
"pretty-bytes": "^5.3.0",
|
||||
"progress-stream": "^2.0.0",
|
||||
"qrcode": "^1.5.0",
|
||||
"readdirp": "^3.6.0",
|
||||
"safetydance": "^2.2.0",
|
||||
"semver": "^7.3.7",
|
||||
"proxy-middleware": "^0.15.0",
|
||||
"qrcode": "^1.4.4",
|
||||
"readdirp": "^3.3.0",
|
||||
"request": "^2.88.0",
|
||||
"rimraf": "^2.6.3",
|
||||
"s3-block-read-stream": "^0.5.0",
|
||||
"safetydance": "^1.0.0",
|
||||
"semver": "^6.1.1",
|
||||
"showdown": "^1.9.1",
|
||||
"speakeasy": "^2.0.0",
|
||||
"split": "^1.0.1",
|
||||
"superagent": "^7.1.1",
|
||||
"superagent": "^5.2.1",
|
||||
"supererror": "^0.7.2",
|
||||
"tar-fs": "github:cloudron-io/tar-fs#ignore_stat_error",
|
||||
"tar-stream": "^2.2.0",
|
||||
"tar-stream": "^2.1.0",
|
||||
"tldjs": "^2.3.1",
|
||||
"ua-parser-js": "^1.0.2",
|
||||
"underscore": "^1.13.2",
|
||||
"uuid": "^8.3.2",
|
||||
"validator": "^13.7.0",
|
||||
"ws": "^8.5.0",
|
||||
"underscore": "^1.9.2",
|
||||
"uuid": "^3.4.0",
|
||||
"validator": "^11.0.0",
|
||||
"ws": "^7.2.1",
|
||||
"xml2js": "^0.4.23"
|
||||
},
|
||||
"devDependencies": {
|
||||
"expect.js": "*",
|
||||
"hock": "^1.4.1",
|
||||
"js2xmlparser": "^4.0.2",
|
||||
"mocha": "^9.2.2",
|
||||
"hock": "^1.3.3",
|
||||
"js2xmlparser": "^4.0.0",
|
||||
"mocha": "^6.1.4",
|
||||
"mock-aws-s3": "git+https://github.com/cloudron-io/mock-aws-s3.git",
|
||||
"nock": "^13.2.4",
|
||||
"node-sass": "^7.0.1",
|
||||
"nyc": "^15.1.0"
|
||||
"nock": "^10.0.6",
|
||||
"node-sass": "^4.12.0",
|
||||
"recursive-readdir": "^2.2.2"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "./runTests",
|
||||
|
||||
44
runTests
44
runTests
@@ -2,11 +2,11 @@
|
||||
|
||||
set -eu
|
||||
|
||||
readonly source_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly SOURCE_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly DATA_DIR="${HOME}/.cloudron_test"
|
||||
readonly DEFAULT_TESTS="./src/test/*-test.js ./src/routes/test/*-test.js"
|
||||
|
||||
! "${source_dir}/src/test/checkInstall" && exit 1
|
||||
! "${SOURCE_dir}/src/test/checkInstall" && exit 1
|
||||
|
||||
# cleanup old data dirs some of those docker container data requires sudo to be removed
|
||||
echo "=> Provide root password to purge any leftover data in ${DATA_DIR} and load apparmor profile:"
|
||||
@@ -22,30 +22,19 @@ fi
|
||||
mkdir -p ${DATA_DIR}
|
||||
cd ${DATA_DIR}
|
||||
mkdir -p appsdata
|
||||
mkdir -p boxdata/box boxdata/mail boxdata/certs boxdata/mail/dkim/localhost boxdata/mail/dkim/foobar.com
|
||||
mkdir -p platformdata/addons/mail/banner platformdata/nginx/cert platformdata/nginx/applications platformdata/collectd/collectd.conf.d platformdata/addons platformdata/logrotate.d platformdata/backup platformdata/logs/tasks platformdata/sftp/ssh platformdata/firewall platformdata/update
|
||||
sudo mkdir -p /mnt/cloudron-test-music /media/cloudron-test-music # volume test
|
||||
|
||||
# translations
|
||||
mkdir -p box/dashboard/dist/translation
|
||||
cp -r ${source_dir}/../dashboard/dist/translation/* box/dashboard/dist/translation
|
||||
mkdir -p boxdata/profileicons boxdata/appicons boxdata/mail boxdata/certs boxdata/mail/dkim/localhost boxdata/mail/dkim/foobar.com
|
||||
mkdir -p platformdata/addons/mail platformdata/nginx/cert platformdata/nginx/applications platformdata/collectd/collectd.conf.d platformdata/addons platformdata/logrotate.d platformdata/backup platformdata/logs/tasks
|
||||
|
||||
# put cert
|
||||
echo "=> Generating a localhost selfsigned cert"
|
||||
openssl req -x509 -newkey rsa:2048 -keyout platformdata/nginx/cert/host.key -out platformdata/nginx/cert/host.cert -days 3650 -subj '/CN=localhost' -nodes -config <(cat /etc/ssl/openssl.cnf <(printf "\n[SAN]\nsubjectAltName=DNS:*.localhost"))
|
||||
|
||||
# clear out any containers if FAST is unset
|
||||
if [[ -z ${FAST+x} ]]; then
|
||||
echo "=> Delete all docker containers first"
|
||||
docker ps -qa --filter "label=isCloudronManaged" | xargs --no-run-if-empty docker rm -f
|
||||
docker rm -f mysql-server
|
||||
echo "==> To skip this run with: FAST=1 ./runTests"
|
||||
else
|
||||
echo "==> WARNING!! Skipping docker container cleanup, the database might not be pristine!"
|
||||
fi
|
||||
# clear out any containers
|
||||
echo "=> Delete all docker containers first"
|
||||
docker ps -qa | xargs --no-run-if-empty docker rm -f
|
||||
|
||||
# create docker network (while the infra code does this, most tests skip infra setup)
|
||||
docker network create --subnet=172.18.0.0/16 --ip-range=172.18.0.0/20 --gateway 172.18.0.1 cloudron --ipv6 --subnet=fd00:c107:d509::/64 || true
|
||||
docker network create --subnet=172.18.0.0/16 cloudron || true
|
||||
|
||||
# create the same mysql server version to test with
|
||||
OUT=`docker inspect mysql-server` || true
|
||||
@@ -63,12 +52,6 @@ while ! mysqladmin ping -h"${MYSQL_IP}" --silent; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
echo "=> Ensure local base image"
|
||||
docker pull cloudron/base:3.0.0@sha256:455c70428723e3a823198c57472785437eb6eab082e79b3ff04ea584faf46e92
|
||||
|
||||
echo "=> Create iptables blocklist"
|
||||
sudo ipset create cloudron_blocklist hash:net || true
|
||||
|
||||
echo "=> Starting cloudron-syslog"
|
||||
cloudron-syslog --logdir "${DATA_DIR}/platformdata/logs/" &
|
||||
|
||||
@@ -76,18 +59,13 @@ echo "=> Ensure database"
|
||||
mysql -h"${MYSQL_IP}" -uroot -ppassword -e 'CREATE DATABASE IF NOT EXISTS box'
|
||||
|
||||
echo "=> Run database migrations"
|
||||
cd "${source_dir}"
|
||||
cd "${SOURCE_dir}"
|
||||
BOX_ENV=test DATABASE_URL=mysql://root:password@${MYSQL_IP}/box node_modules/.bin/db-migrate up
|
||||
|
||||
echo "=> Run tests with mocha"
|
||||
TESTS=${DEFAULT_TESTS}
|
||||
if [[ $# -gt 0 ]]; then
|
||||
TESTS="$*"
|
||||
fi
|
||||
|
||||
if [[ -z ${COVERAGE+x} ]]; then
|
||||
echo "=> Run tests with mocha"
|
||||
BOX_ENV=test ./node_modules/.bin/mocha --bail --no-timeouts --exit -R spec ${TESTS}
|
||||
else
|
||||
echo "=> Run tests with mocha and coverage"
|
||||
BOX_ENV=test ./node_modules/.bin/nyc --reporter=html ./node_modules/.bin/mocha --no-timeouts --exit -R spec ${TESTS}
|
||||
fi
|
||||
BOX_ENV=test ./node_modules/mocha/bin/_mocha --bail --no-timeouts --exit -R spec ${TESTS}
|
||||
|
||||
@@ -2,12 +2,6 @@
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
function exitHandler() {
|
||||
rm -f /etc/update-motd.d/91-cloudron-install-in-progress
|
||||
}
|
||||
|
||||
trap exitHandler EXIT
|
||||
|
||||
# change this to a hash when we make a upgrade release
|
||||
readonly LOG_FILE="/var/log/cloudron-setup.log"
|
||||
readonly MINIMUM_DISK_SIZE_GB="18" # this is the size of "/" and required to fit in docker images 18 is a safe bet for different reporting on 20GB min
|
||||
@@ -26,8 +20,8 @@ readonly GREEN='\033[32m'
|
||||
readonly DONE='\033[m'
|
||||
|
||||
# verify the system has minimum requirements met
|
||||
if [[ "${rootfs_type}" != "ext4" && "${rootfs_type}" != "xfs" ]]; then
|
||||
echo "Error: Cloudron requires '/' to be ext4 or xfs" # see #364
|
||||
if [[ "${rootfs_type}" != "ext4" ]]; then
|
||||
echo "Error: Cloudron requires '/' to be ext4" # see #364
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -41,61 +35,41 @@ if [[ "${disk_size_gb}" -lt "${MINIMUM_DISK_SIZE_GB}" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$(uname -m)" != "x86_64" ]]; then
|
||||
echo "Error: Cloudron only supports amd64/x86_64"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if cvirt=$(systemd-detect-virt --container); then
|
||||
echo "Error: Cloudron does not support ${cvirt}, only runs on bare metal or with full hardware virtualization"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# do not use is-active in case box service is down and user attempts to re-install
|
||||
if systemctl cat box.service >/dev/null 2>&1; then
|
||||
if systemctl -q is-active box; then
|
||||
echo "Error: Cloudron is already installed. To reinstall, start afresh"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
provider="generic"
|
||||
initBaseImage="true"
|
||||
# provisioning data
|
||||
provider=""
|
||||
requestedVersion=""
|
||||
installServerOrigin="https://api.cloudron.io"
|
||||
apiServerOrigin="https://api.cloudron.io"
|
||||
webServerOrigin="https://cloudron.io"
|
||||
consoleServerOrigin="https://console.cloudron.io"
|
||||
sourceTarballUrl=""
|
||||
rebootServer="true"
|
||||
setupToken="" # this is a OTP for securing an installation (https://forum.cloudron.io/topic/6389/add-password-for-initial-configuration)
|
||||
appstoreSetupToken=""
|
||||
redo="false"
|
||||
license=""
|
||||
|
||||
args=$(getopt -o "" -l "help,provider:,version:,env:,skip-reboot,generate-setup-token,setup-token:,redo" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "help,skip-baseimage-init,provider:,version:,env:,skip-reboot,license:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--help) echo "See https://docs.cloudron.io/installation/ on how to install Cloudron"; exit 0;;
|
||||
--help) echo "See https://cloudron.io/documentation/installation/ on how to install Cloudron"; exit 0;;
|
||||
--provider) provider="$2"; shift 2;;
|
||||
--version) requestedVersion="$2"; shift 2;;
|
||||
--env)
|
||||
if [[ "$2" == "dev" ]]; then
|
||||
apiServerOrigin="https://api.dev.cloudron.io"
|
||||
webServerOrigin="https://dev.cloudron.io"
|
||||
consoleServerOrigin="https://console.dev.cloudron.io"
|
||||
installServerOrigin="https://api.dev.cloudron.io"
|
||||
elif [[ "$2" == "staging" ]]; then
|
||||
apiServerOrigin="https://api.staging.cloudron.io"
|
||||
webServerOrigin="https://staging.cloudron.io"
|
||||
consoleServerOrigin="https://console.staging.cloudron.io"
|
||||
installServerOrigin="https://api.staging.cloudron.io"
|
||||
elif [[ "$2" == "unstable" ]]; then
|
||||
installServerOrigin="https://api.dev.cloudron.io"
|
||||
fi
|
||||
shift 2;;
|
||||
--license) license="$2"; shift 2;;
|
||||
--skip-baseimage-init) initBaseImage="false"; shift;;
|
||||
--skip-reboot) rebootServer="false"; shift;;
|
||||
--redo) redo="true"; shift;;
|
||||
--setup-token) appstoreSetupToken="$2"; shift 2;;
|
||||
--generate-setup-token) setupToken="$(openssl rand -hex 10)"; shift;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -109,41 +83,56 @@ fi
|
||||
|
||||
# Only --help works with mismatched ubuntu
|
||||
ubuntu_version=$(lsb_release -rs)
|
||||
if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" && "${ubuntu_version}" != "20.04" && "${ubuntu_version}" != "22.04" ]]; then
|
||||
echo "Cloudron requires Ubuntu 18.04, 20.04, 22.04" > /dev/stderr
|
||||
if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" ]]; then
|
||||
echo "Cloudron requires Ubuntu 16.04 or 18.04" > /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if which nginx >/dev/null || which docker >/dev/null || which node > /dev/null; then
|
||||
if [[ "${redo}" == "false" ]]; then
|
||||
echo "Error: Some packages like nginx/docker/nodejs are already installed. Cloudron requires specific versions of these packages and will install them as part of it's installation. Please start with a fresh Ubuntu install and run this script again." > /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Install MOTD file for stack script style installations. this is removed by the trap exit handler. Heredoc quotes prevents parameter expansion
|
||||
cat > /etc/update-motd.d/91-cloudron-install-in-progress <<'EOF'
|
||||
#!/bin/bash
|
||||
|
||||
printf "**********************************************************************\n\n"
|
||||
|
||||
printf "\t\t\tWELCOME TO CLOUDRON\n"
|
||||
printf "\t\t\t-------------------\n"
|
||||
|
||||
printf '\n\e[1;32m%-6s\e[m\n\n' "Cloudron is installing. Run 'tail -f /var/log/cloudron-setup.log' to view progress."
|
||||
|
||||
printf "Cloudron overview - https://docs.cloudron.io/ \n"
|
||||
printf "Cloudron setup - https://docs.cloudron.io/installation/#setup \n"
|
||||
|
||||
printf "\nFor help and more information, visit https://forum.cloudron.io\n\n"
|
||||
|
||||
printf "**********************************************************************\n"
|
||||
EOF
|
||||
chmod +x /etc/update-motd.d/91-cloudron-install-in-progress
|
||||
|
||||
# Can only write after we have confirmed script has root access
|
||||
echo "Running cloudron-setup with args : $@" > "${LOG_FILE}"
|
||||
|
||||
# validate arguments in the absence of data
|
||||
readonly AVAILABLE_PROVIDERS="azure, caas, cloudscale, contabo, digitalocean, ec2, exoscale, gce, hetzner, interox, lightsail, linode, netcup, ovh, rosehosting, scaleway, skysilk, time4vps, upcloud, vultr or generic"
|
||||
if [[ -z "${provider}" ]]; then
|
||||
echo "--provider is required ($AVAILABLE_PROVIDERS)"
|
||||
exit 1
|
||||
elif [[ \
|
||||
"${provider}" != "ami" && \
|
||||
"${provider}" != "azure" && \
|
||||
"${provider}" != "azure-image" && \
|
||||
"${provider}" != "caas" && \
|
||||
"${provider}" != "cloudscale" && \
|
||||
"${provider}" != "contabo" && \
|
||||
"${provider}" != "digitalocean" && \
|
||||
"${provider}" != "digitalocean-mp" && \
|
||||
"${provider}" != "ec2" && \
|
||||
"${provider}" != "exoscale" && \
|
||||
"${provider}" != "gce" && \
|
||||
"${provider}" != "hetzner" && \
|
||||
"${provider}" != "interox" && \
|
||||
"${provider}" != "interox-image" && \
|
||||
"${provider}" != "lightsail" && \
|
||||
"${provider}" != "linode" && \
|
||||
"${provider}" != "linode-oneclick" && \
|
||||
"${provider}" != "linode-stackscript" && \
|
||||
"${provider}" != "netcup" && \
|
||||
"${provider}" != "netcup-image" && \
|
||||
"${provider}" != "ovh" && \
|
||||
"${provider}" != "rosehosting" && \
|
||||
"${provider}" != "scaleway" && \
|
||||
"${provider}" != "skysilk" && \
|
||||
"${provider}" != "skysilk-image" && \
|
||||
"${provider}" != "time4vps" && \
|
||||
"${provider}" != "time4vps-image" && \
|
||||
"${provider}" != "upcloud" && \
|
||||
"${provider}" != "upcloud-image" && \
|
||||
"${provider}" != "vultr" && \
|
||||
"${provider}" != "generic" \
|
||||
]]; then
|
||||
echo "--provider must be one of: $AVAILABLE_PROVIDERS"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "##############################################"
|
||||
echo " Cloudron Setup (${requestedVersion:-latest})"
|
||||
@@ -155,19 +144,33 @@ echo ""
|
||||
echo " Join us at https://forum.cloudron.io for any questions."
|
||||
echo ""
|
||||
|
||||
echo "=> Updating apt and installing script dependencies"
|
||||
if ! apt-get update &>> "${LOG_FILE}"; then
|
||||
echo "Could not update package repositories. See ${LOG_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
if [[ "${initBaseImage}" == "true" ]]; then
|
||||
echo "=> Installing software-properties-common"
|
||||
if ! apt-get install -y software-properties-common &>> "${LOG_FILE}"; then
|
||||
echo "Could not install software-properties-common (for add-apt-repository below). See ${LOG_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install --no-install-recommends curl python3 ubuntu-standard software-properties-common -y &>> "${LOG_FILE}"; then
|
||||
echo "Could not install setup dependencies (curl). See ${LOG_FILE}"
|
||||
exit 1
|
||||
echo "=> Ensure required apt sources"
|
||||
if ! add-apt-repository universe &>> "${LOG_FILE}"; then
|
||||
echo "Could not add required apt sources (for nginx-full). See ${LOG_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "=> Updating apt and installing script dependencies"
|
||||
if ! apt-get update &>> "${LOG_FILE}"; then
|
||||
echo "Could not update package repositories. See ${LOG_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install curl python3 ubuntu-standard -y &>> "${LOG_FILE}"; then
|
||||
echo "Could not install setup dependencies (curl). See ${LOG_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "=> Checking version"
|
||||
if ! releaseJson=$($curl -s "${installServerOrigin}/api/v1/releases?boxVersion=${requestedVersion}"); then
|
||||
if ! releaseJson=$($curl -s "${apiServerOrigin}/api/v1/releases?boxVersion=${requestedVersion}"); then
|
||||
echo "Failed to get release information"
|
||||
exit 1
|
||||
fi
|
||||
@@ -183,7 +186,7 @@ if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "=> Downloading Cloudron version ${version} ..."
|
||||
echo "=> Downloading version ${version} ..."
|
||||
box_src_tmp_dir=$(mktemp -dt box-src-XXXXXX)
|
||||
|
||||
if ! $curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
|
||||
@@ -191,19 +194,21 @@ if ! $curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -n "=> Installing base dependencies and downloading docker images (this takes some time) ..."
|
||||
init_ubuntu_script=$(test -f "${box_src_tmp_dir}/scripts/init-ubuntu.sh" && echo "${box_src_tmp_dir}/scripts/init-ubuntu.sh" || echo "${box_src_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh")
|
||||
if ! /bin/bash "${init_ubuntu_script}" &>> "${LOG_FILE}"; then
|
||||
echo "Init script failed. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
if [[ "${initBaseImage}" == "true" ]]; then
|
||||
echo -n "=> Installing base dependencies and downloading docker images (this takes some time) ..."
|
||||
if ! /bin/bash "${box_src_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh" "${provider}" "../src" &>> "${LOG_FILE}"; then
|
||||
echo "Init script failed. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# The provider flag is still used for marketplace images
|
||||
echo "=> Installing Cloudron version ${version} (this takes some time) ..."
|
||||
# NOTE: this install script only supports 4.2 and above
|
||||
echo "=> Installing version ${version} (this takes some time) ..."
|
||||
mkdir -p /etc/cloudron
|
||||
echo "${provider}" > /etc/cloudron/PROVIDER
|
||||
[[ ! -z "${setupToken}" ]] && echo "${setupToken}" > /etc/cloudron/SETUP_TOKEN
|
||||
|
||||
[[ -n "${license}" ]] && echo -n "$license" > /etc/cloudron/LICENSE
|
||||
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
@@ -212,39 +217,20 @@ fi
|
||||
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('api_server_origin', '${apiServerOrigin}');" 2>/dev/null
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('web_server_origin', '${webServerOrigin}');" 2>/dev/null
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('console_server_origin', '${consoleServerOrigin}');" 2>/dev/null
|
||||
|
||||
if [[ -n "${appstoreSetupToken}" ]]; then
|
||||
if ! setupResponse=$(curl -sX POST -H "Content-type: application/json" --data "{\"setupToken\": \"${appstoreSetupToken}\"}" "${apiServerOrigin}/api/v1/cloudron_setup_done"); then
|
||||
echo "Could not complete setup. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cloudronId=$(echo "${setupResponse}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["cloudronId"])')
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('cloudron_id', '${cloudronId}');" 2>/dev/null
|
||||
|
||||
appstoreApiToken=$(echo "${setupResponse}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["cloudronToken"])')
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('appstore_api_token', '${appstoreApiToken}');" 2>/dev/null
|
||||
fi
|
||||
|
||||
echo -n "=> Waiting for cloudron to be ready (this takes some time) ..."
|
||||
while true; do
|
||||
echo -n "."
|
||||
if status=$($curl -s -f "http://localhost:3000/api/v1/cloudron/status" 2>/dev/null); then
|
||||
if status=$($curl -q -f "http://localhost:3000/api/v1/cloudron/status" 2>/dev/null); then
|
||||
break # we are up and running
|
||||
fi
|
||||
sleep 10
|
||||
done
|
||||
|
||||
if ! ip=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv4.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p'); then
|
||||
if ! ip=$(curl --fail --connect-timeout 2 --max-time 2 -q https://api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p'); then
|
||||
ip='<IP>'
|
||||
fi
|
||||
if [[ -z "${setupToken}" ]]; then
|
||||
url="https://${ip}"
|
||||
else
|
||||
url="https://${ip}/?setupToken=${setupToken}"
|
||||
fi
|
||||
echo -e "\n\n${GREEN}After reboot, visit ${url} and accept the self-signed certificate to finish setup.${DONE}\n"
|
||||
echo -e "\n\n${GREEN}Visit https://${ip} and accept the self-signed certificate to finish setup.${DONE}\n"
|
||||
|
||||
if [[ "${rebootServer}" == "true" ]]; then
|
||||
systemctl stop box mysql # sometimes mysql ends up having corrupt privilege tables
|
||||
@@ -252,7 +238,7 @@ if [[ "${rebootServer}" == "true" ]]; then
|
||||
read -p "The server has to be rebooted to apply all the settings. Reboot now ? [Y/n] " yn
|
||||
yn=${yn:-y}
|
||||
case $yn in
|
||||
[Yy]* ) exitHandler; systemctl reboot;;
|
||||
[Yy]* ) systemctl reboot;;
|
||||
* ) exit;;
|
||||
esac
|
||||
fi
|
||||
|
||||
@@ -8,15 +8,14 @@ set -eu -o pipefail
|
||||
PASTEBIN="https://paste.cloudron.io"
|
||||
OUT="/tmp/cloudron-support.log"
|
||||
LINE="\n========================================================\n"
|
||||
CLOUDRON_SUPPORT_PUBLIC_KEY="ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGWS+930b8QdzbchGljt3KSljH9wRhYvht8srrtQHdzg support@cloudron.io"
|
||||
CLOUDRON_SUPPORT_PUBLIC_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQVilclYAIu+ioDp/sgzzFz6YU0hPcRYY7ze/LiF/lC7uQqK062O54BFXTvQ3ehtFZCx3bNckjlT2e6gB8Qq07OM66De4/S/g+HJW4TReY2ppSPMVNag0TNGxDzVH8pPHOysAm33LqT2b6L/wEXwC6zWFXhOhHjcMqXvi8Ejaj20H1HVVcf/j8qs5Thkp9nAaFTgQTPu8pgwD8wDeYX1hc9d0PYGesTADvo6HF4hLEoEnefLw7PaStEbzk2fD3j7/g5r5HcgQQXBe74xYZ/1gWOX2pFNuRYOBSEIrNfJEjFJsqk3NR1+ZoMGK7j+AZBR4k0xbrmncQLcQzl6MMDzkp support@cloudron.io"
|
||||
HELP_MESSAGE="
|
||||
This script collects diagnostic information to help debug server related issues.
|
||||
This script collects diagnostic information to help debug server related issues
|
||||
|
||||
Options:
|
||||
--owner-login Login as owner
|
||||
--enable-ssh Enable SSH access for the Cloudron support team
|
||||
--reset-appstore-account Reset associated cloudron.io account
|
||||
--help Show this message
|
||||
--admin-login Login as administrator
|
||||
--enable-ssh Enable SSH access for the Cloudron support team
|
||||
--help Show this message
|
||||
"
|
||||
|
||||
# We require root
|
||||
@@ -27,7 +26,7 @@ fi
|
||||
|
||||
enableSSH="false"
|
||||
|
||||
args=$(getopt -o "" -l "help,enable-ssh,admin-login,owner-login,reset-appstore-account" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "help,enable-ssh,admin-login" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
@@ -35,23 +34,12 @@ while true; do
|
||||
--help) echo -e "${HELP_MESSAGE}"; exit 0;;
|
||||
--enable-ssh) enableSSH="true"; shift;;
|
||||
--admin-login)
|
||||
# fall through
|
||||
;&
|
||||
--owner-login)
|
||||
admin_username=$(mysql -NB -uroot -ppassword -e "SELECT username FROM box.users WHERE role='owner' AND username IS NOT NULL ORDER BY creationTime LIMIT 1" 2>/dev/null)
|
||||
admin_username=$(mysql -NB -uroot -ppassword -e "SELECT username FROM box.users WHERE role='owner' LIMIT 1" 2>/dev/null)
|
||||
admin_password=$(pwgen -1s 12)
|
||||
dashboard_domain=$(mysql -NB -uroot -ppassword -e "SELECT value FROM box.settings WHERE name='admin_fqdn'" 2>/dev/null)
|
||||
mysql -NB -uroot -ppassword -e "INSERT INTO box.settings (name, value) VALUES ('ghosts_config', '{\"${admin_username}\":\"${admin_password}\"}') ON DUPLICATE KEY UPDATE name='ghosts_config', value='{\"${admin_username}\":\"${admin_password}\"}'" 2>/dev/null
|
||||
echo "Login at https://${dashboard_domain} as ${admin_username} / ${admin_password} . This password may only be used once."
|
||||
exit 0
|
||||
;;
|
||||
--reset-appstore-account)
|
||||
echo -e "This will reset the Cloudron.io account associated with this Cloudron. Once reset, you can re-login with a different account in the Cloudron Dashboard. See https://docs.cloudron.io/appstore/#change-account for more information.\n"
|
||||
read -e -p "Reset the Cloudron.io account? [y/N] " choice
|
||||
[[ "$choice" != [Yy]* ]] && exit 1
|
||||
mysql -uroot -ppassword -e "DELETE FROM box.settings WHERE name='cloudron_token';" 2>/dev/null
|
||||
dashboard_domain=$(mysql -NB -uroot -ppassword -e "SELECT value FROM box.settings WHERE name='admin_fqdn'" 2>/dev/null)
|
||||
echo "Account reset. Please re-login at https://${dashboard_domain}/#/appstore"
|
||||
ghost_file=/home/yellowtent/platformdata/cloudron_ghost.json
|
||||
printf '{"%s":"%s"}\n' "${admin_username}" "${admin_password}" > "${ghost_file}"
|
||||
chown yellowtent:yellowtent "${ghost_file}" && chmod o-r,g-r "${ghost_file}"
|
||||
echo "Login as ${admin_username} / ${admin_password} . Remove ${ghost_file} when done."
|
||||
exit 0
|
||||
;;
|
||||
--) break;;
|
||||
@@ -66,7 +54,7 @@ if [[ "`df --output="avail" / | sed -n 2p`" -lt "10240" ]]; then
|
||||
echo ""
|
||||
df -h
|
||||
echo ""
|
||||
echo "To recover from a full disk, follow the guide at https://docs.cloudron.io/troubleshooting/#recovery-after-disk-full"
|
||||
echo "To recover from a full disk, follow the guide at https://cloudron.io/documentation/troubleshooting/#recovery-after-disk-full"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -77,39 +65,11 @@ if [[ "`df --output="avail" /tmp | sed -n 2p`" -lt "5120" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${enableSSH}" == "true" ]]; then
|
||||
ssh_port=$(cat /etc/ssh/sshd_config | grep "Port " | sed -e "s/.*Port //")
|
||||
|
||||
ssh_user="cloudron-support"
|
||||
keys_file="/home/cloudron-support/.ssh/authorized_keys"
|
||||
|
||||
echo -e $LINE"SSH"$LINE >> $OUT
|
||||
echo "Username: ${ssh_user}" >> $OUT
|
||||
echo "Port: ${ssh_port}" >> $OUT
|
||||
echo "Key file: ${keys_file}" >> $OUT
|
||||
|
||||
echo -n "Enabling ssh access for the Cloudron support team..."
|
||||
mkdir -p $(dirname "${keys_file}") # .ssh does not exist sometimes
|
||||
touch "${keys_file}" # required for concat to work
|
||||
if ! grep -q "${CLOUDRON_SUPPORT_PUBLIC_KEY}" "${keys_file}"; then
|
||||
echo -e "\n${CLOUDRON_SUPPORT_PUBLIC_KEY}" >> "${keys_file}"
|
||||
chmod 600 "${keys_file}"
|
||||
chown "${ssh_user}" "${keys_file}"
|
||||
fi
|
||||
|
||||
echo "Done"
|
||||
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo -n "Generating Cloudron Support stats..."
|
||||
|
||||
# clear file
|
||||
rm -rf $OUT
|
||||
|
||||
echo -e $LINE"DASHBOARD DOMAIN"$LINE >> $OUT
|
||||
mysql -NB -uroot -ppassword -e "SELECT value FROM box.settings WHERE name='admin_fqdn'" &>> $OUT 2>/dev/null || true
|
||||
|
||||
echo -e $LINE"PROVIDER"$LINE >> $OUT
|
||||
cat /etc/cloudron/PROVIDER &>> $OUT || true
|
||||
|
||||
@@ -131,12 +91,12 @@ echo -e $LINE"Backup stats (possibly misleading)"$LINE >> $OUT
|
||||
du -hcsL /var/backups/* &>> $OUT || true
|
||||
|
||||
echo -e $LINE"System daemon status"$LINE >> $OUT
|
||||
systemctl status --lines=100 box mysql unbound cloudron-syslog nginx collectd docker &>> $OUT
|
||||
systemctl status --lines=100 cloudron.target box mysql unbound cloudron-syslog nginx collectd docker &>> $OUT
|
||||
|
||||
echo -e $LINE"Box logs"$LINE >> $OUT
|
||||
tail -n 100 /home/yellowtent/platformdata/logs/box.log &>> $OUT
|
||||
|
||||
echo -e $LINE"Interface Info"$LINE >> $OUT
|
||||
echo -e $LINE"Firewall chains"$LINE >> $OUT
|
||||
ip addr &>> $OUT
|
||||
|
||||
echo -e $LINE"Firewall chains"$LINE >> $OUT
|
||||
@@ -144,8 +104,40 @@ iptables -L &>> $OUT
|
||||
|
||||
echo "Done"
|
||||
|
||||
if [[ "${enableSSH}" == "true" ]]; then
|
||||
ssh_port=$(cat /etc/ssh/sshd_config | grep "Port " | sed -e "s/.*Port //")
|
||||
permit_root_login=$(grep -q ^PermitRootLogin.*yes /etc/ssh/sshd_config && echo "yes" || echo "no")
|
||||
|
||||
# support.js uses similar logic
|
||||
if $(grep -q "ec2\|lightsail\|ami" /etc/cloudron/PROVIDER); then
|
||||
ssh_user="ubuntu"
|
||||
keys_file="/home/ubuntu/.ssh/authorized_keys"
|
||||
else
|
||||
ssh_user="root"
|
||||
keys_file="/root/.ssh/authorized_keys"
|
||||
fi
|
||||
|
||||
echo -e $LINE"SSH"$LINE >> $OUT
|
||||
echo "Username: ${ssh_user}" >> $OUT
|
||||
echo "Port: ${ssh_port}" >> $OUT
|
||||
echo "PermitRootLogin: ${permit_root_login}" >> $OUT
|
||||
echo "Key file: ${keys_file}" >> $OUT
|
||||
|
||||
echo -n "Enabling ssh access for the Cloudron support team..."
|
||||
mkdir -p $(dirname "${keys_file}") # .ssh does not exist sometimes
|
||||
touch "${keys_file}" # required for concat to work
|
||||
if ! grep -q "${CLOUDRON_SUPPORT_PUBLIC_KEY}" "${keys_file}"; then
|
||||
echo -e "\n${CLOUDRON_SUPPORT_PUBLIC_KEY}" >> "${keys_file}"
|
||||
chmod 600 "${keys_file}"
|
||||
chown "${ssh_user}" "${keys_file}"
|
||||
fi
|
||||
|
||||
echo "Done"
|
||||
fi
|
||||
|
||||
echo -n "Uploading information..."
|
||||
paste_key=$(curl -X POST ${PASTEBIN}/documents --silent --data-binary "@$OUT" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])")
|
||||
# for some reason not using $(cat $OUT) will not contain newlines!?
|
||||
paste_key=$(curl -X POST ${PASTEBIN}/documents --silent -d "$(cat $OUT)" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])")
|
||||
echo "Done"
|
||||
|
||||
echo ""
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user