Compare commits
7 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 700a7637b6 | |||
| feb61c27d9 | |||
| 31d742fa67 | |||
| dd5737f948 | |||
| 50d7610bfd | |||
| e51dd8f530 | |||
| bad6e39d59 |
@@ -2289,3 +2289,12 @@
|
||||
[6.3.2]
|
||||
* Avatar was migrated as base64 instead of binary
|
||||
* Fix issue where filemanager came up empty for CIFS mounts
|
||||
|
||||
[6.3.3]
|
||||
* volumes: add filesystem volume type for shared folders
|
||||
* mail: enable sieve extension editheader
|
||||
* mail: update solr to 8.9.0
|
||||
|
||||
[6.3.4]
|
||||
* Fix issue where old nginx configs were not removed before upgrade
|
||||
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
safe = require('safetydance');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM volumes', function (error, volumes) {
|
||||
if (error || volumes.length === 0) return callback(error);
|
||||
|
||||
async.eachSeries(volumes, function (volume, iteratorDone) {
|
||||
if (volume.mountType !== 'noop') return iteratorDone();
|
||||
|
||||
let mountType;
|
||||
if (safe.child_process.execSync(`mountpoint -q -- ${volume.hostPath}`)) {
|
||||
mountType = 'mountpoint';
|
||||
} else {
|
||||
mountType = 'filesystem';
|
||||
}
|
||||
db.runSql('UPDATE volumes SET mountType=? WHERE id=?', [ mountType, volume.id ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
Generated
+3
-3
@@ -705,9 +705,9 @@
|
||||
}
|
||||
},
|
||||
"connect-lastmile": {
|
||||
"version": "2.1.0",
|
||||
"resolved": "https://registry.npmjs.org/connect-lastmile/-/connect-lastmile-2.1.0.tgz",
|
||||
"integrity": "sha512-nLV3loAO+1N5GK8OmwbNMEhObgrhNwn9qR1j3tgjfM63BPru5badZYxzQ5qsOP/MiXteFJCmRpga1IH8UV6JgQ==",
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/connect-lastmile/-/connect-lastmile-2.1.1.tgz",
|
||||
"integrity": "sha512-723vDmZuy6KBUAmuXff1mb+l9ZMs+JqXJuAGHgWNI3fNYAu9DKXC+GYdxqY0+9oMXyVJNf5AscoONcq9Nqb0Ig==",
|
||||
"requires": {
|
||||
"underscore": "^1.13.1"
|
||||
}
|
||||
|
||||
+1
-2
@@ -20,7 +20,7 @@
|
||||
"body-parser": "^1.19.0",
|
||||
"cloudron-manifestformat": "^5.10.2",
|
||||
"connect": "^3.7.0",
|
||||
"connect-lastmile": "^2.1.0",
|
||||
"connect-lastmile": "^2.1.1",
|
||||
"connect-timeout": "^1.9.0",
|
||||
"cookie-parser": "^1.4.5",
|
||||
"cookie-session": "^1.4.0",
|
||||
@@ -30,7 +30,6 @@
|
||||
"debug": "^4.3.1",
|
||||
"delay": "^5.0.0",
|
||||
"dockerode": "^3.3.0",
|
||||
"delay": "^5.0.0",
|
||||
"ejs": "^3.1.6",
|
||||
"ejs-cli": "^2.2.1",
|
||||
"express": "^4.17.1",
|
||||
|
||||
@@ -37,7 +37,7 @@ while true; do
|
||||
# fall through
|
||||
;&
|
||||
--owner-login)
|
||||
admin_username=$(mysql -NB -uroot -ppassword -e "SELECT username FROM box.users WHERE role='owner' AND username IS NOT NULL ORDER BY createdAt LIMIT 1" 2>/dev/null)
|
||||
admin_username=$(mysql -NB -uroot -ppassword -e "SELECT username FROM box.users WHERE role='owner' AND username IS NOT NULL ORDER BY creationTime LIMIT 1" 2>/dev/null)
|
||||
admin_password=$(pwgen -1s 12)
|
||||
dashboard_domain=$(mysql -NB -uroot -ppassword -e "SELECT value FROM box.settings WHERE name='admin_fqdn'" 2>/dev/nul)
|
||||
ghost_file=/home/yellowtent/platformdata/cloudron_ghost.json
|
||||
|
||||
+48
-15
@@ -15,6 +15,48 @@ function log() {
|
||||
echo -e "$(date +'%Y-%m-%dT%H:%M:%S')" "==> installer: $1"
|
||||
}
|
||||
|
||||
# Guard flag so prepare_apt_once only does real work on its first invocation.
apt_ready="no"

# Bring apt/dpkg into a usable state exactly once per script run. Waits out any
# concurrent dpkg holder, then retries `dpkg --configure` and `apt update` up to
# three times, logging each failed attempt. Proceeds even if apt never settles.
function prepare_apt_once() {
    [[ "${apt_ready}" == "yes" ]] && return

    log "Making sure apt is in a good state"

    log "Waiting for all dpkg tasks to finish..."
    while fuser /var/lib/dpkg/lock; do
        sleep 1
    done

    # it's unclear what needs to be run first or whether both these command should be run. so keep trying both
    for count in 1 2 3; do
        # alternative to apt-install -y --fix-missing ?
        dpkg_configure="yes"
        if ! dpkg --force-confold --configure -a; then
            log "dpkg reconfigure failed (try $count)"
            dpkg_configure="no"
        fi

        apt_update="yes"
        if ! apt update -y; then
            log "apt update failed (try $count)"
            apt_update="no"
        fi

        [[ "${dpkg_configure}" == "yes" && "${apt_update}" == "yes" ]] && break

        sleep 1
    done

    apt_ready="yes"

    if [[ "${dpkg_configure}" != "yes" || "${apt_update}" != "yes" ]]; then
        log "apt is not ready but proceeding anyway"
    else
        log "apt is ready"
    fi
}
|
||||
|
||||
readonly user=yellowtent
|
||||
readonly box_src_dir=/home/${user}/box
|
||||
|
||||
@@ -38,21 +80,7 @@ if [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]];
|
||||
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
|
||||
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
|
||||
|
||||
log "Waiting for all dpkg tasks to finish..."
|
||||
while fuser /var/lib/dpkg/lock; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
while ! dpkg --force-confold --configure -a; do
|
||||
log "Failed to fix packages. Retry"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
# the latest docker might need newer packages
|
||||
while ! apt update -y; do
|
||||
log "Failed to update packages. Retry"
|
||||
sleep 1
|
||||
done
|
||||
prepare_apt_once
|
||||
|
||||
while ! apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb; do
|
||||
log "Failed to install docker. Retry"
|
||||
@@ -66,6 +94,9 @@ readonly nginx_version=$(nginx -v 2>&1)
|
||||
if [[ "${nginx_version}" != *"1.18."* ]]; then
|
||||
log "installing nginx 1.18"
|
||||
$curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
|
||||
|
||||
prepare_apt_once
|
||||
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes /tmp/nginx.deb
|
||||
rm /tmp/nginx.deb
|
||||
@@ -73,11 +104,13 @@ fi
|
||||
|
||||
if ! which mount.nfs; then
|
||||
log "installing nfs-common"
|
||||
prepare_apt_once
|
||||
apt install -y nfs-common
|
||||
fi
|
||||
|
||||
if ! which sshfs; then
|
||||
log "installing sshfs"
|
||||
prepare_apt_once
|
||||
apt install -y sshfs
|
||||
fi
|
||||
|
||||
|
||||
@@ -45,6 +45,8 @@ function initialize(callback) {
|
||||
// https://github.com/mysqljs/mysql#pool-options
|
||||
gConnectionPool = mysql.createPool({
|
||||
connectionLimit: 5,
|
||||
acquireTimeout: 60000,
|
||||
connectTimeout: 60000,
|
||||
host: gDatabase.hostname,
|
||||
user: gDatabase.username,
|
||||
password: gDatabase.password,
|
||||
|
||||
@@ -20,7 +20,7 @@ exports = module.exports = {
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.0.6@sha256:e583082e15e8e41b0e3b80c3efc917ec429f19fa08a19e14fc27144a8bfe446a' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.0.2@sha256:9df297ccc3370f38c54f8d614e214e082b363777cd1c6c9522e29663cc8f5362' },
|
||||
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.0.3@sha256:37e5222e01ae89bc5a742ce12030631de25a127b5deec8a0e992c68df0fdec10' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.3.2@sha256:d0d153612f478a0ef099809d4c3c72c3e02f43a55a796987d922f16367e7881e' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.3.3@sha256:b1093e6f38bebf4a9ae903ca385aea3a32e7cccae5ede7f2e01a34681e361a5f' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:3.0.1@sha256:bed9f6b5d06fe2c5289e895e806cfa5b74ad62993d705be55d4554a67d128029' },
|
||||
'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.3.0@sha256:183c11150d5a681cb02f7d2bd542ddb8a8f097422feafb7fac8fdbca0ca55d47' }
|
||||
}
|
||||
|
||||
+9
-8
@@ -27,8 +27,8 @@ function validateMountOptions(type, options) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
switch (type) {
|
||||
case 'noop': // volume provider
|
||||
case 'mountpoint': // backup provider
|
||||
case 'filesystem':
|
||||
case 'mountpoint':
|
||||
return null;
|
||||
case 'cifs':
|
||||
if (typeof options.username !== 'string') return new BoxError(BoxError.BAD_FIELD, 'username is not a string');
|
||||
@@ -88,8 +88,8 @@ function renderMountFile(volume) {
|
||||
options = `allow_other,port=${mountOptions.port},IdentityFile=${keyFilePath},StrictHostKeyChecking=no,reconnect`; // allow_other means non-root users can access it
|
||||
break;
|
||||
}
|
||||
case 'noop': // volume provider
|
||||
case 'mountpoint': // backup provider
|
||||
case 'filesystem':
|
||||
case 'mountpoint':
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -115,9 +115,10 @@ async function getStatus(mountType, hostPath) {
|
||||
assert.strictEqual(typeof mountType, 'string');
|
||||
assert.strictEqual(typeof hostPath, 'string');
|
||||
|
||||
if (mountType === 'noop' || mountType === 'mountpoint') { // noop is from volume provider and mountpoint is from backup provider
|
||||
safe.child_process.execSync(`mountpoint -q -- ${hostPath}`, { encoding: 'utf8' });
|
||||
if (!safe.error) {
|
||||
if (mountType === 'filesystem') return { state: 'active', message: 'Mounted' };
|
||||
|
||||
if (mountType === 'mountpoint') {
|
||||
if (safe.child_process.execSync(`mountpoint -q -- ${hostPath}`)) {
|
||||
return { state: 'active', message: 'Mounted' };
|
||||
} else {
|
||||
return { state: 'inactive', message: 'Not mounted' };
|
||||
@@ -148,7 +149,7 @@ async function tryAddMount(volume, options) {
|
||||
assert.strictEqual(typeof volume, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
if (volume.mountType === 'noop' || volume.mountType === 'mountpoint') return; // noop is from volume provider and mountpoint is from backup provider
|
||||
if (volume.mountType === 'mountpoint') return;
|
||||
|
||||
if (constants.TEST) return;
|
||||
|
||||
|
||||
+4
-1
@@ -788,8 +788,11 @@ function checkCerts(options, auditSource, progressCallback, callback) {
|
||||
}
|
||||
|
||||
function removeAppConfigs() {
|
||||
const dashboardConfigFilename = `${settings.dashboardFqdn()}.conf`;
|
||||
|
||||
// remove all configs which are not the default or current dashboard
|
||||
for (let appConfigFile of fs.readdirSync(paths.NGINX_APPCONFIG_DIR)) {
|
||||
if (appConfigFile !== constants.NGINX_DEFAULT_CONFIG_FILE_NAME && !appConfigFile.startsWith(constants.DASHBOARD_LOCATION)) {
|
||||
if (appConfigFile !== constants.NGINX_DEFAULT_CONFIG_FILE_NAME && appConfigFile !== dashboardConfigFilename) {
|
||||
fs.unlinkSync(path.join(paths.NGINX_APPCONFIG_DIR, appConfigFile));
|
||||
}
|
||||
}
|
||||
|
||||
+5
-6
@@ -61,7 +61,7 @@ function validateHostPath(hostPath, mountType) {
|
||||
|
||||
if (!allowedPaths.some(p => hostPath.startsWith(p))) return new BoxError(BoxError.BAD_FIELD, 'hostPath must be under /mnt, /media, /opt or /srv', { field: 'hostPath' });
|
||||
|
||||
if (!constants.TEST && mountType === 'noop') { // we expect user to have already mounted this
|
||||
if (!constants.TEST) { // we expect user to have already mounted this
|
||||
const stat = safe.fs.lstatSync(hostPath);
|
||||
if (!stat) return new BoxError(BoxError.BAD_FIELD, 'mount point does not exist. Please create it on the server first', { field: 'hostPath' });
|
||||
if (!stat.isDirectory()) return new BoxError(BoxError.BAD_FIELD, 'mount point is not a directory', { field: 'hostPath' });
|
||||
@@ -84,15 +84,14 @@ async function add(volume, auditSource) {
|
||||
|
||||
const id = uuid.v4().replace(/-/g, ''); // to make systemd mount file names more readable
|
||||
|
||||
if (mountType === 'noop') {
|
||||
if (mountType === 'mountpoint' || mountType === 'filesystem') {
|
||||
error = validateHostPath(volume.hostPath, mountType);
|
||||
if (error) throw error;
|
||||
} else {
|
||||
volume.hostPath = path.join(paths.VOLUMES_MOUNT_DIR, id);
|
||||
await mounts.tryAddMount(volume, { timeout: 10 }); // 10 seconds
|
||||
}
|
||||
|
||||
if (volume.mountType !== 'noop') await mounts.tryAddMount(volume, { timeout: 10 }); // 10 seconds
|
||||
|
||||
try {
|
||||
await database.query('INSERT INTO volumes (id, name, hostPath, mountType, mountOptionsJson) VALUES (?, ?, ?, ?, ?)', [ id, name, volume.hostPath, mountType, JSON.stringify(mountOptions) ]);
|
||||
} catch (error) {
|
||||
@@ -103,7 +102,7 @@ async function add(volume, auditSource) {
|
||||
}
|
||||
|
||||
eventlog.add(eventlog.ACTION_VOLUME_ADD, auditSource, { id, name, hostPath: volume.hostPath });
|
||||
// in theory, we only need to do this noop volumes. but for some reason a restart is required to detect new "mounts"
|
||||
// in theory, we only need to do this mountpoint volumes. but for some reason a restart is required to detect new "mounts"
|
||||
services.rebuildService('sftp', NOOP_CALLBACK);
|
||||
|
||||
const collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { volumeId: id, hostPath: volume.hostPath });
|
||||
@@ -159,7 +158,7 @@ async function del(volume, auditSource) {
|
||||
|
||||
eventlog.add(eventlog.ACTION_VOLUME_REMOVE, auditSource, { volume });
|
||||
|
||||
if (volume.mountType === 'noop') {
|
||||
if (volume.mountType === 'mountpoint' || volume.mountType === 'filesystem') {
|
||||
services.rebuildService('sftp', NOOP_CALLBACK);
|
||||
} else {
|
||||
await safe(mounts.removeMount(volume));
|
||||
|
||||
Reference in New Issue
Block a user