Compare commits

...

2 Commits

Author SHA1 Message Date
Girish Ramakrishnan
9bb4c8127e docker based tests 2024-09-22 11:13:26 +02:00
Girish Ramakrishnan
27f7bcd040 test: add simple gitlab-ci file 2024-09-21 09:46:38 +02:00
19 changed files with 107 additions and 130 deletions

24
.gitlab-ci.yml Normal file
View File

@@ -0,0 +1,24 @@
# CI job: run the box test suite inside the cloudron/base image with a
# MySQL 8 service container (reachable via the "mysql" alias).
run_tests:
  stage: test
  # Image is pinned by digest for reproducible CI runs.
  image: cloudron/base:4.2.0@sha256:46da2fffb36353ef714f97ae8e962bd2c212ca091108d768ba473078319a47f4
  services:
    - name: mysql:8.0
      alias: mysql
  variables:
    MYSQL_ROOT_PASSWORD: password
    MYSQL_DATABASE: box
    BOX_ENV: ci
    DATABASE_URL: mysql://root:password@mysql/box
  script:
    - echo "Running tests..."
    # Switch root to mysql_native_password: the mysql client/driver used here
    # does not handle MySQL 8's default caching_sha2_password plugin.
    - mysql -hmysql -uroot -ppassword -e "ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'password';"
    - mysql -hmysql -uroot -ppassword -e "CREATE DATABASE IF NOT EXISTS box"
    - npm install
    - node_modules/.bin/db-migrate up
    # Some scripts expect node at /usr/bin/node; the base image ships it under
    # /usr/local/node-18.18.0 only.
    - ln -s /usr/local/node-18.18.0/bin/node /usr/bin/node
    - node_modules/.bin/mocha --no-timeouts --bail src/test/tokens-test.js
    - echo "Done!"

stages:
  - test

View File

@@ -2,40 +2,11 @@
set -eu
readonly source_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly DATA_DIR="${HOME}/.cloudron_test"
readonly DEFAULT_TESTS="./src/test/*-test.js ./src/routes/test/*-test.js"
! "${source_dir}/src/test/check-install" && exit 1
# cleanup old data dirs some of those docker container data requires sudo to be removed
echo "=> Provide root password to purge any leftover data in ${DATA_DIR} and load apparmor profile:"
sudo rm -rf ${DATA_DIR}
# archlinux does not have apparmor
if hash apparmor_parser 2>/dev/null; then
echo "=> Loading app armor profile"
sudo apparmor_parser --replace --write-cache ./setup/start/docker-cloudron-app.apparmor
fi
# create dir structure
mkdir -p ${DATA_DIR}
cd ${DATA_DIR}
mkdir -p appsdata
mkdir -p boxdata/box boxdata/mail boxdata/certs boxdata/mail/dkim/localhost boxdata/mail/dkim/foobar.com
mkdir -p platformdata/addons/mail/banner platformdata/nginx/cert platformdata/nginx/applications/dashboard platformdata/collectd/collectd.conf.d platformdata/addons platformdata/logrotate.d platformdata/backup platformdata/logs/tasks platformdata/sftp/ssh platformdata/firewall platformdata/update
sudo mkdir -p /mnt/cloudron-test-music /media/cloudron-test-music # volume test
# put cert
echo "=> Generating a localhost selfsigned cert"
openssl req -x509 -newkey rsa:2048 -keyout platformdata/nginx/cert/host.key -out platformdata/nginx/cert/host.cert -days 3650 -subj '/CN=localhost' -nodes -config <(cat /etc/ssl/openssl.cnf <(printf "\n[SAN]\nsubjectAltName=DNS:*.localhost"))
cd "${source_dir}"
# clear out any containers if FAST is unset
if [[ -z ${FAST+x} ]]; then
echo "=> Delete all docker containers first"
docker ps -qa --filter "label=isCloudronManaged" | xargs --no-run-if-empty docker rm -f
echo "=> Delete mysql server"
docker rm -f mysql-server
echo "==> To skip this run with: FAST=1 ./run-tests"
else
@@ -62,11 +33,8 @@ while ! mysqladmin ping -h"${MYSQL_IP}" --silent; do
sleep 1
done
echo "=> Ensure local base image"
docker pull cloudron/base:4.2.0@sha256:46da2fffb36353ef714f97ae8e962bd2c212ca091108d768ba473078319a47f4
echo "=> Create iptables blocklist"
sudo ipset create cloudron_blocklist hash:net || true
# echo "=> Create iptables blocklist"
# sudo ipset create cloudron_blocklist hash:net || true
echo "=> Ensure database"
mysql -h"${MYSQL_IP}" -uroot -ppassword -e "ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'password';"
@@ -80,5 +48,6 @@ if [[ $# -gt 0 ]]; then
TESTS="$*"
fi
echo "=> Run tests with mocha"
BOX_ENV=test ./node_modules/.bin/mocha --bail --no-timeouts --exit -R spec ${TESTS}
echo "=> Run tests"
docker run -e BOX_ENV=test -e DATABASE_HOSTNAME=${MYSQL_IP} -v `pwd`:/home/yellowtent/box:ro -v `which node`:/usr/bin/node:ro -v /var/run/docker.sock:/var/run/docker.sock -t cloudron/boxtest node_modules/.bin/mocha --bail --no-timeouts --colors --exit -R spec src/test

View File

@@ -90,7 +90,7 @@ async function detectMetaInfo(applink) {
});
const [jsdomError, dom] = await safe(jsdom.JSDOM.fromURL(applink.upstreamUri, { virtualConsole }));
if (jsdomError) console.error('detectMetaInfo: jsdomError', jsdomError);
if (jsdomError) debug('detectMetaInfo: jsdomError', jsdomError);
if (!applink.icon && dom) {
let favicon = '';

View File

@@ -25,7 +25,7 @@ const assert = require('assert'),
let gConnectionPool = null;
const gDatabase = {
hostname: '127.0.0.1',
hostname: constants.TEST ? process.env.DATABASE_HOSTNAME : '127.0.0.1',
username: 'root',
password: 'password',
port: 3306,
@@ -35,11 +35,6 @@ const gDatabase = {
async function initialize() {
if (gConnectionPool !== null) return;
if (constants.TEST) {
// see setupTest script how the mysql-server is run
gDatabase.hostname = require('child_process').execSync('docker inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" mysql-server').toString().trim();
}
// https://github.com/mysqljs/mysql#pool-options
gConnectionPool = mysql.createPool({
connectionLimit: 5,

View File

@@ -118,6 +118,8 @@ function process(req, res, next) {
async function start() {
assert(gHttpServer === null, 'Already started');
if (constants.TEST) return;
const json = express.json({ strict: true });
// we protect container create as the app/admin can otherwise mount random paths (like the ghost file)

View File

@@ -4,10 +4,7 @@ const constants = require('./constants.js'),
path = require('path');
function baseDir() {
const homeDir = process.env.HOME;
if (constants.CLOUDRON) return homeDir;
if (constants.TEST) return path.join(homeDir, '.cloudron_test');
// cannot reach
return process.env.HOME;
}
// keep these values in sync with start.sh

View File

@@ -46,11 +46,18 @@ fi
# DEBUG has to be hardcoded because it is not set in the tests. --setenv is required for ubuntu 16 (-E does not work)
# NODE_OPTIONS is used because env -S does not work in ubuntu 16/18.
# it seems systemd-run does not return the exit status of the process despite --wait
if ! systemd-run --unit "${service_name}" --nice "${nice}" --uid=${id} --gid=${id} ${options} --setenv HOME=${HOME} --setenv USER=${SUDO_USER} --setenv DEBUG=box:* --setenv BOX_ENV=${BOX_ENV} --setenv NODE_ENV=production --setenv NODE_OPTIONS=--unhandled-rejections=strict --setenv AWS_SDK_JS_SUPPRESS_MAINTENANCE_MODE_MESSAGE=1 "${task_worker}" "${task_id}" "${logfile}"; then
echo "Service ${service_name} failed to run" # this only happens if the path to task worker itself is wrong
fi
if [[ "$CI" == "1" ]]; then
if ! DEBUG=box:* NODE_ENV=production NODE_OPTIONS=--unhandled-rejections=strict gosu $id:$id "${task_worker}" "${task_id}" "${logfile}"; then
echo "Service ${service_name} failed to run" # this only happens if the path to task worker itself is wrong
fi
exit_code=$?
else
if ! systemd-run --unit "${service_name}" --nice "${nice}" --uid=${id} --gid=${id} ${options} --setenv HOME=${HOME} --setenv USER=${SUDO_USER} --setenv DEBUG=box:* --setenv BOX_ENV=${BOX_ENV} --setenv NODE_ENV=production --setenv NODE_OPTIONS=--unhandled-rejections=strict --setenv AWS_SDK_JS_SUPPRESS_MAINTENANCE_MODE_MESSAGE=1 "${task_worker}" "${task_id}" "${logfile}"; then
echo "Service ${service_name} failed to run" # this only happens if the path to task worker itself is wrong
fi
exit_code=$(systemctl show "${service_name}" -p ExecMainCode | sed 's/ExecMainCode=//g')
exit_code=$(systemctl show "${service_name}" -p ExecMainCode | sed 's/ExecMainCode=//g')
fi
echo "Service ${service_name} finished with exit code ${exit_code}"
exit "${exit_code}"

View File

@@ -111,10 +111,10 @@ function sudo(tag, args, options, callback) {
cp.stdout.on('data', (data) => {
if (options.captureStdout) stdoutResult += data.toString('utf8');
if (!options.quiet) process.stdout.write(data); // do not use debug to avoid double timestamps when calling backupupload.js
if (!options.quiet) process.stdout.write(data + '\r'); // do not use debug to avoid double timestamps when calling backupupload.js
});
cp.stderr.on('data', (data) => {
process.stderr.write(data); // do not use debug to avoid double timestamps when calling backupupload.js
process.stderr.write(data + '\r'); // do not use debug to avoid double timestamps when calling backupupload.js
});
cp.on('exit', function (code, signal) {

View File

@@ -81,6 +81,7 @@ async function upload(apiConfig, backupFilePath) {
return {
stream: fs.createWriteStream(backupFilePath, { autoClose: true }),
async finish() {
console.log('OK CHOWNNIG!!!!!', process.env.SUDO_UID, process.getuid());
const backupUid = parseInt(process.env.SUDO_UID, 10) || process.getuid(); // in test, upload() may or may not be called via sudo script
if (hasChownSupportSync(apiConfig)) {
if (!safe.fs.chownSync(backupFilePath, backupUid, backupUid)) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to chown ${backupFilePath}: ${safe.error.message}`);

View File

@@ -97,7 +97,7 @@ async function getDisks() {
const DISK_TYPES = [ 'ext4', 'xfs', 'cifs', 'nfs', 'fuse.sshfs' ]; // we don't show size of contents in untracked disk types
for (const disk of dfEntries) {
if (!DISK_TYPES.includes(disk.type)) continue;
if (!DISK_TYPES.includes(disk.type) && disk.mountpoint !== '/') continue;
if (disk.mountpoint === '/') rootDisk = disk;
disks[disk.filesystem] = {
filesystem: disk.filesystem,

View File

@@ -169,7 +169,7 @@ function startTask(id, options, onTaskFinished) {
let killTimerId = null, timedOut = false;
const sudoOptions = { preserveEnv: true, logStream: null };
if (constants.TEST) sudoOptions.logStream = fs.createWriteStream('/dev/null'); // without this output is messed up, not sure why
//if (constants.TEST) sudoOptions.logStream = fs.createWriteStream('/dev/null'); // without this output is messed up, not sure why
gTasks[id] = shell.sudo('startTask', [ START_TASK_CMD, id, logFile, options.nice || 0, options.memoryLimit || 400, options.oomScoreAdjust || 0 ], sudoOptions, async function (sudoError) {
if (!gTasks[id]) return; // ignore task exit since we are shutting down. see stopAllTasks

View File

@@ -14,6 +14,8 @@ const applinks = require('../applinks.js'),
describe('Applinks', function () {
const { setup, cleanup } = common;
this.timeout(10000);
before(setup);
after(cleanup);
@@ -30,7 +32,7 @@ describe('Applinks', function () {
};
const APPLINK_2 = {
upstreamUri: 'https://google.com'
upstreamUri: 'https://google.de'
};
const APPLINK_3 = {

View File

@@ -1,56 +0,0 @@
#!/bin/bash
set -eu
readonly source_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
readonly sudo_scripts_dir="${source_dir}/src/scripts"
if [[ ! -f /usr/bin/node ]]; then
echo "node is not in root user's environment. '/usr/bin/env node' will not work"
exit 1
fi
# checks if all scripts are sudo access
readarray -d '' scripts < <(find ${sudo_scripts_dir} -type f -print0)
declare -a missing_scripts=()
for script in "${scripts[@]}"; do
# sudo -k ignores a cached sudo session for the command
if [[ $(sudo -k -n "${script}" --check 2>/dev/null) != "OK" ]]; then
missing_scripts+=("${script}")
fi
done
if [[ ${#missing_scripts[@]} -gt 0 ]]; then
echo "The following script(s) have no sudo access: ${missing_scripts[*]} . Try 'sudo -n ${missing_scripts[0]} --check'"
echo -e "\nYou have to add the lines below to /etc/sudoers.d/yellowtent\n\n"
for missing_script in "${missing_scripts[@]}"; do
echo "Defaults!${missing_script} env_keep=\"HOME BOX_ENV\""
echo "${USER} ALL=(ALL) NOPASSWD: ${missing_script}"
echo ""
done
exit 1
fi
setenv_scripts=(starttask.sh backupupload.js)
for script in "${setenv_scripts[@]}"; do
if ! grep -q ":SETENV:.*${script}" "/etc/sudoers.d/yellowtent"; then
echo "SETENV missing for ${script} in /etc/sudoers.d/yellowtent"
exit 1
fi
done
if ! grep -q "backupupload.js closefrom_override" "/etc/sudoers.d/yellowtent"; then
echo "backupupload.js needs closefrom_override in /etc/sudoers.d/yellowtent"
exit 1
fi
images=$(node -e "const i = require('${source_dir}/src/infra_version.js'); console.log(Object.keys(i.images).map(x => i.images[x]).join(' '));")
for image in ${images}; do
if ! docker inspect "${image}" >/dev/null 2>/dev/null; then
docker pull ${image%@sha256:*}
fi
done

View File

@@ -11,7 +11,7 @@ const BoxError = require('../boxerror.js'),
expect = require('expect.js'),
safe = require('safetydance');
describe('Settings', function () {
describe('Cloudron', function () {
const { setup, cleanup } = common;
before(setup);

View File

@@ -17,20 +17,14 @@ describe('System', function () {
after(cleanup);
it('can get disks', async function () {
// does not work on archlinux 8!
if (require('child_process').execSync('uname -a').toString().indexOf('-arch') !== -1) return;
const disks = await df.disks();
expect(disks).to.be.ok();
expect(disks.some(d => d.mountpoint === '/')).to.be.ok();
});
it('can get file', async function () {
// does not work on archlinux 8!
if (require('child_process').execSync('uname -a').toString().indexOf('-arch') !== -1) return;
const disks = await df.file(__dirname);
expect(disks).to.be.ok();
expect(disks.mountpoint).to.be('/home');
expect(disks.mountpoint).to.be('/home/yellowtent/box');
});
});

View File

@@ -17,27 +17,18 @@ describe('System', function () {
after(cleanup);
it('can get disks', async function () {
// does not work on archlinux 8!
if (require('child_process').execSync('uname -a').toString().indexOf('-arch') !== -1) return;
const disks = await system.getDisks();
expect(disks).to.be.ok();
expect(Object.keys(disks).some(fs => disks[fs].mountpoint === '/')).to.be.ok();
});
it('can get swaps', async function () {
// does not work on archlinux 8!
if (require('child_process').execSync('uname -a').toString().indexOf('-arch') !== -1) return;
const swaps = await system.getSwaps();
expect(swaps).to.be.ok();
expect(Object.keys(swaps).some(n => swaps[n].type === 'partition' || swaps[n].type === 'file')).to.be.ok();
});
it('can check for disk space', async function () {
// does not work on archlinux 8!
if (require('child_process').execSync('uname -a').toString().indexOf('-arch') !== -1) return;
await system.checkDiskSpace();
});

33
test/Dockerfile Normal file
View File

@@ -0,0 +1,33 @@
# Test-runner image for the box test suite. Pinned by digest for reproducibility.
FROM ubuntu:jammy-20230816@sha256:b492494d8e0113c4ad3fe4528a4b5ff89faa5331f7d52c5c138196f69ce176a6
RUN apt update && \
    apt install -y openssl mysql-client-8.0 sudo lsb-release vim gosu curl
# Fixed uid 808 so file ownership matches the host/test expectations.
RUN useradd --system --uid 808 --comment "Cloudron Box" --create-home --shell /usr/bin/bash yellowtent
RUN mkdir -p /mnt/cloudron-test-music /media/cloudron-test-music # volume test
# Docker CLI only (daemon comes from the host via the mounted socket).
# https://download.docker.com/linux/static/stable/x86_64/
RUN cd /usr/bin && curl -L https://download.docker.com/linux/static/stable/x86_64/docker-25.0.5.tgz | tar -zxvf - --strip-components=1 docker/docker
COPY setup/start/sudoers /etc/sudoers.d/cloudron
# "cloak" is a no-op stand-in for systemd tools, which do not work in a container.
COPY test/cloak /usr/bin/cloak
RUN ln -s /usr/bin/cloak /usr/bin/systemd-run && \
    ln -s /usr/bin/cloak /usr/bin/systemctl
COPY test/entrypoint.sh /usr/bin/entrypoint.sh
WORKDIR /home/yellowtent
USER yellowtent
# Directory layout mirrors what run-tests used to create on the host.
RUN mkdir -p appsdata
RUN mkdir -p boxdata/box boxdata/mail boxdata/certs boxdata/mail/dkim/localhost boxdata/mail/dkim/foobar.com
RUN mkdir -p platformdata/addons/mail/banner platformdata/nginx/cert platformdata/nginx/applications/dashboard platformdata/collectd/collectd.conf.d platformdata/addons platformdata/logrotate.d platformdata/backup platformdata/logs/tasks platformdata/sftp/ssh platformdata/firewall platformdata/update
# Self-signed localhost cert. NOTE: the -subj value must use double quotes here;
# the previous single quotes terminated the enclosing bash -c '...' string and
# only worked by accidental shell word concatenation.
RUN bash -c 'openssl req -x509 -newkey rsa:2048 -keyout platformdata/nginx/cert/host.key -out platformdata/nginx/cert/host.cert -days 3650 -subj "/CN=localhost" -nodes -config <(cat /etc/ssl/openssl.cnf <(printf "\n[SAN]\nsubjectAltName=DNS:*.localhost"))'
WORKDIR /home/yellowtent/box
USER root
ENTRYPOINT [ "/usr/bin/entrypoint.sh" ]

8
test/cloak Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/bash
set -eu
cmd=$(basename $BASH_SOURCE)
# echo $cmd "$@"

10
test/entrypoint.sh Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Container entrypoint: grant the unprivileged "yellowtent" user access to the
# host's docker socket, then drop privileges and run the requested command.
set -eu
# Mirror the host's docker group id so the mounted socket is writable.
# NOTE(review): socket is mounted at /var/run/docker.sock by the runner; this
# relies on /var/run being a symlink to /run (true on Ubuntu) — confirm.
docker_gid=$(stat -c "%g" /run/docker.sock)
addgroup --gid ${docker_gid} --system docker
usermod -aG docker yellowtent
# NOTE(review): su --command takes a single string; with multiple CMD words
# "$@" expands to several arguments — presumably callers always pass one
# quoted command string. Verify, or consider "$*" instead.
exec su yellowtent --command "$@"