Files
cloudron-box/src/database.js
T
Girish Ramakrishnan 96dc79cfe6 Migrate codebase from CommonJS to ES Modules
- Convert all require()/module.exports to import/export across 260+ files
- Add "type": "module" to package.json to enable ESM by default
- Add migrations/package.json with "type": "commonjs" to keep db-migrate compatible
- Convert eslint.config.js to ESM with sourceType: "module"
- Replace __dirname/__filename with import.meta.dirname/import.meta.filename
- Replace require.main === module with process.argv[1] === import.meta.filename
- Remove 'use strict' directives (implicit in ESM)
- Convert dynamic require() in switch statements to static import lookup maps
  (dns.js, domains.js, backupformats.js, backupsites.js, network.js)
- Extract self-referencing exports.CONSTANT patterns into standalone const
  declarations (apps.js, services.js, locks.js, users.js, mail.js, etc.)
- Lazify SERVICES object in services.js to avoid circular dependency TDZ issues
- Add clearMailQueue() to mailer.js for ESM-safe queue clearing in tests
- Add _setMockApp() to ldapserver.js for ESM-safe test mocking
- Add _setMockResolve() wrapper to dig.js for ESM-safe DNS mocking in tests
- Convert backupupload.js to use dynamic imports so --check exits before
  loading the module graph (which requires BOX_ENV)
- Update check-install to use ESM import for infra_version.js
- Convert scripts/ (hotfix, release, remote_hotfix.js, find-unused-translations)
- All 1315 tests passing

Migration stats (AI-assisted using Cursor with Claude):
- Wall clock time: ~3-4 hours
- Assistant completions: ~80-100
- Estimated token usage: ~1-2M tokens

Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-14 15:11:45 +01:00

173 lines
6.9 KiB
JavaScript

import assert from 'node:assert';
import BoxError from './boxerror.js';
import constants from './constants.js';
import debugModule from 'debug';
import { execSync } from 'node:child_process';
import mysql from 'mysql2/promise';
import safe from 'safetydance';
import shellModule from './shell.js';
const debug = debugModule('box:database');
const shell = shellModule('database');
// alias so the test-only helper is exported with a leading underscore (see export list below)
const _clear = clear;
export {
initialize,
uninitialize,
query,
transaction,
runInTransaction,
importFromFile,
exportToFile,
_clear,
};
// lazily created in initialize(); null means "not initialized" or "closed"
let gConnectionPool = null;
// connection settings; hostname is rewritten in initialize() when running under TEST
const gDatabase = {
hostname: '127.0.0.1',
username: 'root',
password: 'password',
port: 3306,
name: 'box'
};
// Creates the global mysql connection pool. Safe to call repeatedly; a second
// call while the pool exists is a no-op. Under TEST the database host is the
// docker container 'mysql-server' (see setupTest script), resolved via docker inspect.
async function initialize() {
    if (gConnectionPool !== null) return; // already initialized

    if (constants.TEST) {
        // see setupTest script how the mysql-server is run
        const inspectCmd = 'docker inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" mysql-server';
        gDatabase.hostname = execSync(inspectCmd).toString().trim();
    }

    // https://github.com/mysqljs/mysql#pool-options
    gConnectionPool = mysql.createPool({
        connectionLimit: 5,
        connectTimeout: 60000,
        host: gDatabase.hostname,
        user: gDatabase.username,
        password: gDatabase.password,
        port: gDatabase.port,
        database: gDatabase.name,
        multipleStatements: false,
        waitForConnections: true, // getConnection() will wait until a connection is available
        ssl: false,
        timezone: 'Z', // mysql follows the SYSTEM timezone. on Cloudron, this is UTC
        charset: 'utf8mb4',
        jsonStrings: true, // for JSON types, JSONARRAYAGG will return string instead of JSON
    });

    // run one time setup commands on new connections. connections are reused and so we cannot use 'acquire' event of the pool
    // https://github.com/sidorares/node-mysql2/issues/565, 567
    gConnectionPool.on('connection', async (connection) => {
        const conn = connection.promise(); // convert PoolConnection to PromisePoolConnection
        try {
            // await connection.query('SET NAMES utf8mb4 COLLATE utf8mb4_bin');
            await conn.query('SET SESSION sql_mode = \'strict_all_tables\''); // disable type coercion etc
            // GROUP_CONCAT has only 1024 default. we use it in the groups API which doesn't support pagination yet
            // a crypto.randomUUID is 36 in length. so the value below provides for roughly 10k users
            await conn.query('SET SESSION group_concat_max_len = 360000');
        } catch (error) {
            // only log. we will let the app handle the exception when it calls query()/transaction()
            debug(`failed to init new db connection ${connection.threadId}:`, error);
        }
    });
}
// Drains and closes the pool. Idempotent: a no-op when the pool was never
// created or is already closed. Errors from end() are logged, not thrown.
async function uninitialize() {
    if (gConnectionPool === null) return;

    await safe(gConnectionPool.end(), { debug }); // best-effort close; log on failure
    gConnectionPool = null;
    debug('pool closed');
}
// Test helper (exported as _clear): truncates every table of the 'box' schema
// except the db-migrate bookkeeping table, inside a single transaction.
async function clear() {
    const tables = await query('SELECT table_name FROM information_schema.tables WHERE table_schema = ? AND table_name != ?', [ 'box', 'migrations' ]);

    const truncates = tables.map((t) => ({ query: `TRUNCATE TABLE ${t.TABLE_NAME}` }));

    // FOREIGN_KEY_CHECKS is a session/connection variable, so it must be reset on the same connection
    await transaction([
        { query: 'SET FOREIGN_KEY_CHECKS = 0' },
        ...truncates,
        { query: 'SET FOREIGN_KEY_CHECKS = 1' },
    ]);
}
// Runs a single statement on the pool (equivalent to getConnection/query/release)
// and resolves with the rows. Throws BoxError.DATABASE_ERROR on any SQL failure.
async function query(...args) {
    assert.notStrictEqual(gConnectionPool, null, 'Database connection is already closed');

    const [queryError, tuple] = await safe(gConnectionPool.query(...args));
    if (queryError) throw new BoxError(BoxError.DATABASE_ERROR, queryError, { sqlCode: queryError.code, sqlMessage: queryError.sqlMessage || null });

    const [rows /*, fields */] = tuple; // the promise version returns a tuple of [rows, fields]
    return rows;
}
// Runs an array of { query, args } entries in one transaction on a dedicated
// connection. Resolves with an array of row results (one per entry); rolls
// back and throws BoxError.DATABASE_ERROR on any failure.
async function transaction(queries) {
    assert(Array.isArray(queries));
    // fail with a clear message instead of a TypeError when called after uninitialize() (consistent with query())
    assert.notStrictEqual(gConnectionPool, null, 'Database connection is already closed');

    const [error, connection] = await safe(gConnectionPool.getConnection());
    if (error) throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage || null });

    try {
        await connection.beginTransaction();
        const results = [];
        for (const query of queries) {
            const [rows /*, fields */] = await connection.query(query.query, query.args);
            results.push(rows);
        }
        await connection.commit();
        return results;
    } catch (error) {
        await safe(connection.rollback(), { debug });
        throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage || null });
    } finally {
        connection.release(); // no await! release() is synchronous; finally guarantees no connection leak
    }
}
// Runs `callback(query)` inside a transaction on a dedicated connection. The
// provided query function behaves like the module-level query() but is bound
// to the transaction's connection. Resolves with the callback's return value;
// rolls back and throws BoxError.DATABASE_ERROR on failure.
async function runInTransaction(callback) {
    assert.strictEqual(typeof callback, 'function');
    // fail with a clear message instead of a TypeError when called after uninitialize() (consistent with query())
    assert.notStrictEqual(gConnectionPool, null, 'Database connection is already closed');

    const [error, connection] = await safe(gConnectionPool.getConnection());
    if (error) throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage || null });

    try {
        await connection.beginTransaction();
        const query = async (...args) => {
            const [error, result] = await safe(connection.query(...args)); // this is same as getConnection/query/release
            if (error) throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage || null });
            return result[0]; // the promise version returns a tuple of [rows, fields]
        };
        const result = await callback(query);
        await connection.commit();
        return result;
    } catch (error) {
        await safe(connection.rollback(), { debug });
        throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage || null });
    } finally {
        connection.release(); // no await! release() is synchronous; finally guarantees no connection leak
    }
}
// Restores the database from a SQL dump file by piping it through the mysql CLI.
// Creates the database first in case the dump does not. Throws BoxError.DATABASE_ERROR.
async function importFromFile(file) {
    assert.strictEqual(typeof file, 'string');

    // use the configured name instead of a hard-coded literal (consistent with the rest of the module)
    await query(`CREATE DATABASE IF NOT EXISTS ${gDatabase.name}`);

    // quote the file path (paths with spaces; consistent with exportToFile).
    // NOTE(review): password on the command line is visible in the process list — internal config, but worth confirming
    const cmd = `/usr/bin/mysql -h "${gDatabase.hostname}" -u ${gDatabase.username} -p${gDatabase.password} ${gDatabase.name} < "${file}"`;
    const [error] = await safe(shell.bash(cmd, {}));
    if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
}
// Dumps the database (schema, data, routines, triggers) to the given file via
// mysqldump. Throws BoxError.DATABASE_ERROR on failure.
async function exportToFile(file) {
    assert.strictEqual(typeof file, 'string');

    // latest mysqldump enables column stats by default which is not present in 5.7 util
    const mysqlDumpHelp = await shell.spawn('/usr/bin/mysqldump', ['--help'], { encoding: 'utf8' });
    const hasColStats = mysqlDumpHelp.includes('column-statistics');
    const colStats = hasColStats ? '--column-statistics=0' : '';

    // use the configured username instead of the hard-coded 'root' (consistent with importFromFile and the pool config)
    const cmd = `/usr/bin/mysqldump -h "${gDatabase.hostname}" -u ${gDatabase.username} -p${gDatabase.password} ${colStats} --single-transaction --routines --triggers ${gDatabase.name} > "${file}"`;
    const [error] = await safe(shell.bash(cmd, {}));
    if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
}