graphs: make transform functions
This commit is contained in:
@@ -54,6 +54,20 @@ function pruneGraphData(dataset, options) {
|
||||
}
|
||||
}
|
||||
|
||||
// Turns a [ bytes, epochSeconds ] metric sample into a chart point.
// x: timestamp in milliseconds; y: size in GiB, rounded to 2 decimals.
// NOTE: toFixed() yields a string, matching how the graphs consume it.
function transformGiB(data) {
    const [ bytes, epochSecs ] = data;
    const gib = bytes / 1024 / 1024 / 1024;

    return {
        x: epochSecs * 1000,
        y: gib.toFixed(2)
    };
}
|
||||
|
||||
// Turns a [ value, epochSeconds ] metric sample into a chart point.
// x: timestamp in milliseconds; y: the raw value.
function transformMsecs(data) {
    const [ value, epochSecs ] = data;

    return {
        x: epochSecs * 1000,
        y: value || 0 // relative values like cpu% can be null when there is no previous sample; treat as 0
    };
}
|
||||
|
||||
async function liveRefresh() {
|
||||
metricStream = await systemModel.getMetricStream(LIVE_REFRESH_INTERVAL_MSECS);
|
||||
metricStream.onerror = (error) => console.log('event stream error:', error);
|
||||
@@ -61,42 +75,24 @@ async function liveRefresh() {
|
||||
const data = JSON.parse(message.data);
|
||||
|
||||
///////////// CPU Graph
|
||||
if (data.cpu[0]) { // since cpu% is relative, value can be null if no previous value
|
||||
cpuGraph.data.datasets[0].data.push({
|
||||
x: data.cpu[1] * 1000, // cpuGraph.options.scales.x.max can be used for window edge, if we don't trust server timestamps . but using server timestamps handles network lags better
|
||||
y: data.cpu[0]
|
||||
});
|
||||
|
||||
pruneGraphData(cpuGraph.data.datasets[0], cpuGraph.options);
|
||||
cpuGraph.update('none');
|
||||
}
|
||||
cpuGraph.data.datasets[0].data.push(transformMsecs(data.cpu));
|
||||
pruneGraphData(cpuGraph.data.datasets[0], cpuGraph.options);
|
||||
cpuGraph.update('none');
|
||||
|
||||
///////////// Memory Graph
|
||||
memoryGraph.data.datasets[0].data.push({
|
||||
x: data.memory[1] * 1000,
|
||||
y: (data.memory[0] / 1024 / 1024 / 1024).toFixed(2)
|
||||
});
|
||||
memoryGraph.data.datasets[0].data.push(transformGiB(data.memory));
|
||||
pruneGraphData(memoryGraph.data.datasets[0], memoryGraph.options);
|
||||
|
||||
memoryGraph.data.datasets[1].data.push({
|
||||
x: data.swap[1] * 1000,
|
||||
y: (data.swap[0] / 1024 / 1024 / 1024).toFixed(2)
|
||||
});
|
||||
memoryGraph.data.datasets[1].data.push(transformGiB(data.swap));
|
||||
pruneGraphData(memoryGraph.data.datasets[1], memoryGraph.options);
|
||||
|
||||
memoryGraph.update('none');
|
||||
|
||||
///////////// Disk Graph
|
||||
diskGraph.data.datasets[0].data.push({
|
||||
x: data.blockReadRate[1] * 1000,
|
||||
y: data.blockReadRate[0]
|
||||
});
|
||||
diskGraph.data.datasets[0].data.push(transformMsecs(data.blockReadRate));
|
||||
pruneGraphData(diskGraph.data.datasets[0], diskGraph.options);
|
||||
|
||||
diskGraph.data.datasets[1].data.push({
|
||||
x: data.blockWriteRate[1] * 1000,
|
||||
y: data.blockWriteRate[0]
|
||||
});
|
||||
diskGraph.data.datasets[1].data.push(transformMsecs(data.blockWriteRate));
|
||||
pruneGraphData(diskGraph.data.datasets[1], diskGraph.options);
|
||||
|
||||
diskGraph.update('none');
|
||||
@@ -105,24 +101,18 @@ async function liveRefresh() {
|
||||
blockWriteTotal.value = prettyDecimalSize(data.blockWriteTotal);
|
||||
|
||||
///////////// Network Graph
|
||||
networkGraph.data.datasets[0].data.push({
|
||||
x: data.networkReadRate[1] * 1000,
|
||||
y: data.networkReadRate[0]
|
||||
});
|
||||
networkGraph.data.datasets[0].data.push(transformMsecs(data.networkReadRate));
|
||||
pruneGraphData(networkGraph.data.datasets[0], networkGraph.options);
|
||||
|
||||
networkGraph.data.datasets[1].data.push({
|
||||
x: data.networkWriteRate[1] * 1000,
|
||||
y: data.networkWriteRate[0]
|
||||
});
|
||||
networkGraph.data.datasets[1].data.push(transformMsecs(data.networkWriteRate));
|
||||
pruneGraphData(networkGraph.data.datasets[1], networkGraph.options);
|
||||
|
||||
networkGraph.update('none');
|
||||
|
||||
networkReadTotal.value = prettyDecimalSize(data.networkReadTotal);
|
||||
networkWriteTotal.value = prettyDecimalSize(data.networkWriteTotal);
|
||||
|
||||
};
|
||||
|
||||
// advances the time window by 500ms. this is independent of incoming data
|
||||
metricStream.intervalId = setInterval(function () {
|
||||
for (const graph of [ cpuGraph, memoryGraph, diskGraph, networkGraph]) {
|
||||
@@ -155,50 +145,13 @@ async function getMetrics(hours) {
|
||||
const [error, result] = await systemModel.getMetrics({ fromSecs: hours * 60 * 60, intervalSecs: 300 });
|
||||
if (error) return console.error(error);
|
||||
|
||||
// time is converted to msecs . cpu is already scaled to cpu*100
|
||||
metrics.cpu = result.cpu.map(v => { return { x: v[1]*1000, y: v[0] };});
|
||||
|
||||
metrics.memory = result.memory.map(v => {
|
||||
return {
|
||||
x: v[1]*1000,
|
||||
y: (v[0] / 1024 / 1024 / 1024).toFixed(2)
|
||||
};
|
||||
});
|
||||
|
||||
metrics.swap = result.swap.map(v => {
|
||||
return {
|
||||
x: v[1]*1000,
|
||||
y: (v[0] / 1024 / 1024 / 1024).toFixed(2)
|
||||
};
|
||||
});
|
||||
|
||||
metrics.blockReadRate = result.blockReadRate.map(v => {
|
||||
return {
|
||||
x: v[1]*1000,
|
||||
y: v[0]
|
||||
};
|
||||
});
|
||||
|
||||
metrics.blockWriteRate = result.blockWriteRate.map(v => {
|
||||
return {
|
||||
x: v[1]*1000,
|
||||
y: v[0]
|
||||
};
|
||||
});
|
||||
|
||||
metrics.networkReadRate = result.networkReadRate.map(v => {
|
||||
return {
|
||||
x: v[1]*1000,
|
||||
y: v[0]
|
||||
};
|
||||
});
|
||||
|
||||
metrics.networkWriteRate = result.networkWriteRate.map(v => {
|
||||
return {
|
||||
x: v[1]*1000,
|
||||
y: v[0]
|
||||
};
|
||||
});
|
||||
metrics.cpu = result.cpu.map(transformMsecs); // cpu is already scaled to cpu*100
|
||||
metrics.memory = result.memory.map(transformGiB);
|
||||
metrics.swap = result.swap.map(transformGiB);
|
||||
metrics.blockReadRate = result.blockReadRate.map(transformMsecs);
|
||||
metrics.blockWriteRate = result.blockWriteRate.map(transformMsecs);
|
||||
metrics.networkReadRate = result.networkReadRate.map(transformMsecs);
|
||||
metrics.networkWriteRate = result.networkWriteRate.map(transformMsecs);
|
||||
|
||||
metrics.networkReadTotal = result.networkReadTotal;
|
||||
metrics.networkWriteTotal = result.networkWriteTotal;
|
||||
|
||||
Reference in New Issue
Block a user