How to know the disk usage percentage from a trigger

Also, here is a solution for those who might need it. You need the `bytes` package as a dependency to run it:

// Credentials for the Atlas Administration API (service-account OAuth client).
const BASIC_AUTH_TOKEN = '<output of btoa(`username:secret`)>';

const GROUP_ID = '<your group id>';
const PROCESS_ID = '<your process id>';
const DB_NAME = '<your db name>';

// Total storage available to the database: 2 GiB.
// (Original line ended in a comment where the semicolon belonged, relying on ASI.)
const DB_STORAGE_SIZE = 2 * 2 ** 30;
// Basically maintains the disk at 95% usage.
const TARGET_CLEANUP_RATIO = 0.95;

// Exchanges the service-account credentials for a short-lived Atlas API
// access token via the OAuth client-credentials flow.
const getAccessToken = async () => {
  const res = await context.http.post({
    url: 'https://cloud.mongodb.com/api/oauth/token',
    headers: {
      Accept: ['application/json'],
      'Cache-Control': ['no-cache'],
      'Content-Type': ['application/x-www-form-urlencoded'],
      Authorization: [`Basic ${BASIC_AUTH_TOKEN}`],
    },
    body: 'grant_type=client_credentials',
  });
  const { access_token } = JSON.parse(res.body.text());
  return access_token;
};

// Fetches the most recent one-minute data points for the given measurement
// names from the Atlas Administration API process-measurements endpoint.
const getProcessMeasurements = async (token, measurements) => {
  const endpoint = new URL(
    `https://cloud.mongodb.com/api/atlas/v2/groups/${GROUP_ID}/processes/${PROCESS_ID}/measurements`,
  );
  endpoint.searchParams.append('granularity', 'PT1M');
  endpoint.searchParams.append('period', 'PT1M');
  // The API takes one `m` query parameter per requested measurement,
  // so these must be appended, not set.
  for (const name of measurements) {
    endpoint.searchParams.append('m', name);
  }

  const res = await context.http.get({
    url: endpoint.href,
    headers: {
      'Cache-Control': ['no-cache'],
      Accept: ['application/vnd.atlas.2025-02-19+json'],
      Authorization: [`Bearer ${token}`],
    },
  });
  return JSON.parse(res.body.text());
};

// Reads the database's current logical size (in bytes) from the newest
// LOGICAL_SIZE data point reported by the process-measurements API.
const getDBSize = async token => {
  const data = await getProcessMeasurements(token, ['LOGICAL_SIZE']);
  const [measurement] = data.measurements;
  return measurement.dataPoints[0].value;
};

// Scheduled trigger entry point: when the database's logical size exceeds
// TARGET_CLEANUP_RATIO of DB_STORAGE_SIZE, deletes the oldest documents until
// enough space is reclaimed. Returns the remaining headroom (formatted) when
// no cleanup is needed, otherwise a summary of what was deleted.
exports = async function () {
  const bytes = require('bytes');
  const TOKEN = await getAccessToken();
  const DB_SIZE = await getDBSize(TOKEN);

  const CLEANUP_REQUIRED_SIZE = Math.ceil(DB_STORAGE_SIZE * TARGET_CLEANUP_RATIO);
  const CLEANUP_SIZE = DB_SIZE - CLEANUP_REQUIRED_SIZE;
  // FIX: was `< 0`. With CLEANUP_SIZE === 0 (database exactly at the
  // threshold) the loop below would still run its first iteration
  // (`0 > 0` is false) and delete one document for zero gain.
  if (CLEANUP_SIZE <= 0) return bytes.format(CLEANUP_SIZE);

  const files = context.services.get('unciv').db('unciv').collection('UncivServer');
  // Oldest documents first; cap the scan at 1000 candidates per run.
  const results = await files
    .aggregate([
      { $sort: { timestamp: 1 } },
      { $limit: 1000 },
      {
        $project: {
          _id: 1,
          timestamp: 1,
          // NOTE(review): $strLenCP counts code points, not bytes; for
          // byte-accurate accounting of UTF-8 text, $strLenBytes would be
          // closer to the on-disk size — confirm before switching.
          textLength: { $strLenCP: '$text' },
        },
      },
    ])
    .toArray();

  // Greedily collect the oldest entries until their combined text size
  // covers the space that needs to be freed.
  const idsToClean = [];
  let cleanupSizeCandidate = 0;
  let maxTimestamp = Date.now();
  for (const entry of results) {
    if (cleanupSizeCandidate > CLEANUP_SIZE) break;
    cleanupSizeCandidate += entry.textLength;
    maxTimestamp = entry.timestamp;
    idsToClean.push(entry._id);
  }

  return {
    maxTimestamp,
    cleanupSizeCandidate: bytes.format(cleanupSizeCandidate),
    // deleteMany with an empty $in array safely deletes nothing.
    deleteResult: await files.deleteMany({ _id: { $in: idsToClean } }),
  };
};