From c35b721f0ec7d01cb514f433567f6defc0b89576 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Mon, 7 Nov 2022 09:53:11 +0100 Subject: [PATCH] [IMP] forwardport: gc/maintenance of local repo caches The current system makes / lets GC run during fetching. This has a few issues: - the autogc consumes resources during the forward-porting process (not that it's hugely urgent but it seems unnecessary) - the autogc commonly fails due to the combination of large repository (odoo/odoo) and low memory limits (hardmem for odoo, which get translated into soft ulimits) As a result, the garbage collection of the repository sometimes stops entirely, leading to an increase in repository size and a decrease in performance. To mitigate this issue, disable the automagic gc and maintenance during normal operation, and instead add a weekly cron which runs an aggressive GC with memory limits disabled (as far as they can get, if the limits are imposed externally there's nothing to be done). The maintenance is implemented using a full lockout of the forward-port cron and an in-place GC rather than a copy/gc/swap, as doing this maintenance at the small hours of the week-end (sat-sun night) seems like a non-issue: currently an aggressive GC of odoo/odoo (using the default aggressive options) takes a total of 2:30 wallclock (5h user) on a fairly elderly machine (it's closer to 20mn wallclock and 2h user on my local machine, also turns out the cache repos are kinda badly configured leading to ~30% more objects than necessary which doesn't help). 
For the record, a fresh checkout of odoo/odoo right now yields: | Overall repository size | | | * Commits | | | * Count | 199 k | | * Total size | 102 MiB | | * Trees | | | * Count | 1.60 M | | * Total size | 2.67 GiB | | * Total tree entries | 74.1 M | | * Blobs | | | * Count | 1.69 M | | * Total size | 72.4 GiB | If this still proves insufficient, a further option would be to deploy a "generational repacking" strategy: https://gitlab.com/gitlab-org/gitaly/-/issues/2861 (though apparently it's not yet been implemented / deployed on gitlab so...). But for now we'll see how it shakes out. Close #489 --- forwardport/data/crons.xml | 13 +++++++++ forwardport/models/forwardport.py | 47 +++++++++++++++++++++++++++++++ forwardport/models/project.py | 3 +- 3 files changed, 62 insertions(+), 1 deletion(-) diff --git a/forwardport/data/crons.xml b/forwardport/data/crons.xml index 02d2be1e..1360914c 100644 --- a/forwardport/data/crons.xml +++ b/forwardport/data/crons.xml @@ -42,4 +42,17 @@ -1 + + + Maintenance of repo cache + + code + model._run() + + + 1 + weeks + -1 + + diff --git a/forwardport/models/forwardport.py b/forwardport/models/forwardport.py index 727fb6a0..f94b6db9 100644 --- a/forwardport/models/forwardport.py +++ b/forwardport/models/forwardport.py @@ -1,5 +1,8 @@ # -*- coding: utf-8 -*- import logging +import pathlib +import resource +import subprocess import uuid from contextlib import ExitStack from datetime import datetime, timedelta @@ -8,6 +11,7 @@ from dateutil import relativedelta from odoo import fields, models from odoo.addons.runbot_merge.github import GH +from odoo.tools.appdirs import user_cache_dir # how long a merged PR survives MERGE_AGE = relativedelta.relativedelta(weeks=2) @@ -266,3 +270,46 @@ class DeleteBranches(models.Model, Queue): r.json() ) _deleter.info('✔ deleted branch %s of PR %s', self.pr_id.label, self.pr_id.display_name) + +_gc = _logger.getChild('maintenance') +def _bypass_limits(): + """Allow git to go beyond the limits set 
for Odoo. + + On large repositories, git gc can take a *lot* of memory (especially with + `--aggressive`), if the Odoo limits are too low this can prevent the gc + from running, leading to a lack of packing and a massive amount of cruft + accumulating in the working copy. + """ + resource.setrlimit(resource.RLIMIT_AS, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) + +class GC(models.TransientModel): + _name = 'forwardport.maintenance' + _description = "Weekly maintenance of... cache repos?" + + def _run(self): + # lock out the forward port cron to avoid concurrency issues while we're + # GC-ing it: wait until it's available, then SELECT FOR UPDATE it, + # which should prevent cron workers from running it + fp_cron = self.env.ref('forwardport.port_forward') + self.env.cr.execute(""" + SELECT 1 FROM ir_cron + WHERE id = %s + FOR UPDATE + """, [fp_cron.id]) + + repos_dir = pathlib.Path(user_cache_dir('forwardport')) + # run on all repos with a forwardport target (~ forwardport enabled) + for repo in self.env['runbot_merge.repository'].search([('fp_remote_target', '!=', False)]): + repo_dir = repos_dir / repo.name + if not repo_dir.is_dir(): + continue + + _gc.info('Running maintenance on %s', repo.name) + r = subprocess.run( + ['git', '--git-dir', repo_dir, 'gc', '--aggressive', '--prune=now'], + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + encoding='utf-8', + preexec_fn = _bypass_limits, + ) + if r.returncode: + _gc.warning("Maintenance failure (status=%d):\n%s", r.returncode, r.stdout) diff --git a/forwardport/models/project.py b/forwardport/models/project.py index 7fe95a1f..1f96e4c8 100644 --- a/forwardport/models/project.py +++ b/forwardport/models/project.py @@ -1152,6 +1152,7 @@ class Feedback(models.Model): token_field = fields.Selection(selection_add=[('fp_github_token', 'Forwardport Bot')]) +ALWAYS = ('gc.auto=0', 'maintenance.auto=0') def git(directory): return Repo(directory, check=True) class Repo: def __init__(self, directory, **config): @@ 
-1167,7 +1168,7 @@ class Repo: def _run(self, *args, **kwargs): opts = {**self._config, **kwargs} args = ('git', '-C', self._directory)\ - + tuple(itertools.chain.from_iterable(('-c', p) for p in self._params))\ + + tuple(itertools.chain.from_iterable(('-c', p) for p in self._params + ALWAYS))\ + args try: return self._opener(args, **opts)