# -*- coding: utf-8 -*-
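"""Work queues used by the forward-port bot: merged batches to port,
follow-up updates to propagate down the chain, stale branches to delete,
and weekly maintenance of the local repository caches."""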
import logging
import pathlib
import resource
import subprocess
import uuid
from contextlib import ExitStack
from datetime import datetime, timedelta

from dateutil import relativedelta

from odoo import fields, models
from odoo.addons.runbot_merge.github import GH
from odoo.tools.appdirs import user_cache_dir

# how long a merged PR survives
MERGE_AGE = relativedelta.relativedelta(weeks=2)

_logger = logging.getLogger(__name__)


class Queue:
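    """Mixin for queue models processed by cron.

    ``_process`` fetches up to ``limit`` items (oldest first), processes and
    deletes each one, committing after every item; a failing item is logged,
    rolled back and left in place for a later retry, so it does not block the
    rest of the queue.
    """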
    __slots__ = ()
    limit = 100

    def _process_item(self):
        raise NotImplementedError

    def _process(self):
        for b in self.search(self._search_domain(), order='create_date, id', limit=self.limit):
            try:
                b._process_item()
                b.unlink()
                self.env.cr.commit()
            except Exception:
                _logger.exception("Error while processing %s, skipping", b)
                self.env.cr.rollback()
                b._on_failure()
                self.env.cr.commit()

    def _on_failure(self):
        pass

    def _search_domain(self):
        return []


class ForwardPortTasks(models.Model, Queue):
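    """Queue of merged batches waiting to be forward-ported.

    Failed items are not dropped: ``_on_failure`` pushes ``retry_after`` half
    an hour into the future, and ``_search_domain`` only selects items whose
    ``retry_after`` is in the past.
    """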
    _name = 'forwardport.batches'
    _description = 'batches which got merged and are candidates for forward-porting'

    limit = 10

    batch_id = fields.Many2one('runbot_merge.batch', required=True)
    source = fields.Selection([
        ('merge', 'Merge'),
        ('fp', 'Forward Port Followup'),
        ('insert', 'New branch port')
    ], required=True)
    retry_after = fields.Datetime(required=True, default='1900-01-01 01:01:01')

    def _search_domain(self):
        return super()._search_domain() + [
            ('retry_after', '<=', fields.Datetime.to_string(fields.Datetime.now())),
        ]

    def _on_failure(self):
        super()._on_failure()
        self.retry_after = fields.Datetime.to_string(fields.Datetime.now() + timedelta(minutes=30))

    def _process_item(self):
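        # Port the batch's PRs to the next target branch. If this is an
        # 'insert' (a new branch was added to the sequence), splice the new
        # PRs into the existing ancestry chain; if nothing gets created, the
        # sequence is complete. Either way the source batch is deactivated at
        # the end.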
        batch = self.batch_id
        newbatch = batch.prs._port_forward()

        if newbatch:
            _logger.info(
                "Processing %s (from %s): %s (%s) -> %s (%s)",
                self.id, self.source,
                batch, batch.prs,
                newbatch, newbatch.prs,
            )
            # insert new batch in ancestry sequence unless conflict (= no parent)
            if self.source == 'insert':
                for pr in newbatch.prs:
                    if not pr.parent_id:
                        break
                    newchild = pr.search([
                        ('parent_id', '=', pr.parent_id.id),
                        ('id', '!=', pr.id),
                    ])
                    if newchild:
                        newchild.parent_id = pr.id
        else: # reached end of seq (or batch is empty)
            # FIXME: or configuration is fucky so doesn't want to FP (maybe should error and retry?)
            _logger.info(
                "Processing %s (from %s): %s (%s) -> end of the sequence",
                self.id, self.source,
                batch, batch.prs
            )
        batch.active = False
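

# Feedback messages posted on the PRs involved when propagating an update
# runs into a conflict; formatted with `ping`, `previous` and `next`.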
CONFLICT_TEMPLATE = "{ping}WARNING: the latest change ({previous.head}) triggered " \
                    "a conflict when updating the next forward-port " \
                    "({next.display_name}), and has been ignored.\n\n" \
                    "You will need to update this pull request differently, " \
                    "or fix the issue by hand on {next.display_name}."
CHILD_CONFLICT = "{ping}WARNING: the update of {previous.display_name} to " \
                 "{previous.head} has caused a conflict in this pull request, " \
                 "data may have been lost."


class UpdateQueue(models.Model, Queue):
    _name = 'forwardport.updates'
    _description = 'if a forward-port PR gets updated & has followups (cherrypick succeeded) the followups need to be updated as well'

    limit = 10

    original_root = fields.Many2one('runbot_merge.pull_requests')
    new_root = fields.Many2one('runbot_merge.pull_requests')

    def _process_item(self):
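        # Walk the chain of forward-ports descending from the updated root.
        # Each child row is locked up front (SELECT ... FOR UPDATE NOWAIT) so
        # a concurrent transaction touching the same PR fails fast; the child
        # branch is then re-created on top of the updated `previous` PR and
        # force-pushed using a lease on its old head.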
        Feedback = self.env['runbot_merge.pull_requests.feedback']
        previous = self.new_root
        with ExitStack() as s:
            for child in self.new_root._iter_descendants():
                self.env.cr.execute("""
                    SELECT id
                    FROM runbot_merge_pull_requests
                    WHERE id = %s
                    FOR UPDATE NOWAIT
                """, [child.id])
                _logger.info(
                    "Re-port %s from %s (changed root %s -> %s)",
                    child.display_name,
                    previous.display_name,
                    self.original_root.display_name,
                    self.new_root.display_name
                )
                if child.state in ('closed', 'merged'):
                    Feedback.create({
                        'repository': child.repository.id,
                        'pull_request': child.number,
                        'message': "%sancestor PR %s has been updated but this PR"
                                   " is %s and can't be updated to match."
                                   "\n\n"
                                   "You may want or need to manually update any"
                                   " followup PR." % (
                            child.ping(),
                            self.new_root.display_name,
                            child.state,
                        )
                    })
                    return

                conflicts, working_copy = previous._create_fp_branch(
                    child.target, child.refname, s)
                if conflicts:
                    _, out, err, _ = conflicts
                    Feedback.create({
                        'repository': previous.repository.id,
                        'pull_request': previous.number,
                        'message': CONFLICT_TEMPLATE.format(
                            ping=previous.ping(),
                            previous=previous,
                            next=child
                        )
                    })
                    Feedback.create({
                        'repository': child.repository.id,
                        'pull_request': child.number,
                        'message': CHILD_CONFLICT.format(ping=child.ping(), previous=previous, next=child)
                            + (f'\n\nstdout:\n```\n{out.strip()}\n```' if out.strip() else '')
                            + (f'\n\nstderr:\n```\n{err.strip()}\n```' if err.strip() else '')
                    })

                new_head = working_copy.stdout().rev_parse(child.refname).stdout.decode().strip()
                commits_count = int(working_copy.stdout().rev_list(
                    f'{child.target.name}..{child.refname}',
                    count=True
                ).stdout.decode().strip())
                old_head = child.head
                # update child's head to the head we're going to push
                child.with_context(ignore_head_update=True).write({
                    'head': new_head,
                    # 'state': 'opened',
                    'squash': commits_count == 1,
                })
                # push the new head to the local cache: in some cases github
                # doesn't propagate revisions fast enough so on the next loop we
                # can't find the revision we just pushed
                dummy_branch = str(uuid.uuid4())
                ref = previous._get_local_directory()
                working_copy.push(ref._directory, f'{new_head}:refs/heads/{dummy_branch}')
                ref.branch('--delete', '--force', dummy_branch)
                # then update the child's branch to the new head
                working_copy.push(f'--force-with-lease={child.refname}:{old_head}',
                                  'target', child.refname)

                # committing here means github could technically trigger its
                # webhook before sending a response, but committing before
                # would mean we can update the PR in database but fail to
                # update on github, which is probably worse?
                # alternatively we can commit, push, and rollback if the push
                # fails
                # FIXME: handle failures (especially on non-first update)
                self.env.cr.commit()

                previous = child

_deleter = _logger.getChild('deleter')


class DeleteBranches(models.Model, Queue):
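    """Deletes the branch behind a merged PR once it is old enough (merged
    before now - ``MERGE_AGE``, or before the ``forwardport_merged_before``
    context value when set), with sanity checks so only branches in the
    forward-port target which still point at the merged head get removed.
    """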
    _name = 'forwardport.branch_remover'
    _description = "Removes branches of merged PRs"

    pr_id = fields.Many2one('runbot_merge.pull_requests')

    def _search_domain(self):
        cutoff = self.env.context.get('forwardport_merged_before') \
             or fields.Datetime.to_string(datetime.now() - MERGE_AGE)
        return [('pr_id.merge_date', '<', cutoff)]

    def _process_item(self):
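        # Only delete when it is safe to do so: the PR must actually be
        # merged, the repository must have a forward-port remote, the branch
        # must live under the same owner as that remote, and it must still
        # point at the PR's recorded head.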
        _deleter.info(
            "PR %s: checking deletion of linked branch %s",
            self.pr_id.display_name,
            self.pr_id.label
        )

        if self.pr_id.state != 'merged':
            _deleter.info('✘ PR is not "merged" (got %s)', self.pr_id.state)
            return

        repository = self.pr_id.repository
        fp_remote = repository.fp_remote_target
        if not fp_remote:
            _deleter.info('✘ no forward-port target')
            return

        repo_owner, repo_name = fp_remote.split('/')
        owner, branch = self.pr_id.label.split(':')
        if repo_owner != owner:
            _deleter.info('✘ PR owner != FP target owner (%s)', repo_owner)
            return # probably don't have access to arbitrary repos

        github = GH(token=repository.project_id.fp_github_token, repo=fp_remote)
        refurl = 'git/refs/heads/' + branch
        ref = github('get', refurl, check=False)
        if ref.status_code != 200:
            _deleter.info("✘ branch already deleted (%s)", ref.json())
            return

        ref = ref.json()
        if isinstance(ref, list):
            _deleter.info(
                "✘ got a fuzzy match (%s), branch probably deleted",
                ', '.join(r['ref'] for r in ref)
            )
            return

        if ref['object']['sha'] != self.pr_id.head:
            _deleter.info(
                "✘ branch %s head mismatch, expected %s, got %s",
                self.pr_id.label,
                self.pr_id.head,
                ref['object']['sha']
            )
            return

        r = github('delete', refurl, check=False)
        assert r.status_code == 204, \
            "Tried to delete branch %s of %s, got %s" % (
                branch, self.pr_id.display_name,
                r.json()
            )
        _deleter.info('✔ deleted branch %s of PR %s', self.pr_id.label, self.pr_id.display_name)

_gc = _logger.getChild('maintenance')
def _bypass_limits():
    """Allow git to go beyond the limits set for Odoo.

    On large repositories, git gc can take a *lot* of memory (especially with
    `--aggressive`); if the Odoo limits are too low this can prevent the gc
    from running, leading to a lack of packing and a massive amount of cruft
    accumulating in the working copy.
    """
    resource.setrlimit(resource.RLIMIT_AS, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))


class GC(models.TransientModel):
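    """Weekly in-place garbage collection of the local repository caches.

    The forward-port cron row is locked (``SELECT ... FOR UPDATE``) for the
    duration of the run so the cron and the maintenance never operate on the
    same working copies concurrently.
    """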
    _name = 'forwardport.maintenance'
    _description = "Weekly maintenance of the forward-port cache repositories"

    def _run(self):
        # lock out the forward-port cron to avoid concurrency issues while
        # we're GC-ing the caches: wait until it's available, then SELECT FOR
        # UPDATE it, which should prevent cron workers from running it
        fp_cron = self.env.ref('forwardport.port_forward')
        self.env.cr.execute("""
            SELECT 1 FROM ir_cron
            WHERE id = %s
            FOR UPDATE
        """, [fp_cron.id])

        repos_dir = pathlib.Path(user_cache_dir('forwardport'))
        # run on all repos with a forwardport target (~ forwardport enabled)
        for repo in self.env['runbot_merge.repository'].search([('fp_remote_target', '!=', False)]):
            repo_dir = repos_dir / repo.name
            if not repo_dir.is_dir():
                continue

            _gc.info('Running maintenance on %s', repo.name)
            r = subprocess.run(
                ['git', '--git-dir', repo_dir, 'gc', '--aggressive', '--prune=now'],
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                encoding='utf-8',
                preexec_fn=_bypass_limits,
            )
            if r.returncode:
                _gc.warning("Maintenance failure (status=%d):\n%s", r.returncode, r.stdout)