[IMP] *: crons tests running for better triggered compatibility

Mergebot / forwardport crons need to run in a specific ordering in
order to flow into one another correctly. The default ordering being
unspecified, it was not possible to use the normal cron
runner (instead of the external driver running crons in sequence one
at a time). This can be fixed by setting *sequences* on crons, as the
cron runner (`_process_jobs`) will use that order to acquire and run
crons.

Also override `_process_jobs` however: the built-in cron runner
fetches a static list of ready crons, then runs that.

This is fine for normal situation where the cron runner runs in a loop
anyway, but it's an issue for the tests, as we expect that cron A can
trigger cron B, and we want cron B to run *right now* even if it
hadn't been triggered before cron A ran.

We can replace `_process_jobs` with a cut down version which does
that (cut down because we don't need most of the error handling /
resilience, there's no concurrent workers, there's no module being
installed, versions must match, ...). This allows e.g. the cron
propagating commit statuses to trigger the staging cron, and both will
run within the same `run_crons` session.

Something I didn't touch is that `_process_jobs` internally creates
completely new environments so there is no way to pass context into
the cron jobs anymore (whereas it works for `method_direct_trigger`),
this means the context values have to be shunted elsewhere for that
purpose which is gross. But even though I'm replacing `_process_jobs`,
this seems a bit too much of a change in cron execution semantics. So
left it out.

While at it, though, silence the spammy `py.warnings` stuff I can't do
much about.
This commit is contained in:
Xavier Morel 2024-07-30 09:21:05 +02:00
parent 32871c3896
commit dd17730f4c
8 changed files with 96 additions and 23 deletions

View File

@ -1,5 +1,6 @@
from __future__ import annotations
import datetime
import select
import shutil
import threading
@ -316,7 +317,8 @@ class DbDict(dict):
'-d', db, '-i', module + ',saas_worker,auth_oauth',
'--max-cron-threads', '0',
'--stop-after-init',
'--log-level', 'warn'
'--log-level', 'warn',
'--log-handler', 'py.warnings:ERROR',
],
check=True,
env={**os.environ, 'XDG_DATA_HOME': str(d)}
@ -413,15 +415,58 @@ def dummy_addons_path():
mod = pathlib.Path(dummy_addons_path, 'saas_worker')
mod.mkdir(0o700)
(mod / '__init__.py').write_text('''\
import builtins
import logging
import threading
import psycopg2
import odoo
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
class Base(models.AbstractModel):
    _inherit = 'base'

    def run_crons(self):
        """Run all ready crons on the current database.

        `_process_jobs` creates fresh environments internally, so context
        values cannot reach the cron jobs; shunt the forwardport cutoffs
        through `builtins` so the cron code can pick them up.
        """
        builtins.forwardport_merged_before = self.env.context.get('forwardport_merged_before')
        builtins.forwardport_updated_before = self.env.context.get('forwardport_updated_before')
        try:
            self.env['ir.cron']._process_jobs(self.env.cr.dbname)
        finally:
            # clean up even if _process_jobs raises, so the globals never
            # leak into a later run_crons call
            del builtins.forwardport_updated_before
            del builtins.forwardport_merged_before
        return True
class IrCron(models.Model):
    _inherit = 'ir.cron'

    @classmethod
    def _process_jobs(cls, db_name):
        """Simplified test-only cron runner.

        Unlike the built-in runner, which fetches a static list of ready
        jobs up front, this re-fetches the ready jobs after every job run,
        so a cron triggered by a previous cron executes within the same
        session. Most of the stock error handling (concurrent workers,
        module installs, version checks) is intentionally dropped.
        """
        t = threading.current_thread()
        try:
            db = odoo.sql_db.db_connect(db_name)
            t.dbname = db_name
            with db.cursor() as cron_cr:
                # FIXME: override `_get_all_ready_jobs` to directly lock the cron?
                while job := next((
                    job
                    for j in cls._get_all_ready_jobs(cron_cr)
                    if (job := cls._acquire_one_job(cron_cr, (j['id'],)))
                ), None):
                    # take into account overridings of _process_job() on that database
                    registry = odoo.registry(db_name)
                    registry[cls._name]._process_job(db, cron_cr, job)
                    cron_cr.commit()
        except psycopg2.ProgrammingError:
            # a programming error indicates a bug in the test/cron setup:
            # let it propagate instead of logging it away below
            raise
        except Exception:
            _logger.warning('Exception in cron:', exc_info=True)
        finally:
            if hasattr(t, 'dbname'):
                del t.dbname
''', encoding='utf-8')
(mod / '__manifest__.py').write_text(pprint.pformat({
'name': 'dummy saas_worker',
@ -445,6 +490,7 @@ def addons_path(request, dummy_addons_path):
def server(request, db, port, module, addons_path, tmpdir):
log_handlers = [
'odoo.modules.loading:WARNING',
'py.warnings:ERROR',
]
if not request.config.getoption('--log-github'):
log_handlers.append('github_requests:WARNING')
@ -1210,15 +1256,19 @@ class Environment:
def run_crons(self, *xids, **kw):
    """Force the given crons (default: `self._default_crons`) to run.

    Resolves each xid to its `ir.cron` record, backdates `nextcall` so the
    jobs are considered ready, then lets the server-side `base.run_crons`
    helper drive `_process_jobs` — which also picks up any cron triggered
    by an earlier cron in the same session.

    NOTE(review): this span was a mangled diff (old per-cron
    `method_direct_trigger` lines interleaved with the new batch logic);
    this is the coherent post-commit version with the stale lines removed.
    """
    crons = xids or self._default_crons
    print('running crons', crons, file=sys.stderr)
    cron_ids = []
    for xid in crons:
        if xid is None:
            continue
        model, cron_id = self('ir.model.data', 'check_object_reference', *xid.split('.', 1))
        assert model == 'ir.cron', "Expected {} to be a cron, got {}".format(xid, model)
        cron_ids.append(cron_id)
    if cron_ids:
        # backdate nextcall so every requested cron is "ready" right now
        self('ir.cron', 'write', cron_ids, {
            'nextcall': (datetime.datetime.utcnow() - datetime.timedelta(seconds=30)).isoformat(" ", "seconds")
        }, **kw)
    self('base', 'run_crons', [], **kw)
    print('done', file=sys.stderr)
    # sleep for some time as a lot of crap may have happened (?)
    wait_for_hook()

View File

@ -8,6 +8,7 @@
<field name="interval_type">minutes</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">43</field>
</record>
<record model="ir.cron" id="updates">
@ -19,6 +20,7 @@
<field name="interval_type">minutes</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">46</field>
</record>
<record model="ir.cron" id="reminder">

View File

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
import builtins
import contextlib
import logging
import re
@ -345,7 +346,7 @@ class DeleteBranches(models.Model, Queue):
pr_id = fields.Many2one('runbot_merge.pull_requests')
def _search_domain(self):
cutoff = self.env.context.get('forwardport_merged_before') \
cutoff = getattr(builtins, 'forwardport_merged_before', None) \
or fields.Datetime.to_string(datetime.now() - MERGE_AGE)
return [('pr_id.merge_date', '<', cutoff)]

View File

@ -13,6 +13,7 @@ it up), ...
"""
from __future__ import annotations
import builtins
import datetime
import itertools
import json
@ -487,7 +488,7 @@ stderr:
], order='source_id, id'), lambda p: p.source_id)
def _reminder(self):
cutoff = self.env.context.get('forwardport_updated_before') \
cutoff = getattr(builtins, 'forwardport_updated_before', None) \
or fields.Datetime.to_string(datetime.datetime.now() - DEFAULT_DELTA)
cutoff_dt = fields.Datetime.from_string(cutoff)

View File

@ -8,6 +8,7 @@
<field name="interval_type">minutes</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">30</field>
</record>
<record model="ir.cron" id="staging_cron">
<field name="name">Check for progress of PRs and create Stagings</field>
@ -18,6 +19,7 @@
<field name="interval_type">minutes</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">40</field>
</record>
<record model="ir.cron" id="feedback_cron">
<field name="name">Send feedback to PR</field>
@ -28,6 +30,7 @@
<field name="interval_type">minutes</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">60</field>
</record>
<record model="ir.cron" id="labels_cron">
<field name="name">Update labels on PR</field>
@ -38,6 +41,7 @@
<field name="interval_type">minutes</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">70</field>
</record>
<record model="ir.cron" id="fetch_prs_cron">
<field name="name">Check for PRs to fetch</field>
@ -48,6 +52,7 @@
<field name="interval_type">minutes</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">10</field>
</record>
<record model="ir.cron" id="check_linked_prs_status">
<field name="name">Warn on linked PRs where only one is ready</field>
@ -58,6 +63,7 @@
<field name="interval_type">hours</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">50</field>
</record>
<record model="ir.cron" id="process_updated_commits">
<field name="name">Impact commit statuses on PRs and stagings</field>
@ -68,5 +74,6 @@
<field name="interval_type">minutes</field>
<field name="numbercall">-1</field>
<field name="doall" eval="False"/>
<field name="priority">20</field>
</record>
</odoo>

View File

@ -29,4 +29,4 @@ class BranchCleanup(models.TransientModel):
_logger.info("no tmp branch found for %s:%s", r.name, b.name)
res = gh('delete', f'git/refs/heads/staging.{b.name}', check=False)
if res.status_code != 204:
_logger.info("no staging branch found for %s:%s", res.name, b.name)
_logger.info("no staging branch found for %s:%s", r.name, b.name)

View File

@ -45,10 +45,21 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
staging_id = branch_id.active_staging_id
assert staging_id == pr_id.staging_id
# staging of `pr` should have generated a staging branch
_ = repo.get_ref('heads/staging.other')
# stagings should not need a tmp branch anymore, so this should not exist
with pytest.raises(AssertionError, match=r'Not Found'):
repo.get_ref('heads/tmp.other')
# disable branch "other"
branch_id.active = False
env.run_crons()
# triggered cleanup should have deleted the staging for the disabled `other`
# target branch
with pytest.raises(AssertionError, match=r'Not Found'):
repo.get_ref('heads/staging.other')
# the PR should not have been closed implicitly
assert pr_id.state == 'ready'
# but it should be unstaged
@ -86,17 +97,10 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
assert pr_id.staging_id
# staging of `pr` should have generated a staging branch
_ = repo.get_ref('heads/staging.other')
_ = repo.get_ref('heads/staging.other2')
# stagings should not need a tmp branch anymore, so this should not exist
with pytest.raises(AssertionError, match=r'Not Found'):
repo.get_ref('heads/tmp.other')
assert env['base'].run_crons()
# triggered cleanup should have deleted the staging for the disabled `other`
# target branch
with pytest.raises(AssertionError, match=r'Not Found'):
repo.get_ref('heads/staging.other')
repo.get_ref('heads/tmp.other2')
def test_new_pr_no_branch(env, project, make_repo, setreviewers, users):
""" A new PR to an *unknown* branch should be ignored and warn

View File

@ -1,3 +1,4 @@
import datetime
import functools
from itertools import repeat
@ -66,16 +67,16 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second):
with repo:
[m] = repo.make_commits(None, Commit("m", tree={"ble": "1"}), ref="heads/master")
[c] = repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1")
repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1")
pr1 = repo.make_pr(title="whatever", target="master", head="pr1")
[c] = repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2")
repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2")
pr2 = repo.make_pr(title="whatever", target="master", head="pr2")
[c] = repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3")
repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3")
pr3 = repo.make_pr(title="whatever", target="master", head="pr3")
[c] = repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4")
repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4")
pr4 = repo.make_pr(title="whatever", target="master", head="pr4")
prs = [pr1, pr2, pr3, pr4]
@ -106,6 +107,13 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second):
# trigger a split
with repo:
repo.post_status('staging.master', 'failure')
# specifically delay creation of new staging to observe the failed
# staging's state and the splits
model, cron_id = env['ir.model.data'].check_object_reference('runbot_merge', 'staging_cron')
env[model].browse([cron_id]).write({
'nextcall': (datetime.datetime.utcnow() + datetime.timedelta(minutes=10)).isoformat(" ", "seconds")
})
env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron')
assert not staging.active
assert not env['runbot_merge.stagings'].search([]).active