From 32871c389690923aeb8a06703977af0fdec9c7fe Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Tue, 30 Jul 2024 13:47:49 +0200 Subject: [PATCH 1/8] [FIX] *: stray prints Found a bunch of old leftover `print` calls which should not be in the repo. --- conftest.py | 1 - forwardport/models/project.py | 2 -- forwardport/tests/test_weird.py | 5 ----- 3 files changed, 8 deletions(-) diff --git a/conftest.py b/conftest.py index 66afa127..7fb85dde 100644 --- a/conftest.py +++ b/conftest.py @@ -602,7 +602,6 @@ def _rate_limited(req): if not q.ok and q.headers.get('X-RateLimit-Remaining') == '0': reset = int(q.headers['X-RateLimit-Reset']) delay = max(0, round(reset - time.time() + 1.0)) - print("Hit rate limit, sleeping for", delay, "seconds") time.sleep(delay) continue break diff --git a/forwardport/models/project.py b/forwardport/models/project.py index e6599975..30c2df5d 100644 --- a/forwardport/models/project.py +++ b/forwardport/models/project.py @@ -501,8 +501,6 @@ stderr: pr_ids = {p.id for p in prs} for pr in prs: pr_ids.discard(pr.parent_id.id) - print(source, prs, [p.parent_id for p in prs], - '\n\t->', pr_ids, flush=True) for pr in (p for p in prs if p.id in pr_ids): self.env.ref('runbot_merge.forwardport.reminder')._send( repository=pr.repository, diff --git a/forwardport/tests/test_weird.py b/forwardport/tests/test_weird.py index e2c573b6..47cf970a 100644 --- a/forwardport/tests/test_weird.py +++ b/forwardport/tests/test_weird.py @@ -1197,11 +1197,6 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port pr_c_id.write({'parent_id': False, 'detach_reason': 'because'}) env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) - for p in env['runbot_merge.pull_requests'].search_read( - [], ['id', 'parent_id', 'source_id', 'display_name'] - ): - print(p, flush=True) - assert pr_b.comments[2:] == [ (users['user'], "@%s @%s child PR %s was modified / updated and has become a normal PR. 
This PR (and any of its parents) will need to be merged independently as approvals won't cross." % ( users['user'], From dd17730f4cd3c24ea60bc27282a5badec9738cfc Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Tue, 30 Jul 2024 09:21:05 +0200 Subject: [PATCH 2/8] [IMP] *: crons tests running for better triggered compatibility Mergebot / forwardport crons need to run in a specific ordering in order to flow into one another correctly. The default ordering being unspecified, it was not possible to use the normal cron runner (instead of the external driver running crons in sequence one at a time). This can be fixed by setting *sequences* on crons, as the cron runner (`_process_jobs`) will use that order to acquire and run crons. Also override `_process_jobs` however: the built-in cron runner fetches a static list of ready crons, then runs that. This is fine for normal situation where the cron runner runs in a loop anyway but it's any issue for the tests, as we expect that cron A can trigger cron B, and we want cron B to run *right now* even if it hadn't been triggered before cron A ran. We can replace `_process_job` with a cut down version which does that (cut down because we don't need most of the error handling / resilience, there's no concurrent workers, there's no module being installed, versions must match, ...). This allows e.g. the cron propagating commit statuses to trigger the staging cron, and both will run within the same `run_crons` session. Something I didn't touch is that `_process_jobs` internally creates completely new environments so there is no way to pass context into the cron jobs anymore (whereas it works for `method_direct_trigger`), this means the context values have to be shunted elsewhere for that purpose which is gross. But even though I'm replacing `_process_jobs`, this seems a bit too much of a change in cron execution semantics. So left it out. While at it tho, silence the spammy `py.warnings` stuff I can't do much about. 
--- conftest.py | 64 +++++++++++++++++-- forwardport/data/crons.xml | 2 + forwardport/models/forwardport.py | 3 +- forwardport/models/project.py | 3 +- runbot_merge/data/merge_cron.xml | 7 ++ .../models/crons/cleanup_scratch_branches.py | 2 +- runbot_merge/tests/test_disabled_branch.py | 22 ++++--- runbot_merge/tests/test_project_toggles.py | 16 +++-- 8 files changed, 96 insertions(+), 23 deletions(-) diff --git a/conftest.py b/conftest.py index 7fb85dde..59f0753e 100644 --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,6 @@ from __future__ import annotations +import datetime import select import shutil import threading @@ -316,7 +317,8 @@ class DbDict(dict): '-d', db, '-i', module + ',saas_worker,auth_oauth', '--max-cron-threads', '0', '--stop-after-init', - '--log-level', 'warn' + '--log-level', 'warn', + '--log-handler', 'py.warnings:ERROR', ], check=True, env={**os.environ, 'XDG_DATA_HOME': str(d)} @@ -413,15 +415,58 @@ def dummy_addons_path(): mod = pathlib.Path(dummy_addons_path, 'saas_worker') mod.mkdir(0o700) (mod / '__init__.py').write_text('''\ +import builtins +import logging +import threading + +import psycopg2 + +import odoo from odoo import api, fields, models +_logger = logging.getLogger(__name__) + class Base(models.AbstractModel): _inherit = 'base' def run_crons(self): + builtins.forwardport_merged_before = self.env.context.get('forwardport_merged_before') + builtins.forwardport_updated_before = self.env.context.get('forwardport_updated_before') self.env['ir.cron']._process_jobs(self.env.cr.dbname) + del builtins.forwardport_updated_before + del builtins.forwardport_merged_before return True + + +class IrCron(models.Model): + _inherit = 'ir.cron' + + @classmethod + def _process_jobs(cls, db_name): + t = threading.current_thread() + try: + db = odoo.sql_db.db_connect(db_name) + t.dbname = db_name + with db.cursor() as cron_cr: + # FIXME: override `_get_all_ready_jobs` to directly lock the cron? 
+ while jobs := next(( + job + for j in cls._get_all_ready_jobs(cron_cr) + if (job := cls._acquire_one_job(cron_cr, (j['id'],))) + ), None): + # take into account overridings of _process_job() on that database + registry = odoo.registry(db_name) + registry[cls._name]._process_job(db, cron_cr, job) + cron_cr.commit() + + except psycopg2.ProgrammingError as e: + raise + except Exception: + _logger.warning('Exception in cron:', exc_info=True) + finally: + if hasattr(t, 'dbname'): + del t.dbname ''', encoding='utf-8') (mod / '__manifest__.py').write_text(pprint.pformat({ 'name': 'dummy saas_worker', @@ -445,6 +490,7 @@ def addons_path(request, dummy_addons_path): def server(request, db, port, module, addons_path, tmpdir): log_handlers = [ 'odoo.modules.loading:WARNING', + 'py.warnings:ERROR', ] if not request.config.getoption('--log-github'): log_handlers.append('github_requests:WARNING') @@ -1210,15 +1256,19 @@ class Environment: def run_crons(self, *xids, **kw): crons = xids or self._default_crons - print('running crons', crons, file=sys.stderr) + cron_ids = [] for xid in crons: - t0 = time.time() - print('\trunning cron', xid, '...', file=sys.stderr) + if xid is None: + continue + model, cron_id = self('ir.model.data', 'check_object_reference', *xid.split('.', 1)) assert model == 'ir.cron', "Expected {} to be a cron, got {}".format(xid, model) - self('ir.cron', 'method_direct_trigger', [cron_id], **kw) - print('\tdone %.3fs' % (time.time() - t0), file=sys.stderr) - print('done', file=sys.stderr) + cron_ids.append(cron_id) + if cron_ids: + self('ir.cron', 'write', cron_ids, { + 'nextcall': (datetime.datetime.utcnow() - datetime.timedelta(seconds=30)).isoformat(" ", "seconds") + }, **kw) + self('base', 'run_crons', [], **kw) # sleep for some time as a lot of crap may have happened (?) 
wait_for_hook() diff --git a/forwardport/data/crons.xml b/forwardport/data/crons.xml index 02d2be1e..7d0e1bf7 100644 --- a/forwardport/data/crons.xml +++ b/forwardport/data/crons.xml @@ -8,6 +8,7 @@ minutes -1 + 43 @@ -19,6 +20,7 @@ minutes -1 + 46 diff --git a/forwardport/models/forwardport.py b/forwardport/models/forwardport.py index a0604da7..b6ac07d8 100644 --- a/forwardport/models/forwardport.py +++ b/forwardport/models/forwardport.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import builtins import contextlib import logging import re @@ -345,7 +346,7 @@ class DeleteBranches(models.Model, Queue): pr_id = fields.Many2one('runbot_merge.pull_requests') def _search_domain(self): - cutoff = self.env.context.get('forwardport_merged_before') \ + cutoff = getattr(builtins, 'forwardport_merged_before', None) \ or fields.Datetime.to_string(datetime.now() - MERGE_AGE) return [('pr_id.merge_date', '<', cutoff)] diff --git a/forwardport/models/project.py b/forwardport/models/project.py index 30c2df5d..72630453 100644 --- a/forwardport/models/project.py +++ b/forwardport/models/project.py @@ -13,6 +13,7 @@ it up), ... 
""" from __future__ import annotations +import builtins import datetime import itertools import json @@ -487,7 +488,7 @@ stderr: ], order='source_id, id'), lambda p: p.source_id) def _reminder(self): - cutoff = self.env.context.get('forwardport_updated_before') \ + cutoff = getattr(builtins, 'forwardport_updated_before', None) \ or fields.Datetime.to_string(datetime.datetime.now() - DEFAULT_DELTA) cutoff_dt = fields.Datetime.from_string(cutoff) diff --git a/runbot_merge/data/merge_cron.xml b/runbot_merge/data/merge_cron.xml index b0ff6e1d..ffd3147f 100644 --- a/runbot_merge/data/merge_cron.xml +++ b/runbot_merge/data/merge_cron.xml @@ -8,6 +8,7 @@ minutes -1 + 30 Check for progress of PRs and create Stagings @@ -18,6 +19,7 @@ minutes -1 + 40 Send feedback to PR @@ -28,6 +30,7 @@ minutes -1 + 60 Update labels on PR @@ -38,6 +41,7 @@ minutes -1 + 70 Check for PRs to fetch @@ -48,6 +52,7 @@ minutes -1 + 10 Warn on linked PRs where only one is ready @@ -58,6 +63,7 @@ hours -1 + 50 Impact commit statuses on PRs and stagings @@ -68,5 +74,6 @@ minutes -1 + 20 diff --git a/runbot_merge/models/crons/cleanup_scratch_branches.py b/runbot_merge/models/crons/cleanup_scratch_branches.py index 1e7b6274..a5e041ce 100644 --- a/runbot_merge/models/crons/cleanup_scratch_branches.py +++ b/runbot_merge/models/crons/cleanup_scratch_branches.py @@ -29,4 +29,4 @@ class BranchCleanup(models.TransientModel): _logger.info("no tmp branch found for %s:%s", r.name, b.name) res = gh('delete', f'git/refs/heads/staging.{b.name}', check=False) if res.status_code != 204: - _logger.info("no staging branch found for %s:%s", res.name, b.name) + _logger.info("no staging branch found for %s:%s", r.name, b.name) diff --git a/runbot_merge/tests/test_disabled_branch.py b/runbot_merge/tests/test_disabled_branch.py index cfd16bae..88e5e490 100644 --- a/runbot_merge/tests/test_disabled_branch.py +++ b/runbot_merge/tests/test_disabled_branch.py @@ -45,10 +45,21 @@ def test_existing_pr_disabled_branch(env, 
project, make_repo, setreviewers, conf staging_id = branch_id.active_staging_id assert staging_id == pr_id.staging_id + # staging of `pr` should have generated a staging branch + _ = repo.get_ref('heads/staging.other') + # stagings should not need a tmp branch anymore, so this should not exist + with pytest.raises(AssertionError, match=r'Not Found'): + repo.get_ref('heads/tmp.other') + # disable branch "other" branch_id.active = False env.run_crons() + # triggered cleanup should have deleted the staging for the disabled `other` + # target branch + with pytest.raises(AssertionError, match=r'Not Found'): + repo.get_ref('heads/staging.other') + # the PR should not have been closed implicitly assert pr_id.state == 'ready' # but it should be unstaged @@ -86,17 +97,10 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf assert pr_id.staging_id # staging of `pr` should have generated a staging branch - _ = repo.get_ref('heads/staging.other') + _ = repo.get_ref('heads/staging.other2') # stagings should not need a tmp branch anymore, so this should not exist with pytest.raises(AssertionError, match=r'Not Found'): - repo.get_ref('heads/tmp.other') - - assert env['base'].run_crons() - - # triggered cleanup should have deleted the staging for the disabled `other` - # target branch - with pytest.raises(AssertionError, match=r'Not Found'): - repo.get_ref('heads/staging.other') + repo.get_ref('heads/tmp.other2') def test_new_pr_no_branch(env, project, make_repo, setreviewers, users): """ A new PR to an *unknown* branch should be ignored and warn diff --git a/runbot_merge/tests/test_project_toggles.py b/runbot_merge/tests/test_project_toggles.py index 615e6e4b..30fe4eff 100644 --- a/runbot_merge/tests/test_project_toggles.py +++ b/runbot_merge/tests/test_project_toggles.py @@ -1,3 +1,4 @@ +import datetime import functools from itertools import repeat @@ -66,16 +67,16 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second): with 
repo: [m] = repo.make_commits(None, Commit("m", tree={"ble": "1"}), ref="heads/master") - [c] = repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1") + repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1") pr1 = repo.make_pr(title="whatever", target="master", head="pr1") - [c] = repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2") + repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2") pr2 = repo.make_pr(title="whatever", target="master", head="pr2") - [c] = repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3") + repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3") pr3 = repo.make_pr(title="whatever", target="master", head="pr3") - [c] = repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4") + repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4") pr4 = repo.make_pr(title="whatever", target="master", head="pr4") prs = [pr1, pr2, pr3, pr4] @@ -106,6 +107,13 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second): # trigger a split with repo: repo.post_status('staging.master', 'failure') + + # specifically delay creation of new staging to observe the failed + # staging's state and the splits + model, cron_id = env['ir.model.data'].check_object_reference('runbot_merge', 'staging_cron') + env[model].browse([cron_id]).write({ + 'nextcall': (datetime.datetime.utcnow() + datetime.timedelta(minutes=10)).isoformat(" ", "seconds") + }) env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron') assert not staging.active assert not env['runbot_merge.stagings'].search([]).active From 57a82235d9694bf6494b3b4090abfdee285307bc Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Tue, 30 Jul 2024 13:42:00 +0200 Subject: [PATCH 3/8] [IMP] *: rely only on triggers to run statuses propagation The cron had been converted to using mostly triggers in 7cd9afe7f2ff2cbaaabe0ecf6eb364b55aaf8d60 but I forgot to update the *tests* to avoid 
explicitly triggering it. --- forwardport/tests/conftest.py | 1 - forwardport/tests/test_simple.py | 2 +- runbot_merge/data/merge_cron.xml | 4 ++-- runbot_merge/tests/conftest.py | 2 -- runbot_merge/tests/test_basic.py | 8 ++++---- runbot_merge/tests/test_by_branch.py | 20 ++++++++++---------- runbot_merge/tests/test_project_toggles.py | 4 ++-- 7 files changed, 19 insertions(+), 22 deletions(-) diff --git a/forwardport/tests/conftest.py b/forwardport/tests/conftest.py index 408345af..0a656c52 100644 --- a/forwardport/tests/conftest.py +++ b/forwardport/tests/conftest.py @@ -7,7 +7,6 @@ import requests @pytest.fixture def default_crons(): return [ - 'runbot_merge.process_updated_commits', 'runbot_merge.merge_cron', 'runbot_merge.staging_cron', 'forwardport.port_forward', diff --git a/forwardport/tests/test_simple.py b/forwardport/tests/test_simple.py index 65bfca2d..2e5a5af8 100644 --- a/forwardport/tests/test_simple.py +++ b/forwardport/tests/test_simple.py @@ -597,7 +597,7 @@ def test_disapproval(env, config, make_repo, users): prod.post_status(pr2_id.head, 'success') pr2.post_comment('hansen r+', token=config['role_other']['token']) # no point creating staging for our needs, just propagate statuses - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr1_id.state == 'ready' assert pr2_id.state == 'ready' diff --git a/runbot_merge/data/merge_cron.xml b/runbot_merge/data/merge_cron.xml index ffd3147f..3240e3ae 100644 --- a/runbot_merge/data/merge_cron.xml +++ b/runbot_merge/data/merge_cron.xml @@ -70,8 +70,8 @@ code model._notify() - 30 - minutes + 6 + hours -1 20 diff --git a/runbot_merge/tests/conftest.py b/runbot_merge/tests/conftest.py index b663a688..5f84298c 100644 --- a/runbot_merge/tests/conftest.py +++ b/runbot_merge/tests/conftest.py @@ -9,8 +9,6 @@ def default_crons(): return [ # env['runbot_merge.project']._check_fetch() 'runbot_merge.fetch_prs_cron', - # env['runbot_merge.commit']._notify() - 
'runbot_merge.process_updated_commits', # env['runbot_merge.project']._check_stagings() 'runbot_merge.merge_cron', # env['runbot_merge.project']._create_stagings() diff --git a/runbot_merge/tests/test_basic.py b/runbot_merge/tests/test_basic.py index abb03ec4..e738787d 100644 --- a/runbot_merge/tests/test_basic.py +++ b/runbot_merge/tests/test_basic.py @@ -607,7 +607,7 @@ def test_timeout_bump_on_pending(env, repo, config): st.timeout_limit = old_timeout with repo: repo.post_status('staging.master', 'pending', 'ci/runbot') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert st.timeout_limit > old_timeout def test_staging_ci_failure_single(env, repo, users, config, page): @@ -2463,7 +2463,7 @@ class TestPRUpdate(object): env.run_crons() pr_id = to_pr(env, pr) - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.message == 'title\n\nbody' assert pr_id.state == 'ready' old_reviewer = pr_id.reviewed_by @@ -3008,7 +3008,7 @@ class TestBatching(object): reviewer=None, ) pr02.post_comment('hansen alone r+', config['role_reviewer']['token']) - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) pr01_id = to_pr(env, pr01) assert pr01_id.blocked is False pr02_id = to_pr(env, pr02) @@ -3077,7 +3077,7 @@ class TestBatching(object): with repo: repo.post_status('staging.master', 'success', 'ci/runbot') repo.post_status('staging.master', 'success', 'legal/cla') - env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron', 'runbot_merge.staging_cron') + env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') assert pr2.state == 'merged' class TestReviewing: diff --git a/runbot_merge/tests/test_by_branch.py b/runbot_merge/tests/test_by_branch.py index efaf0394..9134a4e2 100644 --- a/runbot_merge/tests/test_by_branch.py +++ b/runbot_merge/tests/test_by_branch.py @@ -34,15 +34,15 @@ def test_status_applies(env, repo, config): with repo: repo.post_status(c, 
'success', 'ci') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.state == 'opened' with repo: repo.post_status(c, 'success', 'pr') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.state == 'opened' with repo: repo.post_status(c, 'success', 'lint') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.state == 'validated' with repo: @@ -53,15 +53,15 @@ def test_status_applies(env, repo, config): assert st.state == 'pending' with repo: repo.post_status('staging.master', 'success', 'ci') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert st.state == 'pending' with repo: repo.post_status('staging.master', 'success', 'lint') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert st.state == 'pending' with repo: repo.post_status('staging.master', 'success', 'staging') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert st.state == 'success' @pytest.mark.usefixtures('_setup_statuses') @@ -84,11 +84,11 @@ def test_status_skipped(env, project, repo, config): with repo: repo.post_status(c, 'success', 'ci') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.state == 'opened' with repo: repo.post_status(c, 'success', 'pr') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert pr_id.state == 'validated' with repo: @@ -99,11 +99,11 @@ def test_status_skipped(env, project, repo, config): assert st.state == 'pending' with repo: repo.post_status('staging.maintenance', 'success', 'staging') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert st.state == 'pending' with repo: repo.post_status('staging.maintenance', 'success', 'ci') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert st.state == 'success' def 
test_pseudo_version_tag(env, project, make_repo, setreviewers, config): diff --git a/runbot_merge/tests/test_project_toggles.py b/runbot_merge/tests/test_project_toggles.py index 30fe4eff..30ce5f78 100644 --- a/runbot_merge/tests/test_project_toggles.py +++ b/runbot_merge/tests/test_project_toggles.py @@ -101,7 +101,7 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second): for pr, pr_id in zip(prs[cutoff:], pr_ids[cutoff:]): pr.post_comment('hansen r+', config['role_reviewer']['token']) repo.post_status(pr_id.head, 'success') - env.run_crons('runbot_merge.process_updated_commits') + env.run_crons(None) assert not pr_ids.filtered(lambda p: p.blocked) # trigger a split @@ -114,7 +114,7 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second): env[model].browse([cron_id]).write({ 'nextcall': (datetime.datetime.utcnow() + datetime.timedelta(minutes=10)).isoformat(" ", "seconds") }) - env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron') + env.run_crons('runbot_merge.merge_cron') assert not staging.active assert not env['runbot_merge.stagings'].search([]).active assert env['runbot_merge.split'].search_count([]) == 2 From 029957dbebb2039d40f55c83835e6576c463e18a Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Tue, 30 Jul 2024 13:45:51 +0200 Subject: [PATCH 4/8] [IMP] *: trigger-ify task queue type crons These are pretty simple to convert as they are straightforward: an item is added to a work queue (table), then a cron regularly scans through the table executing the items and deleting them. That means the cron trigger can just be added on `create` and things should work out fine. There's just two wrinkles in the port_forward cron: - It can be requeued in the future, so needs a conditional trigger-ing in `write`. - It is disabled during freeze (maybe something to change), as a result triggers don't enqueue at all, so we need to immediately trigger after freeze to force the cron re-enabling it. 
--- forwardport/data/crons.xml | 10 +++++----- forwardport/models/forwardport.py | 18 ++++++++++++++++++ forwardport/models/project_freeze.py | 4 +++- forwardport/tests/conftest.py | 3 --- forwardport/tests/test_simple.py | 12 ++++++------ forwardport/tests/test_weird.py | 16 ++++++---------- runbot_merge/data/merge_cron.xml | 8 ++++---- runbot_merge/models/pull_requests.py | 11 +++++++++++ runbot_merge/tests/conftest.py | 4 ---- runbot_merge/tests/test_basic.py | 2 -- 10 files changed, 53 insertions(+), 35 deletions(-) diff --git a/forwardport/data/crons.xml b/forwardport/data/crons.xml index 7d0e1bf7..b3ce4591 100644 --- a/forwardport/data/crons.xml +++ b/forwardport/data/crons.xml @@ -4,8 +4,8 @@ code model._process() - 1 - minutes + 6 + hours -1 43 @@ -16,8 +16,8 @@ code model._process() - 1 - minutes + 6 + hours -1 46 @@ -39,7 +39,7 @@ code model._process() - 1 + 6 hours -1 diff --git a/forwardport/models/forwardport.py b/forwardport/models/forwardport.py index b6ac07d8..e805e59a 100644 --- a/forwardport/models/forwardport.py +++ b/forwardport/models/forwardport.py @@ -77,6 +77,16 @@ class ForwardPortTasks(models.Model, Queue): retry_after = fields.Datetime(required=True, default='1900-01-01 01:01:01') pr_id = fields.Many2one('runbot_merge.pull_requests') + def create(self, vals_list): + self.env.ref('forwardport.port_forward')._trigger() + return super().create(vals_list) + + def write(self, vals): + if retry := vals.get('retry_after'): + self.env.ref('forwardport.port_forward')\ + ._trigger(fields.Datetime.to_datetime(retry)) + return super().write(vals) + def _search_domain(self): return super()._search_domain() + [ ('retry_after', '<=', fields.Datetime.to_string(fields.Datetime.now())), @@ -260,6 +270,10 @@ class UpdateQueue(models.Model, Queue): original_root = fields.Many2one('runbot_merge.pull_requests') new_root = fields.Many2one('runbot_merge.pull_requests') + def create(self, vals_list): + self.env.ref('forwardport.updates')._trigger() + return 
super().create(vals_list) + def _process_item(self): previous = self.new_root sentry_sdk.set_tag("update-root", self.new_root.display_name) @@ -345,6 +359,10 @@ class DeleteBranches(models.Model, Queue): pr_id = fields.Many2one('runbot_merge.pull_requests') + def create(self, vals_list): + self.env.ref('forwardport.remover')._trigger(datetime.now() - MERGE_AGE) + return super().create(vals_list) + def _search_domain(self): cutoff = getattr(builtins, 'forwardport_merged_before', None) \ or fields.Datetime.to_string(datetime.now() - MERGE_AGE) diff --git a/forwardport/models/project_freeze.py b/forwardport/models/project_freeze.py index 635912c0..0796fbaf 100644 --- a/forwardport/models/project_freeze.py +++ b/forwardport/models/project_freeze.py @@ -22,5 +22,7 @@ class FreezeWizard(models.Model): def unlink(self): r = super().unlink() if not (self.env.context.get('forwardport_keep_disabled') or self.search_count([])): - self.env.ref('forwardport.port_forward').active = True + cron = self.env.ref('forwardport.port_forward') + cron.active = True + cron._trigger() # process forward ports enqueued during the freeze period return r diff --git a/forwardport/tests/conftest.py b/forwardport/tests/conftest.py index 0a656c52..5243d96e 100644 --- a/forwardport/tests/conftest.py +++ b/forwardport/tests/conftest.py @@ -9,10 +9,7 @@ def default_crons(): return [ 'runbot_merge.merge_cron', 'runbot_merge.staging_cron', - 'forwardport.port_forward', - 'forwardport.updates', 'runbot_merge.check_linked_prs_status', - 'runbot_merge.feedback_cron', ] # public_repo — necessary to leave comments diff --git a/forwardport/tests/test_simple.py b/forwardport/tests/test_simple.py index 2e5a5af8..634a25c0 100644 --- a/forwardport/tests/test_simple.py +++ b/forwardport/tests/test_simple.py @@ -123,7 +123,7 @@ def test_straightforward_flow(env, config, make_repo, users): prod.post_status(pr1.head, 'success', 'legal/cla') env.run_crons() - env.run_crons('forwardport.reminder', 
'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) pr0_, pr1_, pr2 = env['runbot_merge.pull_requests'].search([], order='number') @@ -329,8 +329,8 @@ def test_empty(env, config, make_repo, users): assert project.fp_github_name == users['other'] # check reminder - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) awaiting = ( users['other'], @@ -374,7 +374,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port # check that this stops if we close the PR with prod: fail_pr.close() - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) assert pr1.comments == [ (users['reviewer'], 'hansen r+'), seen(env, pr1, users), @@ -604,7 +604,7 @@ def test_disapproval(env, config, make_repo, users): # oh no, pr1 has an error! 
with prod: pr1.post_comment('hansen r-', token=config['role_other']['token']) - env.run_crons('runbot_merge.feedback_cron') + env.run_crons(None) assert pr1_id.state == 'validated', "pr1 should not be approved anymore" assert pr2_id.state == 'ready', "pr2 should not be affected" @@ -899,7 +899,7 @@ class TestClosing: prod.post_status(pr1_id.head, 'success', 'legal/cla') prod.post_status(pr1_id.head, 'success', 'ci/runbot') env.run_crons() - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron') + env.run_crons('forwardport.reminder') assert env['runbot_merge.pull_requests'].search([], order='number') == pr0_id | pr1_id,\ "closing the PR should suppress the FP sequence" diff --git a/forwardport/tests/test_weird.py b/forwardport/tests/test_weird.py index 47cf970a..19feda24 100644 --- a/forwardport/tests/test_weird.py +++ b/forwardport/tests/test_weird.py @@ -695,7 +695,7 @@ def test_retarget_after_freeze(env, config, make_repo, users): port_pr.base = 'bprime' assert port_id.target == new_branch - env.run_crons('forwardport.port_forward') + env.run_crons(None) assert not job.exists(), "job should have succeeded and apoptosed" # since the PR was "already forward-ported" to the new branch it should not @@ -816,11 +816,7 @@ def test_freeze(env, config, make_repo, users): # re-enable forward-port cron after freeze _, cron_id = env['ir.model.data'].check_object_reference('forwardport', 'port_forward', context={'active_test': False}) env['ir.cron'].browse([cron_id]).active = True - - # run crons to process the feedback, run a second time in case of e.g. 
- # forward porting - env.run_crons() - env.run_crons() + env.run_crons('forwardport.port_forward') assert release_id.state == 'merged' assert not env['runbot_merge.pull_requests'].search([ @@ -896,7 +892,7 @@ def test_missing_magic_ref(env, config, make_repo): # check that the batch is still here and targeted for the future req = env['forwardport.batches'].search([]) assert len(req) == 1 - assert req.retry_after > datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + assert req.retry_after > datetime.utcnow().isoformat(" ", "seconds") # reset retry_after req.retry_after = '1900-01-01 01:01:01' @@ -905,7 +901,7 @@ def test_missing_magic_ref(env, config, make_repo): [c2] = prod.make_commits(a_head.id, Commit('y', tree={'x': '0'})) assert c2 != c pr_id.head = c2 - env.run_crons() + env.run_crons(None) fp_id = env['runbot_merge.pull_requests'].search([('source_id', '=', pr_id.id)]) assert fp_id @@ -1161,7 +1157,7 @@ def test_reminder_detached(env, config, make_repo, users): pr_c = prod.get_pr(pr_c_id.number) # region sanity check - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) assert pr_b.comments == [ seen(env, pr_b, users), @@ -1195,7 +1191,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port # region check detached pr_c_id.write({'parent_id': False, 'detach_reason': 'because'}) - env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK}) + env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK}) assert pr_b.comments[2:] == [ (users['user'], "@%s @%s child PR %s was modified / updated and has become a normal PR. This PR (and any of its parents) will need to be merged independently as approvals won't cross." 
% ( diff --git a/runbot_merge/data/merge_cron.xml b/runbot_merge/data/merge_cron.xml index 3240e3ae..be06078f 100644 --- a/runbot_merge/data/merge_cron.xml +++ b/runbot_merge/data/merge_cron.xml @@ -26,8 +26,8 @@ code model._send() - 1 - minutes + 6 + hours -1 60 @@ -48,8 +48,8 @@ code model._check(True) - 1 - minutes + 6 + hours -1 10 diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index a5870cdc..cd331642 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -1774,6 +1774,12 @@ class Feedback(models.Model): help="Token field (from repo's project) to use to post messages" ) + @api.model_create_multi + def create(self, vals_list): + # any time a feedback is created, it can be sent + self.env.ref('runbot_merge.feedback_cron')._trigger() + return super().create(vals_list) + def _send(self): ghs = {} to_remove = [] @@ -2424,6 +2430,11 @@ class FetchJob(models.Model): number = fields.Integer(required=True, group_operator=None) closing = fields.Boolean(default=False) + @api.model_create_multi + def create(self, vals_list): + self.env.ref('runbot_merge.fetch_prs_cron')._trigger() + return super().create(vals_list) + def _check(self, commit=False): """ :param bool commit: commit after each fetch has been executed diff --git a/runbot_merge/tests/conftest.py b/runbot_merge/tests/conftest.py index 5f84298c..34d70479 100644 --- a/runbot_merge/tests/conftest.py +++ b/runbot_merge/tests/conftest.py @@ -7,16 +7,12 @@ def module(): @pytest.fixture def default_crons(): return [ - # env['runbot_merge.project']._check_fetch() - 'runbot_merge.fetch_prs_cron', # env['runbot_merge.project']._check_stagings() 'runbot_merge.merge_cron', # env['runbot_merge.project']._create_stagings() 'runbot_merge.staging_cron', # env['runbot_merge.pull_requests']._check_linked_prs_statuses() 'runbot_merge.check_linked_prs_status', - # env['runbot_merge.pull_requests.feedback']._send() - 'runbot_merge.feedback_cron', ] 
@pytest.fixture diff --git a/runbot_merge/tests/test_basic.py b/runbot_merge/tests/test_basic.py index e738787d..3208e48f 100644 --- a/runbot_merge/tests/test_basic.py +++ b/runbot_merge/tests/test_basic.py @@ -3534,7 +3534,6 @@ class TestUnknownPR: prx.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons( 'runbot_merge.fetch_prs_cron', - 'runbot_merge.feedback_cron', ) assert prx.comments == [ @@ -3559,7 +3558,6 @@ class TestUnknownPR: prx.post_review('APPROVE', 'hansen r+', config['role_reviewer']['token']) env.run_crons( 'runbot_merge.fetch_prs_cron', - 'runbot_merge.feedback_cron', ) # FIXME: either split out reviews in local or merge reviews & comments in remote From f367a64481e1c88a5b9cb239e599a44a7c6e3a90 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Wed, 31 Jul 2024 09:40:53 +0200 Subject: [PATCH 5/8] [IMP] *: trigger-ify merge cron The merge cron is the one in charge of checking staging state and either integrating the staging into the reference branch (if successful) or cancelling the staging (if failed). The most obvious trigger for the merge cron is a change in staging state from the states computation (transition from pending to either success or failure). Explicitly cancelling / failing a staging marks it as inactive so the merge cron isn't actually needed. However another major trigger is *timeout*, which doesn't have a trivial signal. Instead, it needs to be hooked on the `timeout_limit`, and has to be re-triggered at every update to the `timeout_limit`, which in normal operations is mostly from "pending" statuses bumping the timeout limit. In that case, `_trigger` to the `timeout_limit` as that's where / when we expect a status change. Worst case scenario with this is we have parasitic wakeups of this cron, but having half a dozen unnecessary wakeups in an hour is still probably better than having a wakeup every minute.
--- forwardport/tests/conftest.py | 1 - runbot_merge/data/merge_cron.xml | 4 +- runbot_merge/models/pull_requests.py | 21 ++- runbot_merge/tests/conftest.py | 2 - runbot_merge/tests/test_basic.py | 146 +++++++++++---------- runbot_merge/tests/test_multirepo.py | 2 +- runbot_merge/tests/test_project_toggles.py | 2 +- 7 files changed, 99 insertions(+), 79 deletions(-) diff --git a/forwardport/tests/conftest.py b/forwardport/tests/conftest.py index 5243d96e..c5951502 100644 --- a/forwardport/tests/conftest.py +++ b/forwardport/tests/conftest.py @@ -7,7 +7,6 @@ import requests @pytest.fixture def default_crons(): return [ - 'runbot_merge.merge_cron', 'runbot_merge.staging_cron', 'runbot_merge.check_linked_prs_status', ] diff --git a/runbot_merge/data/merge_cron.xml b/runbot_merge/data/merge_cron.xml index be06078f..f6a4f3da 100644 --- a/runbot_merge/data/merge_cron.xml +++ b/runbot_merge/data/merge_cron.xml @@ -4,8 +4,8 @@ code model._check_stagings(True) - 1 - minutes + 6 + hours -1 30 diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index cd331642..0955c853 100644 --- a/runbot_merge/models/pull_requests.py +++ b/runbot_merge/models/pull_requests.py @@ -2062,16 +2062,25 @@ class Stagings(models.Model): for context, status in statuses.get(commit.sha, {}).items() ] + def write(self, vals): + if timeout := vals.get('timeout_limit'): + self.env.ref("runbot_merge.merge_cron")\ + ._trigger(fields.Datetime.to_datetime(timeout)) + + return super().write(vals) + # only depend on staged_at as it should not get modified, but we might # update the CI timeout after the staging have been created and we # *do not* want to update the staging timeouts in that case @api.depends('staged_at') def _compute_timeout_limit(self): + timeouts = set() for st in self: - st.timeout_limit = fields.Datetime.to_string( - fields.Datetime.from_string(st.staged_at) - + datetime.timedelta(minutes=st.target.project_id.ci_timeout) - ) + t = st.timeout_limit = 
st.staged_at + datetime.timedelta(minutes=st.target.project_id.ci_timeout) + timeouts.add(t) + if timeouts: + # we might have very different limits for each staging so need to schedule them all + self.env.ref("runbot_merge.merge_cron")._trigger_list(timeouts) @api.depends('batch_ids.prs') def _compute_prs(self): @@ -2147,9 +2156,11 @@ class Stagings(models.Model): s.state = st if s.state != 'pending': + self.env.ref("runbot_merge.merge_cron")._trigger() s.staging_end = fields.Datetime.now() if update_timeout_limit: - s.timeout_limit = fields.Datetime.to_string(datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout)) + s.timeout_limit = datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout) + self.env.ref("runbot_merge.merge_cron")._trigger(s.timeout_limit) _logger.debug("%s got pending status, bumping timeout to %s (%s)", self, s.timeout_limit, cmap) def action_cancel(self): diff --git a/runbot_merge/tests/conftest.py b/runbot_merge/tests/conftest.py index 34d70479..e200434a 100644 --- a/runbot_merge/tests/conftest.py +++ b/runbot_merge/tests/conftest.py @@ -7,8 +7,6 @@ def module(): @pytest.fixture def default_crons(): return [ - # env['runbot_merge.project']._check_stagings() - 'runbot_merge.merge_cron', # env['runbot_merge.project']._create_stagings() 'runbot_merge.staging_cron', # env['runbot_merge.pull_requests']._check_linked_prs_statuses() diff --git a/runbot_merge/tests/test_basic.py b/runbot_merge/tests/test_basic.py index 3208e48f..9026aa02 100644 --- a/runbot_merge/tests/test_basic.py +++ b/runbot_merge/tests/test_basic.py @@ -3,6 +3,7 @@ import itertools import json import textwrap import time +from typing import Callable from unittest import mock import pytest @@ -13,8 +14,9 @@ import odoo from utils import _simple_init, seen, matches, get_partner, Commit, pr_page, to_pr, part_of, ensure_one @pytest.fixture(autouse=True) -def _configure_statuses(project, repo): - 
project.repo_ids.required_statuses = 'legal/cla,ci/runbot' +def _configure_statuses(request, project, repo): + if 'defaultstatuses' not in request.keywords: + project.repo_ids.required_statuses = 'legal/cla,ci/runbot' @pytest.fixture(autouse=True, params=["statuses", "rpc"]) def stagings(request, env, project, repo): @@ -562,19 +564,32 @@ def test_staging_conflict_second(env, repo, users, config): assert pr1_id.state == 'error', "now pr1 should be in error" -def test_staging_ci_timeout(env, repo, config, page): +@pytest.mark.defaultstatuses +@pytest.mark.parametrize('update_op', [ + pytest.param( + lambda _: {'timeout_limit': datetime.datetime.now().isoformat(" ", "seconds")}, + id="set-timeout-limit", + ), + pytest.param( + lambda timeout: {'staged_at': (datetime.datetime.now() - datetime.timedelta(minutes=2*timeout)).isoformat(" ", "seconds")}, + id="set-staged-at", + ), +]) +def test_staging_ci_timeout(env, repo, config, page, update_op: Callable[[int], dict]): """If a staging timeouts (~ delay since staged greater than configured)... requeue? 
""" with repo: - m = repo.make_commit(None, 'initial', None, tree={'f': 'm'}) + m, _, c2 = repo.make_commits( + None, + Commit('initial', tree={'f': 'm'}), + Commit('first', tree={'f': 'c1'}), + Commit('second', tree={'f': 'c2'}), + ) repo.make_ref('heads/master', m) - c1 = repo.make_commit(m, 'first', None, tree={'f': 'c1'}) - c2 = repo.make_commit(c1, 'second', None, tree={'f': 'c2'}) pr = repo.make_pr(title='title', body='body', target='master', head=c2) - repo.post_status(pr.head, 'success', 'ci/runbot') - repo.post_status(pr.head, 'success', 'legal/cla') + repo.post_status(pr.head, 'success') pr.post_comment('hansen r+ rebase-merge', config['role_reviewer']['token']) env.run_crons() @@ -582,23 +597,26 @@ def test_staging_ci_timeout(env, repo, config, page): assert pr_id.staging_id timeout = env['runbot_merge.project'].search([]).ci_timeout - pr_id.staging_id.staged_at = odoo.fields.Datetime.to_string(datetime.datetime.now() - datetime.timedelta(minutes=2*timeout)) - env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') + pr_id.staging_id.write(update_op(timeout)) + env.run_crons('runbot_merge.staging_cron') assert pr_id.state == 'error', "timeout should fail the PR" dangerbox = pr_page(page, pr).cssselect('.alert-danger span') assert dangerbox assert dangerbox[0].text == 'timed out (>60 minutes)' +@pytest.mark.defaultstatuses def test_timeout_bump_on_pending(env, repo, config): with repo: - m = repo.make_commit(None, 'initial', None, tree={'f': '0'}) + [m, c] = repo.make_commits( + None, + Commit('initial', tree={'f': '0'}), + Commit('c', tree={'f': '1'}), + ) repo.make_ref('heads/master', m) - c = repo.make_commit(m, 'c', None, tree={'f': '1'}) prx = repo.make_pr(title='title', body='body', target='master', head=c) - repo.post_status(prx.head, 'success', 'ci/runbot') - repo.post_status(prx.head, 'success', 'legal/cla') + repo.post_status(prx.head, 'success') prx.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() @@ 
-606,7 +624,7 @@ def test_timeout_bump_on_pending(env, repo, config): old_timeout = odoo.fields.Datetime.to_string(datetime.datetime.now() - datetime.timedelta(days=15)) st.timeout_limit = old_timeout with repo: - repo.post_status('staging.master', 'pending', 'ci/runbot') + repo.post_status('staging.master', 'pending') env.run_crons(None) assert st.timeout_limit > old_timeout @@ -1092,63 +1110,63 @@ def test_reopen_merged_pr(env, repo, config, users): ] class TestNoRequiredStatus: + @pytest.mark.defaultstatuses def test_basic(self, env, repo, config): """ check that mergebot can work on a repo with no CI at all """ env['runbot_merge.repository'].search([('name', '=', repo.name)]).status_ids = False with repo: - m = repo.make_commit(None, 'initial', None, tree={'0': '0'}) + [m, c] = repo.make_commits( + None, + Commit('initial', tree={'0': '0'}), + Commit('first', tree={'0': '1'}), + ) repo.make_ref('heads/master', m) - c = repo.make_commit(m, 'first', None, tree={'0': '1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) - prx.post_comment('hansen r+', config['role_reviewer']['token']) + pr = repo.make_pr(title='title', body='body', target='master', head=c) + pr.post_comment('hansen r+', config['role_reviewer']['token']) env.run_crons() - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]) - assert pr.state == 'ready' - st = pr.staging_id - assert st - env.run_crons() + pr_id = to_pr(env, pr) + + st = env['runbot_merge.stagings'].search([], context={'active_test': False}) assert st.state == 'success' - assert pr.state == 'merged' + assert pr_id.state == 'merged' + @pytest.mark.defaultstatuses def test_updated(self, env, repo, config): env['runbot_merge.repository'].search([('name', '=', repo.name)]).status_ids = False with repo: - m = repo.make_commit(None, 'initial', None, tree={'0': '0'}) + [m, c] = repo.make_commits( + None, + Commit('initial', tree={'0': '0'}), + 
Commit('first', tree={'0': '1'}), + ) repo.make_ref('heads/master', m) - c = repo.make_commit(m, 'first', None, tree={'0': '1'}) - prx = repo.make_pr(title='title', body='body', target='master', head=c) + pr = repo.make_pr(title='title', body='body', target='master', head=c) env.run_crons() - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]) - assert pr.state == 'validated' + pr_id = to_pr(env, pr) + assert pr_id.state == 'validated' # normal push with repo: - repo.make_commits(c, repo.Commit('second', tree={'0': '2'}), ref=prx.ref) + repo.make_commits(c, repo.Commit('second', tree={'0': '2'}), ref=pr.ref) env.run_crons() - assert pr.state == 'validated' + assert pr_id.state == 'validated' with repo: - prx.post_comment('hansen r+', config['role_reviewer']['token']) - assert pr.state == 'ready' + pr.post_comment('hansen r+', config['role_reviewer']['token']) + assert pr_id.state == 'ready' # force push with repo: - repo.make_commits(m, repo.Commit('xxx', tree={'0': 'm'}), ref=prx.ref) + repo.make_commits(m, repo.Commit('xxx', tree={'0': 'm'}), ref=pr.ref) env.run_crons() - assert pr.state == 'validated' + assert pr_id.state == 'validated' with repo: - prx.post_comment('hansen r+', config['role_reviewer']['token']) - assert pr.state == 'ready' + pr.post_comment('hansen r+', config['role_reviewer']['token']) + assert pr_id.state == 'ready' class TestRetry: @pytest.mark.xfail(reason="This may not be a good idea as it could lead to tons of rebuild spam") @@ -1230,7 +1248,7 @@ class TestRetry: with repo: pr.post_comment('hansen retry', config['role_' + retrier]['token']) assert pr_id.state == 'ready' - env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') + env.run_crons('runbot_merge.staging_cron') staging_head2 = repo.commit('heads/staging.master') assert staging_head2 != staging_head @@ -1263,7 +1281,7 @@ class TestRetry: with repo: pr.post_comment('hansen retry', 
config['role_reviewer']['token']) - env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') + env.run_crons('runbot_merge.staging_cron') with repo: repo.post_status('staging.master', 'success', 'legal/cla') @@ -1293,42 +1311,36 @@ class TestRetry: (users['user'], "@{reviewer} retry makes no sense when the PR is not in error.".format_map(users)), ] + @pytest.mark.defaultstatuses @pytest.mark.parametrize('disabler', ['user', 'other', 'reviewer']) def test_retry_disable(self, env, repo, disabler, users, config): with repo: prx = _simple_init(repo) - repo.post_status(prx.head, 'success', 'ci/runbot') - repo.post_status(prx.head, 'success', 'legal/cla') + repo.post_status(prx.head, 'success') prx.post_comment('hansen r+ delegate=%s rebase-merge' % users['other'], config["role_reviewer"]['token']) env.run_crons() - assert env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]).staging_id + pr_id = to_pr(env, prx) + staging_id = pr_id.staging_id + assert staging_id - staging_head = repo.commit('heads/staging.master') with repo: - repo.post_status('staging.master', 'success', 'legal/cla') - repo.post_status('staging.master', 'failure', 'ci/runbot') + repo.post_status('staging.master', 'failure') env.run_crons() - pr = env['runbot_merge.pull_requests'].search([ - ('repository.name', '=', repo.name), - ('number', '=', prx.number) - ]) - assert pr.state == 'error' + assert staging_id.state == 'failure' + assert not staging_id.active + assert pr_id.state == 'error' with repo: prx.post_comment('hansen r-', config['role_' + disabler]['token']) - assert pr.state == 'validated' + assert pr_id.state == 'validated' with repo: repo.make_commit(prx.ref, 'third', None, tree={'m': 'c3'}) # just in case, apparently in some case the first post_status uses the old head... 
with repo: - repo.post_status(prx.head, 'success', 'ci/runbot') - repo.post_status(prx.head, 'success', 'legal/cla') + repo.post_status(prx.head, 'success') env.run_crons() - assert pr.state == 'validated' + assert pr_id.state == 'validated' class TestMergeMethod: """ @@ -1759,7 +1771,7 @@ commits, I need to know how to merge it: c0 = repo.make_commit(m, 'C0', None, tree={'a': 'b'}) prx = repo.make_pr(title="gibberish", body="blahblah", target='master', head=c0) - env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') + env.run_crons('runbot_merge.staging_cron') with repo: repo.post_status(prx.head, 'success', 'legal/cla') @@ -1801,7 +1813,7 @@ commits, I need to know how to merge it: c0 = repo.make_commit(m, 'C0', None, tree={'a': 'b'}) prx = repo.make_pr(title="gibberish", body=None, target='master', head=c0) - env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') + env.run_crons('runbot_merge.staging_cron') with repo: repo.post_status(prx.head, 'success', 'legal/cla') @@ -3077,7 +3089,7 @@ class TestBatching(object): with repo: repo.post_status('staging.master', 'success', 'ci/runbot') repo.post_status('staging.master', 'success', 'legal/cla') - env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') + env.run_crons('runbot_merge.staging_cron') assert pr2.state == 'merged' class TestReviewing: diff --git a/runbot_merge/tests/test_multirepo.py b/runbot_merge/tests/test_multirepo.py index 3ac35a8f..3c68bd9e 100644 --- a/runbot_merge/tests/test_multirepo.py +++ b/runbot_merge/tests/test_multirepo.py @@ -493,7 +493,7 @@ def test_ff_fail(env, project, repo_a, repo_b, config): with repo_a, repo_b: repo_a.post_status('heads/staging.master', 'success') repo_b.post_status('heads/staging.master', 'success') - env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron') + env.run_crons('runbot_merge.staging_cron') assert repo_b.commit('heads/master').id == cn,\ "B should still be at the conflicting commit" assert 
repo_a.commit('heads/master').id == root_a,\ diff --git a/runbot_merge/tests/test_project_toggles.py b/runbot_merge/tests/test_project_toggles.py index 30ce5f78..0dffdccc 100644 --- a/runbot_merge/tests/test_project_toggles.py +++ b/runbot_merge/tests/test_project_toggles.py @@ -114,7 +114,7 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second): env[model].browse([cron_id]).write({ 'nextcall': (datetime.datetime.utcnow() + datetime.timedelta(minutes=10)).isoformat(" ", "seconds") }) - env.run_crons('runbot_merge.merge_cron') + env.run_crons(None) assert not staging.active assert not env['runbot_merge.stagings'].search([]).active assert env['runbot_merge.split'].search_count([]) == 2 From 3ee3e9cc8140fdf682429e31ee50b04fd7299768 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Thu, 1 Aug 2024 10:15:32 +0200 Subject: [PATCH 6/8] [IMP] *: trigger-ify staging cron The staging cron turns out to be pretty reasonable to trigger, as we already have a handler on the transition of a batch to `not blocked`, which is exactly when we want to create a staging (that and the completion of the previous staging). The batch transition is in a compute which is not awesome, but on the flip side we also cancel active stagings in that exact scenario (if it applies), so that matches. The only real finesse is that one of the tests wants to observe the instant between the end of a staging (and creation of splits) and the start of the next one, which because the staging cron is triggered by the failure of the previous staging is now "atomic", requiring disabling the staging cron, which means the trigger is skipped entirely. So this requires triggering the staging cron by hand. 
--- forwardport/tests/conftest.py | 1 - runbot_merge/data/merge_cron.xml | 4 ++-- runbot_merge/models/batch.py | 16 +++++++++------- runbot_merge/models/pull_requests.py | 3 +++ runbot_merge/tests/conftest.py | 2 -- runbot_merge/tests/test_basic.py | 12 ++++++------ runbot_merge/tests/test_multirepo.py | 2 +- runbot_merge/tests/test_project_toggles.py | 10 ++++++---- 8 files changed, 27 insertions(+), 23 deletions(-) diff --git a/forwardport/tests/conftest.py b/forwardport/tests/conftest.py index c5951502..58f57184 100644 --- a/forwardport/tests/conftest.py +++ b/forwardport/tests/conftest.py @@ -7,7 +7,6 @@ import requests @pytest.fixture def default_crons(): return [ - 'runbot_merge.staging_cron', 'runbot_merge.check_linked_prs_status', ] diff --git a/runbot_merge/data/merge_cron.xml b/runbot_merge/data/merge_cron.xml index f6a4f3da..aa9de63e 100644 --- a/runbot_merge/data/merge_cron.xml +++ b/runbot_merge/data/merge_cron.xml @@ -15,8 +15,8 @@ code model._create_stagings(True) - 1 - minutes + 6 + hours -1 40 diff --git a/runbot_merge/models/batch.py b/runbot_merge/models/batch.py index eace55b4..d920802f 100644 --- a/runbot_merge/models/batch.py +++ b/runbot_merge/models/batch.py @@ -223,13 +223,15 @@ class Batch(models.Model): failed and f"{failed} have failed CI", ])) else: - if batch.blocked and batch.cancel_staging: - if splits := batch.target.split_ids: - splits.unlink() - batch.target.active_staging_id.cancel( - 'unstaged by %s becoming ready', - ', '.join(batch.prs.mapped('display_name')), - ) + if batch.blocked: + self.env.ref("runbot_merge.staging_cron")._trigger() + if batch.cancel_staging: + if splits := batch.target.split_ids: + splits.unlink() + batch.target.active_staging_id.cancel( + 'unstaged by %s becoming ready', + ', '.join(batch.prs.mapped('display_name')), + ) batch.blocked = False diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py index 0955c853..ea55d03b 100644 --- a/runbot_merge/models/pull_requests.py 
+++ b/runbot_merge/models/pull_requests.py @@ -2067,6 +2067,9 @@ class Stagings(models.Model): self.env.ref("runbot_merge.merge_cron")\ ._trigger(fields.Datetime.to_datetime(timeout)) + if vals.get('active') is False: + self.env.ref("runbot_merge.staging_cron")._trigger() + return super().write(vals) # only depend on staged_at as it should not get modified, but we might diff --git a/runbot_merge/tests/conftest.py b/runbot_merge/tests/conftest.py index e200434a..6143afe6 100644 --- a/runbot_merge/tests/conftest.py +++ b/runbot_merge/tests/conftest.py @@ -7,8 +7,6 @@ def module(): @pytest.fixture def default_crons(): return [ - # env['runbot_merge.project']._create_stagings() - 'runbot_merge.staging_cron', # env['runbot_merge.pull_requests']._check_linked_prs_statuses() 'runbot_merge.check_linked_prs_status', ] diff --git a/runbot_merge/tests/test_basic.py b/runbot_merge/tests/test_basic.py index 9026aa02..a68d70f8 100644 --- a/runbot_merge/tests/test_basic.py +++ b/runbot_merge/tests/test_basic.py @@ -598,7 +598,7 @@ def test_staging_ci_timeout(env, repo, config, page, update_op: Callable[[int], timeout = env['runbot_merge.project'].search([]).ci_timeout pr_id.staging_id.write(update_op(timeout)) - env.run_crons('runbot_merge.staging_cron') + env.run_crons(None) assert pr_id.state == 'error', "timeout should fail the PR" dangerbox = pr_page(page, pr).cssselect('.alert-danger span') @@ -1248,7 +1248,7 @@ class TestRetry: with repo: pr.post_comment('hansen retry', config['role_' + retrier]['token']) assert pr_id.state == 'ready' - env.run_crons('runbot_merge.staging_cron') + env.run_crons(None) staging_head2 = repo.commit('heads/staging.master') assert staging_head2 != staging_head @@ -1281,7 +1281,7 @@ class TestRetry: with repo: pr.post_comment('hansen retry', config['role_reviewer']['token']) - env.run_crons('runbot_merge.staging_cron') + env.run_crons(None) with repo: repo.post_status('staging.master', 'success', 'legal/cla') @@ -1771,7 +1771,7 @@ commits, I need 
to know how to merge it: c0 = repo.make_commit(m, 'C0', None, tree={'a': 'b'}) prx = repo.make_pr(title="gibberish", body="blahblah", target='master', head=c0) - env.run_crons('runbot_merge.staging_cron') + env.run_crons(None) with repo: repo.post_status(prx.head, 'success', 'legal/cla') @@ -1813,7 +1813,7 @@ commits, I need to know how to merge it: c0 = repo.make_commit(m, 'C0', None, tree={'a': 'b'}) prx = repo.make_pr(title="gibberish", body=None, target='master', head=c0) - env.run_crons('runbot_merge.staging_cron') + env.run_crons(None) with repo: repo.post_status(prx.head, 'success', 'legal/cla') @@ -3089,7 +3089,7 @@ class TestBatching(object): with repo: repo.post_status('staging.master', 'success', 'ci/runbot') repo.post_status('staging.master', 'success', 'legal/cla') - env.run_crons('runbot_merge.staging_cron') + env.run_crons(None) assert pr2.state == 'merged' class TestReviewing: diff --git a/runbot_merge/tests/test_multirepo.py b/runbot_merge/tests/test_multirepo.py index 3c68bd9e..0efb7cfc 100644 --- a/runbot_merge/tests/test_multirepo.py +++ b/runbot_merge/tests/test_multirepo.py @@ -493,7 +493,7 @@ def test_ff_fail(env, project, repo_a, repo_b, config): with repo_a, repo_b: repo_a.post_status('heads/staging.master', 'success') repo_b.post_status('heads/staging.master', 'success') - env.run_crons('runbot_merge.staging_cron') + env.run_crons(None) assert repo_b.commit('heads/master').id == cn,\ "B should still be at the conflicting commit" assert repo_a.commit('heads/master').id == root_a,\ diff --git a/runbot_merge/tests/test_project_toggles.py b/runbot_merge/tests/test_project_toggles.py index 0dffdccc..db838df6 100644 --- a/runbot_merge/tests/test_project_toggles.py +++ b/runbot_merge/tests/test_project_toggles.py @@ -111,15 +111,17 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second): # specifically delay creation of new staging to observe the failed # staging's state and the splits model, cron_id = 
env['ir.model.data'].check_object_reference('runbot_merge', 'staging_cron') - env[model].browse([cron_id]).write({ - 'nextcall': (datetime.datetime.utcnow() + datetime.timedelta(minutes=10)).isoformat(" ", "seconds") - }) + staging_cron = env[model].browse([cron_id]) + staging_cron.active = False + env.run_crons(None) assert not staging.active assert not env['runbot_merge.stagings'].search([]).active assert env['runbot_merge.split'].search_count([]) == 2 - env.run_crons() + staging_cron.active = True + # manually trigger that cron, as having the cron disabled prevented the creation of the triggers entirely + env.run_crons('runbot_merge.staging_cron') # check that st.pr_ids are the PRs we expect st = env['runbot_merge.stagings'].search([]) From 157657af49552d8e6b9f4eafc793c980d2d71f91 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Fri, 2 Aug 2024 09:13:19 +0200 Subject: [PATCH 7/8] [REM] *: default_crons fixture With the trigger-ification pretty much complete the only cron that's still routinely triggered explicitly is the cross-pr check, and it's that in all modules, so there's no cause to keep an overridable fixture. 
--- conftest.py | 9 ++++----- forwardport/tests/conftest.py | 6 ------ runbot_merge/tests/conftest.py | 7 ------- 3 files changed, 4 insertions(+), 18 deletions(-) diff --git a/conftest.py b/conftest.py index 59f0753e..b043a48b 100644 --- a/conftest.py +++ b/conftest.py @@ -540,8 +540,8 @@ def server(request, db, port, module, addons_path, tmpdir): p.wait(timeout=30) @pytest.fixture -def env(request, port, server, db, default_crons): - yield Environment(port, db, default_crons) +def env(request, port, server, db): + yield Environment(port, db) if request.node.get_closest_marker('expect_log_errors'): if b"Traceback (most recent call last):" not in server[1]: pytest.fail("should have found error in logs.") @@ -1229,11 +1229,10 @@ class LabelsProxy(collections.abc.MutableSet): assert r.ok, r.text class Environment: - def __init__(self, port, db, default_crons=()): + def __init__(self, port, db): self._uid = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/common'.format(port)).authenticate(db, 'admin', 'admin', {}) self._object = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/object'.format(port)) self._db = db - self._default_crons = default_crons def __call__(self, model, method, *args, **kwargs): return self._object.execute_kw( @@ -1255,7 +1254,7 @@ class Environment: def run_crons(self, *xids, **kw): - crons = xids or self._default_crons + crons = xids or ['runbot_merge.check_linked_prs_status'] cron_ids = [] for xid in crons: if xid is None: diff --git a/forwardport/tests/conftest.py b/forwardport/tests/conftest.py index 58f57184..8764943d 100644 --- a/forwardport/tests/conftest.py +++ b/forwardport/tests/conftest.py @@ -4,12 +4,6 @@ import re import pytest import requests -@pytest.fixture -def default_crons(): - return [ - 'runbot_merge.check_linked_prs_status', - ] - # public_repo — necessary to leave comments # admin:repo_hook — to set up hooks (duh) # delete_repo — to cleanup repos created under a user diff --git 
a/runbot_merge/tests/conftest.py b/runbot_merge/tests/conftest.py index 6143afe6..17b1546e 100644 --- a/runbot_merge/tests/conftest.py +++ b/runbot_merge/tests/conftest.py @@ -4,13 +4,6 @@ def module(): return 'runbot_merge' -@pytest.fixture -def default_crons(): - return [ - # env['runbot_merge.pull_requests']._check_linked_prs_statuses() - 'runbot_merge.check_linked_prs_status', - ] - @pytest.fixture def project(env, config): return env['runbot_merge.project'].create({ From 78cc8835cec024e246e6bfeafbdae9222db8b363 Mon Sep 17 00:00:00 2001 From: Xavier Morel Date: Mon, 5 Aug 2024 08:03:56 +0200 Subject: [PATCH 8/8] [IMP] runbot_merge: avoid triggering every cron on every test Since every cron runs on a fresh database, on the first `run_crons` every single cron in the db will run even though almost none of them is relevant. Aside from the slight inefficiency, this creates unnecessary extra garbage in the test logs. By setting the `nextcall` of all crons to infinity in the template we avoid this issue, only triggered crons (or the crons whose nextcall we set ourselves) will trigger during calls. This requires adjusting the branch cleanup cron slightly: it didn't correctly handle the initial run (`lastcall` being false).
--- conftest.py | 1 + runbot_merge/models/crons/cleanup_scratch_branches.py | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/conftest.py b/conftest.py index b043a48b..b17d2b09 100644 --- a/conftest.py +++ b/conftest.py @@ -326,6 +326,7 @@ class DbDict(dict): f.write(db) f.flush() os.fsync(f.fileno()) + subprocess.run(['psql', db, '-c', "UPDATE ir_cron SET nextcall = 'infinity'"]) return db diff --git a/runbot_merge/models/crons/cleanup_scratch_branches.py b/runbot_merge/models/crons/cleanup_scratch_branches.py index a5e041ce..cfe05f32 100644 --- a/runbot_merge/models/crons/cleanup_scratch_branches.py +++ b/runbot_merge/models/crons/cleanup_scratch_branches.py @@ -9,10 +9,11 @@ class BranchCleanup(models.TransientModel): _description = "cleans up scratch refs for deactivated branches" def _run(self): - deactivated = self.env['runbot_merge.branch'].search([ - ('active', '=', False), - ('write_date', '>=', self.env.context['lastcall']), - ]) + domain = [('active', '=', False)] + if lastcall := self.env.context['lastcall']: + domain.append(('write_date', '>=', lastcall)) + deactivated = self.env['runbot_merge.branch'].search(domain) + _logger.info( "deleting scratch (tmp and staging) refs for branches %s", ', '.join(b.name for b in deactivated)