diff --git a/conftest.py b/conftest.py
index 610a75d3..1357fdf3 100644
--- a/conftest.py
+++ b/conftest.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import datetime
 import select
 import shutil
 import threading
@@ -316,7 +317,8 @@ class DbDict(dict):
                 '-d', db, '-i', module + ',saas_worker,auth_oauth',
                 '--max-cron-threads', '0',
                 '--stop-after-init',
-                '--log-level', 'warn'
+                '--log-level', 'warn',
+                '--log-handler', 'py.warnings:ERROR',
             ],
             check=True,
             env={**os.environ, 'XDG_DATA_HOME': str(d)}
@@ -324,6 +326,7 @@ class DbDict(dict):
             f.write(db)
             f.flush()
             os.fsync(f.fileno())
+            subprocess.run(['psql', db, '-c', "UPDATE ir_cron SET nextcall = 'infinity'"])
 
         return db
@@ -413,15 +416,58 @@ def dummy_addons_path():
     mod = pathlib.Path(dummy_addons_path, 'saas_worker')
     mod.mkdir(0o700)
     (mod / '__init__.py').write_text('''\
+import builtins
+import logging
+import threading
+
+import psycopg2
+
+import odoo
 from odoo import api, fields, models
 
+_logger = logging.getLogger(__name__)
+
 class Base(models.AbstractModel):
     _inherit = 'base'
 
     def run_crons(self):
+        builtins.forwardport_merged_before = self.env.context.get('forwardport_merged_before')
+        builtins.forwardport_updated_before = self.env.context.get('forwardport_updated_before')
         self.env['ir.cron']._process_jobs(self.env.cr.dbname)
+        del builtins.forwardport_updated_before
+        del builtins.forwardport_merged_before
         return True
+
+
+class IrCron(models.Model):
+    _inherit = 'ir.cron'
+
+    @classmethod
+    def _process_jobs(cls, db_name):
+        t = threading.current_thread()
+        try:
+            db = odoo.sql_db.db_connect(db_name)
+            t.dbname = db_name
+            with db.cursor() as cron_cr:
+                # FIXME: override `_get_all_ready_jobs` to directly lock the cron?
+                while jobs := next((
+                    job
+                    for j in cls._get_all_ready_jobs(cron_cr)
+                    if (job := cls._acquire_one_job(cron_cr, (j['id'],)))
+                ), None):
+                    # take into account overridings of _process_job() on that database
+                    registry = odoo.registry(db_name)
+                    registry[cls._name]._process_job(db, cron_cr, job)
+                    cron_cr.commit()
+        except psycopg2.ProgrammingError as e:
+            raise
+        except Exception:
+            _logger.warning('Exception in cron:', exc_info=True)
+        finally:
+            if hasattr(t, 'dbname'):
+                del t.dbname
 ''', encoding='utf-8')
     (mod / '__manifest__.py').write_text(pprint.pformat({
         'name': 'dummy saas_worker',
@@ -445,6 +491,7 @@ def addons_path(request, dummy_addons_path):
 def server(request, db, port, module, addons_path, tmpdir):
     log_handlers = [
         'odoo.modules.loading:WARNING',
+        'py.warnings:ERROR',
     ]
     if not request.config.getoption('--log-github'):
         log_handlers.append('github_requests:WARNING')
@@ -500,8 +547,8 @@ def server(request, db, port, module, addons_path, tmpdir):
         p.wait(timeout=30)
 
 @pytest.fixture
-def env(request, port, server, db, default_crons):
-    yield Environment(port, db, default_crons)
+def env(request, port, server, db):
+    yield Environment(port, db)
     if request.node.get_closest_marker('expect_log_errors'):
         if b"Traceback (most recent call last):" not in server[1]:
             pytest.fail("should have found error in logs.")
@@ -608,7 +655,6 @@ def _rate_limited(req):
         if not q.ok and q.headers.get('X-RateLimit-Remaining') == '0':
             reset = int(q.headers['X-RateLimit-Reset'])
             delay = max(0, round(reset - time.time() + 1.0))
-            print("Hit rate limit, sleeping for", delay, "seconds")
             time.sleep(delay)
             continue
         break
@@ -1190,11 +1236,10 @@ class LabelsProxy(collections.abc.MutableSet):
         assert r.ok, r.text
 
 class Environment:
-    def __init__(self, port, db, default_crons=()):
+    def __init__(self, port, db):
         self._uid = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/common'.format(port)).authenticate(db, 'admin', 'admin', {})
         self._object = xmlrpc.client.ServerProxy('http://localhost:{}/xmlrpc/2/object'.format(port))
         self._db = db
-        self._default_crons = default_crons
 
     def __call__(self, model, method, *args, **kwargs):
         return self._object.execute_kw(
@@ -1216,16 +1261,20 @@ class Environment:
 
     def run_crons(self, *xids, **kw):
-        crons = xids or self._default_crons
-        print('running crons', crons, file=sys.stderr)
+        crons = xids or ['runbot_merge.check_linked_prs_status']
+        cron_ids = []
         for xid in crons:
-            t0 = time.time()
-            print('\trunning cron', xid, '...', file=sys.stderr)
+            if xid is None:
+                continue
+
             model, cron_id = self('ir.model.data', 'check_object_reference', *xid.split('.', 1))
             assert model == 'ir.cron', "Expected {} to be a cron, got {}".format(xid, model)
-            self('ir.cron', 'method_direct_trigger', [cron_id], **kw)
-            print('\tdone %.3fs' % (time.time() - t0), file=sys.stderr)
-        print('done', file=sys.stderr)
+            cron_ids.append(cron_id)
+        if cron_ids:
+            self('ir.cron', 'write', cron_ids, {
+                'nextcall': (datetime.datetime.utcnow() - datetime.timedelta(seconds=30)).isoformat(" ", "seconds")
+            }, **kw)
+        self('base', 'run_crons', [], **kw)
 
         # sleep for some time as a lot of crap may have happened (?)
         wait_for_hook()
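Note on the test driver above: `run_crons` no longer invokes `method_direct_trigger`
per cron. Named xids only get their `nextcall` backdated; the actual processing goes
through the `base.run_crons` RPC (the dummy `saas_worker` override), which handles
every job that is ready or has a pending trigger. A usage sketch, illustrative only,
names taken from this patch:

    # process only the jobs whose cron triggers are already pending
    env.run_crons(None)
    # additionally force one specific cron to be ready, then process
    env.run_crons('runbot_merge.staging_cron')
    # no argument: forces runbot_merge.check_linked_prs_status, then processes
    env.run_crons()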
diff --git a/forwardport/data/crons.xml b/forwardport/data/crons.xml
index 02d2be1e..b3ce4591 100644
--- a/forwardport/data/crons.xml
+++ b/forwardport/data/crons.xml
@@ -4,10 +4,11 @@
         <field name="state">code</field>
         <field name="code">model._process()</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
+        <field name="priority">43</field>
     </record>
@@ -15,10 +16,11 @@
         <field name="state">code</field>
         <field name="code">model._process()</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
+        <field name="priority">46</field>
     </record>
@@ -37,7 +39,7 @@
         <field name="state">code</field>
         <field name="code">model._process()</field>
-        <field name="interval_number">1</field>
+        <field name="interval_number">6</field>
         <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
diff --git a/forwardport/models/forwardport.py b/forwardport/models/forwardport.py
index a0604da7..e805e59a 100644
--- a/forwardport/models/forwardport.py
+++ b/forwardport/models/forwardport.py
@@ -1,4 +1,5 @@
 # -*- coding: utf-8 -*-
+import builtins
 import contextlib
 import logging
 import re
@@ -76,6 +77,16 @@ class ForwardPortTasks(models.Model, Queue):
     retry_after = fields.Datetime(required=True, default='1900-01-01 01:01:01')
     pr_id = fields.Many2one('runbot_merge.pull_requests')
 
+    def create(self, vals_list):
+        self.env.ref('forwardport.port_forward')._trigger()
+        return super().create(vals_list)
+
+    def write(self, vals):
+        if retry := vals.get('retry_after'):
+            self.env.ref('forwardport.port_forward')\
+                ._trigger(fields.Datetime.to_datetime(retry))
+        return super().write(vals)
+
     def _search_domain(self):
         return super()._search_domain() + [
             ('retry_after', '<=', fields.Datetime.to_string(fields.Datetime.now())),
@@ -259,6 +270,10 @@ class UpdateQueue(models.Model, Queue):
     original_root = fields.Many2one('runbot_merge.pull_requests')
     new_root = fields.Many2one('runbot_merge.pull_requests')
 
+    def create(self, vals_list):
+        self.env.ref('forwardport.updates')._trigger()
+        return super().create(vals_list)
+
     def _process_item(self):
         previous = self.new_root
         sentry_sdk.set_tag("update-root", self.new_root.display_name)
@@ -344,8 +359,12 @@ class DeleteBranches(models.Model, Queue):
 
     pr_id = fields.Many2one('runbot_merge.pull_requests')
 
+    def create(self, vals_list):
+        self.env.ref('forwardport.remover')._trigger(datetime.now() - MERGE_AGE)
+        return super().create(vals_list)
+
     def _search_domain(self):
-        cutoff = self.env.context.get('forwardport_merged_before') \
+        cutoff = getattr(builtins, 'forwardport_merged_before', None) \
              or fields.Datetime.to_string(datetime.now() - MERGE_AGE)
         return [('pr_id.merge_date', '<', cutoff)]
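Note: each queue model above now wakes its processing cron explicitly instead of
waiting for the (now 6-hourly) schedule. A minimal sketch of the pattern, assuming
Odoo 15+ where `ir.cron._trigger(at=None)` creates an `ir.cron.trigger` record;
`example.queue` and `example.queue_cron` are hypothetical names:

    from odoo import fields, models

    class Queue(models.Model):
        _name = 'example.queue'  # hypothetical queue model

        def create(self, vals_list):
            # ask the processing cron to run as soon as possible
            self.env.ref('example.queue_cron')._trigger()
            return super().create(vals_list)

        def write(self, vals):
            if retry := vals.get('retry_after'):
                # schedule a run at the requested retry time
                self.env.ref('example.queue_cron')._trigger(
                    fields.Datetime.to_datetime(retry))
            return super().write(vals)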
diff --git a/forwardport/models/project.py b/forwardport/models/project.py
index e6599975..72630453 100644
--- a/forwardport/models/project.py
+++ b/forwardport/models/project.py
@@ -13,6 +13,7 @@ it up), ...
 """
 from __future__ import annotations
 
+import builtins
 import datetime
 import itertools
 import json
@@ -487,7 +488,7 @@ stderr:
         ], order='source_id, id'), lambda p: p.source_id)
     def _reminder(self):
-        cutoff = self.env.context.get('forwardport_updated_before') \
+        cutoff = getattr(builtins, 'forwardport_updated_before', None) \
              or fields.Datetime.to_string(datetime.datetime.now() - DEFAULT_DELTA)
         cutoff_dt = fields.Datetime.from_string(cutoff)
@@ -501,8 +502,6 @@ stderr:
             pr_ids = {p.id for p in prs}
             for pr in prs:
                 pr_ids.discard(pr.parent_id.id)
-            print(source, prs, [p.parent_id for p in prs],
-                  '\n\t->', pr_ids, flush=True)
             for pr in (p for p in prs if p.id in pr_ids):
                 self.env.ref('runbot_merge.forwardport.reminder')._send(
                     repository=pr.repository,
diff --git a/forwardport/models/project_freeze.py b/forwardport/models/project_freeze.py
index 635912c0..0796fbaf 100644
--- a/forwardport/models/project_freeze.py
+++ b/forwardport/models/project_freeze.py
@@ -22,5 +22,7 @@ class FreezeWizard(models.Model):
     def unlink(self):
         r = super().unlink()
         if not (self.env.context.get('forwardport_keep_disabled') or self.search_count([])):
-            self.env.ref('forwardport.port_forward').active = True
+            cron = self.env.ref('forwardport.port_forward')
+            cron.active = True
+            cron._trigger()  # process forward ports enqueued during the freeze period
         return r
diff --git a/forwardport/tests/conftest.py b/forwardport/tests/conftest.py
index 408345af..8764943d 100644
--- a/forwardport/tests/conftest.py
+++ b/forwardport/tests/conftest.py
@@ -4,18 +4,6 @@ import re
 
 import pytest
 import requests
 
-@pytest.fixture
-def default_crons():
-    return [
-        'runbot_merge.process_updated_commits',
-        'runbot_merge.merge_cron',
-        'runbot_merge.staging_cron',
-        'forwardport.port_forward',
-        'forwardport.updates',
-        'runbot_merge.check_linked_prs_status',
-        'runbot_merge.feedback_cron',
-    ]
-
 # public_repo — necessary to leave comments
 # admin:repo_hook — to set up hooks (duh)
 # delete_repo — to cleanup repos created under a user
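Note: the `forwardport_*_before` cutoffs used to travel through the RPC context of
`method_direct_trigger`; jobs dispatched through the regular cron machinery get a
fresh environment with no caller context, so the dummy `saas_worker` module copies
the values onto `builtins` around `_process_jobs` and the cron code reads them back.
A condensed restatement of the two halves (sketch only):

    import builtins

    # driver side (dummy saas_worker), around the cron processing:
    builtins.forwardport_updated_before = self.env.context.get('forwardport_updated_before')
    try:
        self.env['ir.cron']._process_jobs(self.env.cr.dbname)
    finally:
        del builtins.forwardport_updated_before

    # consumer side (e.g. _reminder above):
    cutoff = getattr(builtins, 'forwardport_updated_before', None) \
          or fields.Datetime.to_string(datetime.datetime.now() - DEFAULT_DELTA)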
diff --git a/forwardport/tests/test_simple.py b/forwardport/tests/test_simple.py
index 65bfca2d..634a25c0 100644
--- a/forwardport/tests/test_simple.py
+++ b/forwardport/tests/test_simple.py
@@ -123,7 +123,7 @@ def test_straightforward_flow(env, config, make_repo, users):
         prod.post_status(pr1.head, 'success', 'legal/cla')
 
     env.run_crons()
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
 
     pr0_, pr1_, pr2 = env['runbot_merge.pull_requests'].search([], order='number')
@@ -329,8 +329,8 @@ def test_empty(env, config, make_repo, users):
     assert project.fp_github_name == users['other']
 
     # check reminder
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
 
     awaiting = (
         users['other'],
@@ -374,7 +374,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
     # check that this stops if we close the PR
     with prod:
         fail_pr.close()
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
     assert pr1.comments == [
         (users['reviewer'], 'hansen r+'),
         seen(env, pr1, users),
@@ -597,14 +597,14 @@ def test_disapproval(env, config, make_repo, users):
         prod.post_status(pr2_id.head, 'success')
         pr2.post_comment('hansen r+', token=config['role_other']['token'])
     # no point creating staging for our needs, just propagate statuses
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr1_id.state == 'ready'
     assert pr2_id.state == 'ready'
 
     # oh no, pr1 has an error!
     with prod:
         pr1.post_comment('hansen r-', token=config['role_other']['token'])
-    env.run_crons('runbot_merge.feedback_cron')
+    env.run_crons(None)
     assert pr1_id.state == 'validated', "pr1 should not be approved anymore"
     assert pr2_id.state == 'ready', "pr2 should not be affected"
@@ -899,7 +899,7 @@ class TestClosing:
             prod.post_status(pr1_id.head, 'success', 'legal/cla')
             prod.post_status(pr1_id.head, 'success', 'ci/runbot')
         env.run_crons()
-        env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron')
+        env.run_crons('forwardport.reminder')
 
         assert env['runbot_merge.pull_requests'].search([], order='number') == pr0_id | pr1_id,\
             "closing the PR should suppress the FP sequence"
diff --git a/forwardport/tests/test_weird.py b/forwardport/tests/test_weird.py
index e2c573b6..19feda24 100644
--- a/forwardport/tests/test_weird.py
+++ b/forwardport/tests/test_weird.py
@@ -695,7 +695,7 @@ def test_retarget_after_freeze(env, config, make_repo, users):
         port_pr.base = 'bprime'
     assert port_id.target == new_branch
 
-    env.run_crons('forwardport.port_forward')
+    env.run_crons(None)
     assert not job.exists(), "job should have succeeded and apoptosed"
 
     # since the PR was "already forward-ported" to the new branch it should not
@@ -816,11 +816,7 @@ def test_freeze(env, config, make_repo, users):
     # re-enable forward-port cron after freeze
     _, cron_id = env['ir.model.data'].check_object_reference('forwardport', 'port_forward', context={'active_test': False})
     env['ir.cron'].browse([cron_id]).active = True
-
-    # run crons to process the feedback, run a second time in case of e.g.
-    # forward porting
-    env.run_crons()
-    env.run_crons()
+    env.run_crons('forwardport.port_forward')
 
     assert release_id.state == 'merged'
     assert not env['runbot_merge.pull_requests'].search([
@@ -896,7 +892,7 @@ def test_missing_magic_ref(env, config, make_repo):
     # check that the batch is still here and targeted for the future
     req = env['forwardport.batches'].search([])
     assert len(req) == 1
-    assert req.retry_after > datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
+    assert req.retry_after > datetime.utcnow().isoformat(" ", "seconds")
     # reset retry_after
     req.retry_after = '1900-01-01 01:01:01'
@@ -905,7 +901,7 @@ def test_missing_magic_ref(env, config, make_repo):
     [c2] = prod.make_commits(a_head.id, Commit('y', tree={'x': '0'}))
     assert c2 != c
     pr_id.head = c2
-    env.run_crons()
+    env.run_crons(None)
 
     fp_id = env['runbot_merge.pull_requests'].search([('source_id', '=', pr_id.id)])
     assert fp_id
@@ -1161,7 +1157,7 @@ def test_reminder_detached(env, config, make_repo, users):
     pr_c = prod.get_pr(pr_c_id.number)
 
     # region sanity check
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
 
     assert pr_b.comments == [
         seen(env, pr_b, users),
@@ -1195,12 +1191,7 @@ More info at https://github.com/odoo/odoo/wiki/Mergebot#forward-port
     # region check detached
     pr_c_id.write({'parent_id': False, 'detach_reason': 'because'})
-    env.run_crons('forwardport.reminder', 'runbot_merge.feedback_cron', context={'forwardport_updated_before': FAKE_PREV_WEEK})
-
-    for p in env['runbot_merge.pull_requests'].search_read(
-        [], ['id', 'parent_id', 'source_id', 'display_name']
-    ):
-        print(p, flush=True)
+    env.run_crons('forwardport.reminder', context={'forwardport_updated_before': FAKE_PREV_WEEK})
 
     assert pr_b.comments[2:] == [
         (users['user'], "@%s @%s child PR %s was modified / updated and has become a normal PR. This PR (and any of its parents) will need to be merged independently as approvals won't cross." % (
diff --git a/runbot_merge/data/merge_cron.xml b/runbot_merge/data/merge_cron.xml
index b0ff6e1d..aa9de63e 100644
--- a/runbot_merge/data/merge_cron.xml
+++ b/runbot_merge/data/merge_cron.xml
@@ -4,30 +4,33 @@
         <field name="state">code</field>
         <field name="code">model._check_stagings(True)</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
+        <field name="priority">30</field>
     </record>
     <record model="ir.cron" id="staging_cron">
         <field name="name">Check for progress of PRs and create Stagings</field>
         <field name="state">code</field>
         <field name="code">model._create_stagings(True)</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
+        <field name="priority">40</field>
     </record>
     <record model="ir.cron" id="feedback_cron">
         <field name="name">Send feedback to PR</field>
         <field name="state">code</field>
         <field name="code">model._send()</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
+        <field name="priority">60</field>
     </record>
     <record model="ir.cron" id="labels_cron">
         <field name="name">Update labels on PR</field>
@@ -38,16 +41,18 @@
         <field name="interval_type">minutes</field>
         <field name="numbercall">-1</field>
+        <field name="priority">70</field>
     </record>
     <record model="ir.cron" id="fetch_prs_cron">
         <field name="name">Check for PRs to fetch</field>
         <field name="state">code</field>
         <field name="code">model._check(True)</field>
-        <field name="interval_number">1</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
+        <field name="priority">10</field>
     </record>
     <record model="ir.cron" id="check_linked_prs_status">
         <field name="name">Warn on linked PRs where only one is ready</field>
@@ -58,15 +63,17 @@
         <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
+        <field name="priority">50</field>
     </record>
     <record model="ir.cron" id="process_updated_commits">
         <field name="name">Impact commit statuses on PRs and stagings</field>
         <field name="state">code</field>
         <field name="code">model._notify()</field>
-        <field name="interval_number">30</field>
-        <field name="interval_type">minutes</field>
+        <field name="interval_number">6</field>
+        <field name="interval_type">hours</field>
         <field name="numbercall">-1</field>
+        <field name="priority">20</field>
     </record>
diff --git a/runbot_merge/models/batch.py b/runbot_merge/models/batch.py
index eace55b4..d920802f 100644
--- a/runbot_merge/models/batch.py
+++ b/runbot_merge/models/batch.py
@@ -223,13 +223,15 @@ class Batch(models.Model):
                     failed and f"{failed} have failed CI",
                 ]))
             else:
-                if batch.blocked and batch.cancel_staging:
-                    if splits := batch.target.split_ids:
-                        splits.unlink()
-                    batch.target.active_staging_id.cancel(
-                        'unstaged by %s becoming ready',
-                        ', '.join(batch.prs.mapped('display_name')),
-                    )
+                if batch.blocked:
+                    self.env.ref("runbot_merge.staging_cron")._trigger()
+                    if batch.cancel_staging:
+                        if splits := batch.target.split_ids:
+                            splits.unlink()
+                        batch.target.active_staging_id.cancel(
+                            'unstaged by %s becoming ready',
+                            ', '.join(batch.prs.mapped('display_name')),
+                        )
                 batch.blocked = False
diff --git a/runbot_merge/models/crons/cleanup_scratch_branches.py b/runbot_merge/models/crons/cleanup_scratch_branches.py
index 1e7b6274..cfe05f32 100644
--- a/runbot_merge/models/crons/cleanup_scratch_branches.py
+++ b/runbot_merge/models/crons/cleanup_scratch_branches.py
@@ -9,10 +9,11 @@ class BranchCleanup(models.TransientModel):
     _description = "cleans up scratch refs for deactivated branches"
 
     def _run(self):
-        deactivated = self.env['runbot_merge.branch'].search([
-            ('active', '=', False),
-            ('write_date', '>=', self.env.context['lastcall']),
-        ])
+        domain = [('active', '=', False)]
+        if lastcall := self.env.context['lastcall']:
+            domain.append(('write_date', '>=', lastcall))
+        deactivated = self.env['runbot_merge.branch'].search(domain)
+
         _logger.info(
             "deleting scratch (tmp and staging) refs for branches %s",
             ', '.join(b.name for b in deactivated)
@@ -29,4 +30,4 @@ class BranchCleanup(models.TransientModel):
                 _logger.info("no tmp branch found for %s:%s", r.name, b.name)
             res = gh('delete', f'git/refs/heads/staging.{b.name}', check=False)
             if res.status_code != 204:
-                _logger.info("no staging branch found for %s:%s", res.name, b.name)
+                _logger.info("no staging branch found for %s:%s", r.name, b.name)
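Note: with every schedule relaxed to 6 hours, the new `priority` fields decide the
order in which a single `_process_jobs` pass handles triggered crons, preserving the
fetch -> status notification -> staging checks -> staging creation -> feedback
pipeline. Values collected from the two crons.xml changes in this patch (the
`labels_cron` id is an assumption; the other ids appear elsewhere in the diff):

    PRIORITIES = {
        'runbot_merge.fetch_prs_cron': 10,
        'runbot_merge.process_updated_commits': 20,
        'runbot_merge.merge_cron': 30,
        'runbot_merge.staging_cron': 40,
        'forwardport.port_forward': 43,
        'forwardport.updates': 46,
        'runbot_merge.check_linked_prs_status': 50,
        'runbot_merge.feedback_cron': 60,
        'runbot_merge.labels_cron': 70,  # assumed id for "Update labels on PR"
    }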
diff --git a/runbot_merge/models/pull_requests.py b/runbot_merge/models/pull_requests.py
index a5870cdc..ea55d03b 100644
--- a/runbot_merge/models/pull_requests.py
+++ b/runbot_merge/models/pull_requests.py
@@ -1774,6 +1774,12 @@ class Feedback(models.Model):
         help="Token field (from repo's project) to use to post messages"
     )
 
+    @api.model_create_multi
+    def create(self, vals_list):
+        # any time a feedback is created, it can be sent
+        self.env.ref('runbot_merge.feedback_cron')._trigger()
+        return super().create(vals_list)
+
     def _send(self):
         ghs = {}
         to_remove = []
@@ -2056,16 +2062,28 @@ class Stagings(models.Model):
             for context, status in statuses.get(commit.sha, {}).items()
         ]
 
+    def write(self, vals):
+        if timeout := vals.get('timeout_limit'):
+            self.env.ref("runbot_merge.merge_cron")\
+                ._trigger(fields.Datetime.to_datetime(timeout))
+
+        if vals.get('active') is False:
+            self.env.ref("runbot_merge.staging_cron")._trigger()
+
+        return super().write(vals)
+
     # only depend on staged_at as it should not get modified, but we might
     # update the CI timeout after the staging have been created and we
     # *do not* want to update the staging timeouts in that case
     @api.depends('staged_at')
     def _compute_timeout_limit(self):
+        timeouts = set()
         for st in self:
-            st.timeout_limit = fields.Datetime.to_string(
-                fields.Datetime.from_string(st.staged_at)
-                + datetime.timedelta(minutes=st.target.project_id.ci_timeout)
-            )
+            t = st.timeout_limit = st.staged_at + datetime.timedelta(minutes=st.target.project_id.ci_timeout)
+            timeouts.add(t)
+        if timeouts:
+            # we might have very different limits for each staging so need to schedule them all
+            self.env.ref("runbot_merge.merge_cron")._trigger_list(timeouts)
 
     @api.depends('batch_ids.prs')
     def _compute_prs(self):
@@ -2141,9 +2159,11 @@ class Stagings(models.Model):
             s.state = st
 
             if s.state != 'pending':
+                self.env.ref("runbot_merge.merge_cron")._trigger()
                 s.staging_end = fields.Datetime.now()
             if update_timeout_limit:
-                s.timeout_limit = fields.Datetime.to_string(datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout))
+                s.timeout_limit = datetime.datetime.now() + datetime.timedelta(minutes=s.target.project_id.ci_timeout)
+                self.env.ref("runbot_merge.merge_cron")._trigger(s.timeout_limit)
                 _logger.debug("%s got pending status, bumping timeout to %s (%s)", self, s.timeout_limit, cmap)
 
     def action_cancel(self):
@@ -2424,6 +2444,11 @@ class FetchJob(models.Model):
     number = fields.Integer(required=True, group_operator=None)
     closing = fields.Boolean(default=False)
 
+    @api.model_create_multi
+    def create(self, vals_list):
+        self.env.ref('runbot_merge.fetch_prs_cron')._trigger()
+        return super().create(vals_list)
+
     def _check(self, commit=False):
         """
         :param bool commit: commit after each fetch has been executed
diff --git a/runbot_merge/tests/conftest.py b/runbot_merge/tests/conftest.py
index b663a688..17b1546e 100644
--- a/runbot_merge/tests/conftest.py
+++ b/runbot_merge/tests/conftest.py
@@ -4,23 +4,6 @@ import pytest
 def module():
     return 'runbot_merge'
 
-@pytest.fixture
-def default_crons():
-    return [
-        # env['runbot_merge.project']._check_fetch()
-        'runbot_merge.fetch_prs_cron',
-        # env['runbot_merge.commit']._notify()
-        'runbot_merge.process_updated_commits',
-        # env['runbot_merge.project']._check_stagings()
-        'runbot_merge.merge_cron',
-        # env['runbot_merge.project']._create_stagings()
-        'runbot_merge.staging_cron',
-        # env['runbot_merge.pull_requests']._check_linked_prs_statuses()
-        'runbot_merge.check_linked_prs_status',
-        # env['runbot_merge.pull_requests.feedback']._send()
-        'runbot_merge.feedback_cron',
-    ]
-
 @pytest.fixture
 def project(env, config):
     return env['runbot_merge.project'].create({
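Note: `Stagings.write` and `_compute_timeout_limit` above pre-schedule merge_cron
wakeups at each staging's deadline, so timeouts are detected without polling. A
sketch of the `ir.cron` API relied on, assuming Odoo 15+ (`env` stands for any
Odoo environment, the deadlines are hypothetical datetimes):

    from datetime import datetime, timedelta

    cron = env.ref('runbot_merge.merge_cron')
    cron._trigger()                                      # run as soon as possible
    cron._trigger(datetime.now() + timedelta(hours=1))   # run at a given instant
    cron._trigger_list([deadline_a, deadline_b])         # one trigger per datetime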
diff --git a/runbot_merge/tests/test_basic.py b/runbot_merge/tests/test_basic.py
index abb03ec4..a68d70f8 100644
--- a/runbot_merge/tests/test_basic.py
+++ b/runbot_merge/tests/test_basic.py
@@ -3,6 +3,7 @@ import itertools
 import json
 import textwrap
 import time
+from typing import Callable
 from unittest import mock
 
 import pytest
@@ -13,8 +14,9 @@ import odoo
 from utils import _simple_init, seen, matches, get_partner, Commit, pr_page, to_pr, part_of, ensure_one
 
 @pytest.fixture(autouse=True)
-def _configure_statuses(project, repo):
-    project.repo_ids.required_statuses = 'legal/cla,ci/runbot'
+def _configure_statuses(request, project, repo):
+    if 'defaultstatuses' not in request.keywords:
+        project.repo_ids.required_statuses = 'legal/cla,ci/runbot'
 
 @pytest.fixture(autouse=True, params=["statuses", "rpc"])
 def stagings(request, env, project, repo):
@@ -562,19 +564,32 @@ def test_staging_conflict_second(env, repo, users, config):
     assert pr1_id.state == 'error', "now pr1 should be in error"
 
-def test_staging_ci_timeout(env, repo, config, page):
+@pytest.mark.defaultstatuses
+@pytest.mark.parametrize('update_op', [
+    pytest.param(
+        lambda _: {'timeout_limit': datetime.datetime.now().isoformat(" ", "seconds")},
+        id="set-timeout-limit",
+    ),
+    pytest.param(
+        lambda timeout: {'staged_at': (datetime.datetime.now() - datetime.timedelta(minutes=2*timeout)).isoformat(" ", "seconds")},
+        id="set-staged-at",
+    ),
+])
+def test_staging_ci_timeout(env, repo, config, page, update_op: Callable[[int], dict]):
     """If a staging timeouts (~ delay since staged greater than
     configured)... requeue?
     """
     with repo:
-        m = repo.make_commit(None, 'initial', None, tree={'f': 'm'})
+        m, _, c2 = repo.make_commits(
+            None,
+            Commit('initial', tree={'f': 'm'}),
+            Commit('first', tree={'f': 'c1'}),
+            Commit('second', tree={'f': 'c2'}),
+        )
         repo.make_ref('heads/master', m)
 
-        c1 = repo.make_commit(m, 'first', None, tree={'f': 'c1'})
-        c2 = repo.make_commit(c1, 'second', None, tree={'f': 'c2'})
         pr = repo.make_pr(title='title', body='body', target='master', head=c2)
-        repo.post_status(pr.head, 'success', 'ci/runbot')
-        repo.post_status(pr.head, 'success', 'legal/cla')
+        repo.post_status(pr.head, 'success')
         pr.post_comment('hansen r+ rebase-merge', config['role_reviewer']['token'])
     env.run_crons()
@@ -582,23 +597,26 @@ def test_staging_ci_timeout(env, repo, config, page):
     assert pr_id.staging_id
 
     timeout = env['runbot_merge.project'].search([]).ci_timeout
-    pr_id.staging_id.staged_at = odoo.fields.Datetime.to_string(datetime.datetime.now() - datetime.timedelta(minutes=2*timeout))
-    env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+    pr_id.staging_id.write(update_op(timeout))
+    env.run_crons(None)
     assert pr_id.state == 'error', "timeout should fail the PR"
 
     dangerbox = pr_page(page, pr).cssselect('.alert-danger span')
     assert dangerbox
     assert dangerbox[0].text == 'timed out (>60 minutes)'
 
+@pytest.mark.defaultstatuses
 def test_timeout_bump_on_pending(env, repo, config):
     with repo:
-        m = repo.make_commit(None, 'initial', None, tree={'f': '0'})
+        [m, c] = repo.make_commits(
+            None,
+            Commit('initial', tree={'f': '0'}),
+            Commit('c', tree={'f': '1'}),
+        )
         repo.make_ref('heads/master', m)
 
-        c = repo.make_commit(m, 'c', None, tree={'f': '1'})
         prx = repo.make_pr(title='title', body='body', target='master', head=c)
-        repo.post_status(prx.head, 'success', 'ci/runbot')
-        repo.post_status(prx.head, 'success', 'legal/cla')
+        repo.post_status(prx.head, 'success')
         prx.post_comment('hansen r+', config['role_reviewer']['token'])
     env.run_crons()
@@ -606,8 +624,8 @@ def test_timeout_bump_on_pending(env, repo, config):
     old_timeout = odoo.fields.Datetime.to_string(datetime.datetime.now() - datetime.timedelta(days=15))
     st.timeout_limit = old_timeout
     with repo:
-        repo.post_status('staging.master', 'pending', 'ci/runbot')
-    env.run_crons('runbot_merge.process_updated_commits')
+        repo.post_status('staging.master', 'pending')
+    env.run_crons(None)
     assert st.timeout_limit > old_timeout
 
 def test_staging_ci_failure_single(env, repo, users, config, page):
@@ -1092,63 +1110,63 @@ def test_reopen_merged_pr(env, repo, config, users):
     ]
 
 class TestNoRequiredStatus:
+    @pytest.mark.defaultstatuses
     def test_basic(self, env, repo, config):
         """ check that mergebot can work on a repo with no CI at all
         """
         env['runbot_merge.repository'].search([('name', '=', repo.name)]).status_ids = False
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'0': '0'})
+            [m, c] = repo.make_commits(
+                None,
+                Commit('initial', tree={'0': '0'}),
+                Commit('first', tree={'0': '1'}),
+            )
             repo.make_ref('heads/master', m)
 
-            c = repo.make_commit(m, 'first', None, tree={'0': '1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
-            prx.post_comment('hansen r+', config['role_reviewer']['token'])
+            pr = repo.make_pr(title='title', body='body', target='master', head=c)
+            pr.post_comment('hansen r+', config['role_reviewer']['token'])
         env.run_crons()
 
-        pr = env['runbot_merge.pull_requests'].search([
-            ('repository.name', '=', repo.name),
-            ('number', '=', prx.number)
-        ])
-        assert pr.state == 'ready'
-        st = pr.staging_id
-        assert st
-        env.run_crons()
+        pr_id = to_pr(env, pr)
+
+        st = env['runbot_merge.stagings'].search([], context={'active_test': False})
         assert st.state == 'success'
-        assert pr.state == 'merged'
+        assert pr_id.state == 'merged'
 
+    @pytest.mark.defaultstatuses
     def test_updated(self, env, repo, config):
         env['runbot_merge.repository'].search([('name', '=', repo.name)]).status_ids = False
         with repo:
-            m = repo.make_commit(None, 'initial', None, tree={'0': '0'})
+            [m, c] = repo.make_commits(
+                None,
+                Commit('initial', tree={'0': '0'}),
+                Commit('first', tree={'0': '1'}),
+            )
             repo.make_ref('heads/master', m)
 
-            c = repo.make_commit(m, 'first', None, tree={'0': '1'})
-            prx = repo.make_pr(title='title', body='body', target='master', head=c)
+            pr = repo.make_pr(title='title', body='body', target='master', head=c)
         env.run_crons()
 
-        pr = env['runbot_merge.pull_requests'].search([
-            ('repository.name', '=', repo.name),
-            ('number', '=', prx.number)
-        ])
-        assert pr.state == 'validated'
+        pr_id = to_pr(env, pr)
+        assert pr_id.state == 'validated'
 
         # normal push
         with repo:
-            repo.make_commits(c, repo.Commit('second', tree={'0': '2'}), ref=prx.ref)
+            repo.make_commits(c, repo.Commit('second', tree={'0': '2'}), ref=pr.ref)
         env.run_crons()
-        assert pr.state == 'validated'
+        assert pr_id.state == 'validated'
         with repo:
-            prx.post_comment('hansen r+', config['role_reviewer']['token'])
-        assert pr.state == 'ready'
+            pr.post_comment('hansen r+', config['role_reviewer']['token'])
+        assert pr_id.state == 'ready'
 
         # force push
         with repo:
-            repo.make_commits(m, repo.Commit('xxx', tree={'0': 'm'}), ref=prx.ref)
+            repo.make_commits(m, repo.Commit('xxx', tree={'0': 'm'}), ref=pr.ref)
         env.run_crons()
-        assert pr.state == 'validated'
+        assert pr_id.state == 'validated'
         with repo:
-            prx.post_comment('hansen r+', config['role_reviewer']['token'])
-        assert pr.state == 'ready'
+            pr.post_comment('hansen r+', config['role_reviewer']['token'])
+        assert pr_id.state == 'ready'
 
 class TestRetry:
     @pytest.mark.xfail(reason="This may not be a good idea as it could lead to tons of rebuild spam")
@@ -1230,7 +1248,7 @@ class TestRetry:
         with repo:
             pr.post_comment('hansen retry', config['role_' + retrier]['token'])
         assert pr_id.state == 'ready'
-        env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)
 
         staging_head2 = repo.commit('heads/staging.master')
         assert staging_head2 != staging_head
@@ -1263,7 +1281,7 @@ class TestRetry:
         with repo:
             pr.post_comment('hansen retry', config['role_reviewer']['token'])
-        env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)
 
         with repo:
             repo.post_status('staging.master', 'success', 'legal/cla')
@@ -1293,42 +1311,36 @@ class TestRetry:
             (users['user'], "@{reviewer} retry makes no sense when the PR is not in error.".format_map(users)),
         ]
 
+    @pytest.mark.defaultstatuses
     @pytest.mark.parametrize('disabler', ['user', 'other', 'reviewer'])
     def test_retry_disable(self, env, repo, disabler, users, config):
         with repo:
             prx = _simple_init(repo)
-            repo.post_status(prx.head, 'success', 'ci/runbot')
-            repo.post_status(prx.head, 'success', 'legal/cla')
+            repo.post_status(prx.head, 'success')
             prx.post_comment('hansen r+ delegate=%s rebase-merge' % users['other'],
                              config["role_reviewer"]['token'])
         env.run_crons()
 
-        assert env['runbot_merge.pull_requests'].search([
-            ('repository.name', '=', repo.name),
-            ('number', '=', prx.number)
-        ]).staging_id
+        pr_id = to_pr(env, prx)
+        staging_id = pr_id.staging_id
+        assert staging_id
 
-        staging_head = repo.commit('heads/staging.master')
         with repo:
-            repo.post_status('staging.master', 'success', 'legal/cla')
-            repo.post_status('staging.master', 'failure', 'ci/runbot')
+            repo.post_status('staging.master', 'failure')
         env.run_crons()
 
-        pr = env['runbot_merge.pull_requests'].search([
-            ('repository.name', '=', repo.name),
-            ('number', '=', prx.number)
-        ])
-        assert pr.state == 'error'
+        assert staging_id.state == 'failure'
+        assert not staging_id.active
+        assert pr_id.state == 'error'
 
         with repo:
             prx.post_comment('hansen r-', config['role_' + disabler]['token'])
-        assert pr.state == 'validated'
+        assert pr_id.state == 'validated'
        with repo:
            repo.make_commit(prx.ref, 'third', None, tree={'m': 'c3'})
        # just in case, apparently in some case the first post_status uses the old head...
        with repo:
-            repo.post_status(prx.head, 'success', 'ci/runbot')
-            repo.post_status(prx.head, 'success', 'legal/cla')
+            repo.post_status(prx.head, 'success')
         env.run_crons()
-        assert pr.state == 'validated'
+        assert pr_id.state == 'validated'
 
 class TestMergeMethod:
     """
@@ -1759,7 +1771,7 @@ commits, I need to know how to merge it:
             c0 = repo.make_commit(m, 'C0', None, tree={'a': 'b'})
             prx = repo.make_pr(title="gibberish", body="blahblah", target='master', head=c0)
-        env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)
 
         with repo:
             repo.post_status(prx.head, 'success', 'legal/cla')
@@ -1801,7 +1813,7 @@ commits, I need to know how to merge it:
             c0 = repo.make_commit(m, 'C0', None, tree={'a': 'b'})
             prx = repo.make_pr(title="gibberish", body=None, target='master', head=c0)
-        env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)
 
         with repo:
             repo.post_status(prx.head, 'success', 'legal/cla')
@@ -2463,7 +2475,7 @@ class TestPRUpdate(object):
         env.run_crons()
 
         pr_id = to_pr(env, pr)
-        env.run_crons('runbot_merge.process_updated_commits')
+        env.run_crons(None)
         assert pr_id.message == 'title\n\nbody'
         assert pr_id.state == 'ready'
         old_reviewer = pr_id.reviewed_by
@@ -3008,7 +3020,7 @@ class TestBatching(object):
                 reviewer=None,
             )
             pr02.post_comment('hansen alone r+', config['role_reviewer']['token'])
-        env.run_crons('runbot_merge.process_updated_commits')
+        env.run_crons(None)
         pr01_id = to_pr(env, pr01)
         assert pr01_id.blocked is False
         pr02_id = to_pr(env, pr02)
@@ -3077,7 +3089,7 @@ class TestBatching(object):
         with repo:
             repo.post_status('staging.master', 'success', 'ci/runbot')
             repo.post_status('staging.master', 'success', 'legal/cla')
-        env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+        env.run_crons(None)
         assert pr2.state == 'merged'
 
 class TestReviewing:
@@ -3534,7 +3546,6 @@ class TestUnknownPR:
             prx.post_comment('hansen r+', config['role_reviewer']['token'])
         env.run_crons(
             'runbot_merge.fetch_prs_cron',
-            'runbot_merge.feedback_cron',
         )
 
         assert prx.comments == [
@@ -3559,7 +3570,6 @@ class TestUnknownPR:
             prx.post_review('APPROVE', 'hansen r+', config['role_reviewer']['token'])
         env.run_crons(
             'runbot_merge.fetch_prs_cron',
-            'runbot_merge.feedback_cron',
         )
 
         # FIXME: either split out reviews in local or merge reviews & comments in remote
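Note: the `defaultstatuses` marker introduced above is custom; unless it is already
declared in the suite's pytest configuration, pytest emits PytestUnknownMarkWarning
for it (and fails collection under --strict-markers). A possible registration,
assuming none exists yet:

    # in the test suite's conftest.py
    def pytest_configure(config):
        config.addinivalue_line(
            "markers",
            "defaultstatuses: do not override the repo's required_statuses",
        )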
diff --git a/runbot_merge/tests/test_by_branch.py b/runbot_merge/tests/test_by_branch.py
index efaf0394..9134a4e2 100644
--- a/runbot_merge/tests/test_by_branch.py
+++ b/runbot_merge/tests/test_by_branch.py
@@ -34,15 +34,15 @@ def test_status_applies(env, repo, config):
     with repo:
         repo.post_status(c, 'success', 'ci')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr_id.state == 'opened'
     with repo:
         repo.post_status(c, 'success', 'pr')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr_id.state == 'opened'
     with repo:
         repo.post_status(c, 'success', 'lint')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr_id.state == 'validated'
 
     with repo:
@@ -53,15 +53,15 @@ def test_status_applies(env, repo, config):
     assert st.state == 'pending'
     with repo:
         repo.post_status('staging.master', 'success', 'ci')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert st.state == 'pending'
     with repo:
         repo.post_status('staging.master', 'success', 'lint')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert st.state == 'pending'
     with repo:
         repo.post_status('staging.master', 'success', 'staging')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert st.state == 'success'
 
 @pytest.mark.usefixtures('_setup_statuses')
@@ -84,11 +84,11 @@ def test_status_skipped(env, project, repo, config):
     with repo:
         repo.post_status(c, 'success', 'ci')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr_id.state == 'opened'
     with repo:
         repo.post_status(c, 'success', 'pr')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert pr_id.state == 'validated'
 
     with repo:
@@ -99,11 +99,11 @@ def test_status_skipped(env, project, repo, config):
     assert st.state == 'pending'
     with repo:
         repo.post_status('staging.maintenance', 'success', 'staging')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert st.state == 'pending'
     with repo:
         repo.post_status('staging.maintenance', 'success', 'ci')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert st.state == 'success'
 
 def test_pseudo_version_tag(env, project, make_repo, setreviewers, config):
diff --git a/runbot_merge/tests/test_disabled_branch.py b/runbot_merge/tests/test_disabled_branch.py
index cfd16bae..88e5e490 100644
--- a/runbot_merge/tests/test_disabled_branch.py
+++ b/runbot_merge/tests/test_disabled_branch.py
@@ -45,10 +45,21 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
     staging_id = branch_id.active_staging_id
     assert staging_id == pr_id.staging_id
 
+    # staging of `pr` should have generated a staging branch
+    _ = repo.get_ref('heads/staging.other')
+    # stagings should not need a tmp branch anymore, so this should not exist
+    with pytest.raises(AssertionError, match=r'Not Found'):
+        repo.get_ref('heads/tmp.other')
+
     # disable branch "other"
     branch_id.active = False
     env.run_crons()
 
+    # triggered cleanup should have deleted the staging for the disabled `other`
+    # target branch
+    with pytest.raises(AssertionError, match=r'Not Found'):
+        repo.get_ref('heads/staging.other')
+
     # the PR should not have been closed implicitly
     assert pr_id.state == 'ready'
     # but it should be unstaged
@@ -86,17 +97,10 @@ def test_existing_pr_disabled_branch(env, project, make_repo, setreviewers, conf
     assert pr_id.staging_id
 
     # staging of `pr` should have generated a staging branch
-    _ = repo.get_ref('heads/staging.other')
+    _ = repo.get_ref('heads/staging.other2')
     # stagings should not need a tmp branch anymore, so this should not exist
     with pytest.raises(AssertionError, match=r'Not Found'):
-        repo.get_ref('heads/tmp.other')
-
-    assert env['base'].run_crons()
-
-    # triggered cleanup should have deleted the staging for the disabled `other`
-    # target branch
-    with pytest.raises(AssertionError, match=r'Not Found'):
-        repo.get_ref('heads/staging.other')
+        repo.get_ref('heads/tmp.other2')
 
 def test_new_pr_no_branch(env, project, make_repo, setreviewers, users):
     """ A new PR to an *unknown* branch should be ignored and warn
diff --git a/runbot_merge/tests/test_multirepo.py b/runbot_merge/tests/test_multirepo.py
index 3ac35a8f..0efb7cfc 100644
--- a/runbot_merge/tests/test_multirepo.py
+++ b/runbot_merge/tests/test_multirepo.py
@@ -493,7 +493,7 @@ def test_ff_fail(env, project, repo_a, repo_b, config):
     with repo_a, repo_b:
         repo_a.post_status('heads/staging.master', 'success')
         repo_b.post_status('heads/staging.master', 'success')
-    env.run_crons('runbot_merge.merge_cron', 'runbot_merge.staging_cron')
+    env.run_crons(None)
 
     assert repo_b.commit('heads/master').id == cn,\
         "B should still be at the conflicting commit"
     assert repo_a.commit('heads/master').id == root_a,\
diff --git a/runbot_merge/tests/test_project_toggles.py b/runbot_merge/tests/test_project_toggles.py
index 615e6e4b..db838df6 100644
--- a/runbot_merge/tests/test_project_toggles.py
+++ b/runbot_merge/tests/test_project_toggles.py
@@ -1,3 +1,4 @@
+import datetime
 import functools
 from itertools import repeat
 
@@ -66,16 +67,16 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second):
     with repo:
         [m] = repo.make_commits(None, Commit("m", tree={"ble": "1"}), ref="heads/master")
 
-        [c] = repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1")
+        repo.make_commits(m, Commit("c", tree={"1": "1"}), ref="heads/pr1")
         pr1 = repo.make_pr(title="whatever", target="master", head="pr1")
 
-        [c] = repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2")
+        repo.make_commits(m, Commit("c", tree={"2": "2"}), ref="heads/pr2")
         pr2 = repo.make_pr(title="whatever", target="master", head="pr2")
 
-        [c] = repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3")
+        repo.make_commits(m, Commit("c", tree={"3": "3"}), ref="heads/pr3")
         pr3 = repo.make_pr(title="whatever", target="master", head="pr3")
 
-        [c] = repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4")
+        repo.make_commits(m, Commit("c", tree={"4": "4"}), ref="heads/pr4")
         pr4 = repo.make_pr(title="whatever", target="master", head="pr4")
 
     prs = [pr1, pr2, pr3, pr4]
@@ -100,18 +101,27 @@ def test_staging_priority(env, project, repo, config, mode, cutoff, second):
         for pr, pr_id in zip(prs[cutoff:], pr_ids[cutoff:]):
             pr.post_comment('hansen r+', config['role_reviewer']['token'])
             repo.post_status(pr_id.head, 'success')
-    env.run_crons('runbot_merge.process_updated_commits')
+    env.run_crons(None)
     assert not pr_ids.filtered(lambda p: p.blocked)
 
     # trigger a split
     with repo:
         repo.post_status('staging.master', 'failure')
-    env.run_crons('runbot_merge.process_updated_commits', 'runbot_merge.merge_cron')
+
+    # specifically delay creation of new staging to observe the failed
+    # staging's state and the splits
+    model, cron_id = env['ir.model.data'].check_object_reference('runbot_merge', 'staging_cron')
+    staging_cron = env[model].browse([cron_id])
+    staging_cron.active = False
+
+    env.run_crons(None)
     assert not staging.active
     assert not env['runbot_merge.stagings'].search([]).active
     assert env['runbot_merge.split'].search_count([]) == 2
 
-    env.run_crons()
+    staging_cron.active = True
+    # manually trigger that cron, as having the cron disabled prevented the
+    # creation of the triggers entirely
+    env.run_crons('runbot_merge.staging_cron')
 
     # check that st.pr_ids are the PRs we expect
     st = env['runbot_merge.stagings'].search([])